[PATCH 08/10] Scrub socket buffers of guest network I/O

Aruna Balakrishnaiah aruna at linux.vnet.ibm.com
Thu Feb 27 01:31:51 EST 2014


The vhost_net instance is attached to the file's private data. To find the
right file, walk the fdtable of each task and check whether the file's
f_op->open is vhost_net_open; if it is, the vhost_net instance can be
retrieved from the file's private data:

	if (task->files->fdt->fd[i]->f_op->open == &vhost_net_open)
		struct vhost_net *net = task->files->fdt->fd[i]->private_data;

Then, for each of the VHOST_NET_VQ_MAX (RX and TX) virtqueues:

	struct vhost_net_virtqueue *nvq = &net->vqs[i];
	struct vhost_virtqueue *vq = &nvq->vq;
	struct socket *sock = vq->private_data;
	struct sock *sk = sock->sk;

	struct sk_buff *next = sk->sk_receive_queue.next;

Scrub next->data for every sk_buff on the sk_receive_queue and
sk_write_queue lists, following ->next until the walk wraps back to the
list head.
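
For reference, a live-kernel C version of this walk would normally use the
skb_queue_walk() helper from <linux/skbuff.h>. A minimal sketch (not part
of this patch; the eppic script below open-codes the same circular-list
walk because it operates on the dump image):

	struct sk_buff *skb;

	skb_queue_walk(&sk->sk_receive_queue, skb)
		memset(skb->data, 'L', skb->data_len);

	skb_queue_walk(&sk->sk_write_queue, skb)
		memset(skb->data, 'L', skb->data_len);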


Signed-off-by: Aruna Balakrishnaiah <aruna at linux.vnet.ibm.com>
---
 eppic_scripts/vhost_net_buffers.c |  102 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 102 insertions(+)
 create mode 100644 eppic_scripts/vhost_net_buffers.c

diff --git a/eppic_scripts/vhost_net_buffers.c b/eppic_scripts/vhost_net_buffers.c
new file mode 100644
index 0000000..6c2b8df
--- /dev/null
+++ b/eppic_scripts/vhost_net_buffers.c
@@ -0,0 +1,102 @@
+string
+vhost_opt()
+{
+	    return "l";
+}
+
+string
+vhost_usage()
+{
+	    return "\n";
+}
+
+static void
+vhost_showusage()
+{
+	    printf("usage : vhost %s", vhost_usage());
+}
+
+string
+vhost_help()
+{
+	    return "Scrub the sk_buff data of vhost_net sockets (guest network I/O)";
+}
+
+void
+vhost_net(struct vhost_net *net)
+{
+	int i;
+
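+	/* Each vhost_net device has VHOST_NET_VQ_MAX (2) virtqueues: RX and TX */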
+	for (i = 0; i < 2; i++) {
+		struct vhost_net_virtqueue *nvq = &net->vqs[i];
+		struct vhost_virtqueue *vq = &nvq->vq;
+		struct socket *sock = (struct socket *)vq->private_data;
+		struct sock *sk = sock->sk;
+
+		struct sk_buff_head *head = &(sk->sk_receive_queue);
+		struct sk_buff *next = sk->sk_receive_queue.next;
+
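+		/* sk_buff queues are circular: follow ->next until it points back at the head */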
+		while (next != head)
+		{
+			struct sk_buff *buff = (struct sk_buff *) next;
+
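+			/* overwrite data_len bytes of packet data, then the 4-byte data_len field itself */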
+			memset((unsigned char *)buff->data, 'L', buff->data_len);
+			memset((char *)&(buff->data_len), 'L', 0x4);
+
+			next = buff->next;
+		}
+
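+		/* scrub the send (write) queue the same way */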
+		head = (struct sk_buff_head *)&(sk->sk_write_queue);
+		next = (struct sk_buff *)sk->sk_write_queue.next;
+
+		while (next != head)
+		{
+			struct sk_buff *buff = (struct sk_buff *) next;
+
+			memset((char *)buff->data, 'L', buff->data_len);
+			memset((char *)&(buff->data_len), 'L', 0x4);
+
+			next = buff->next;
+
+		}
+	}
+}
+
+int
+vhost()
+{
+	struct list_head *head, *next;
+	struct task_struct *tsk;
+
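+	/* walk every task in the system, starting from init_task, via the circular tasks list */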
+	tsk = &init_task;
+
+	head = (struct list_head *) &(tsk->tasks);
+	next = (struct list_head *) tsk->tasks.next;
+
+	while (next != head)
+	{
+		int i;
+		struct task_struct *task, *off = 0;
+
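+		/* equivalent of container_of(next, struct task_struct, tasks): subtract the member offset, computed via a null pointer */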
+		task = (struct task_struct *)((unsigned long)next - ((unsigned long)&(off->tasks)));
+
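+		/* files opened through vhost_net_open keep their vhost_net instance in private_data */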
+		if (task->files && task->files->fdt) {
+			for (i = 0; i < task->files->fdt->max_fds; i++) {
+				if (task->files->fdt->fd[i] && task->files->fdt->fd[i]->f_op
+					&& task->files->fdt->fd[i]->f_op->open == &vhost_net_open)
+					vhost_net((struct vhost_net *)task->files->fdt->fd[i]->private_data);
+			}
+		}
+
+		next = (struct list_head *)task->tasks.next;
+	}
+
+	return 1;
+}