/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>

#include "hyperv_net.h"

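/*
 * Allocate the per-channel netvsc state and attach it to the hv_device
 * extension pointer so the send and receive paths can look it up later.
 */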
static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;
	net_device->destroy = false;
	net_device->dev = device;
	device->ext = net_device;
	return net_device;
}
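
/*
 * Outbound path: once destroy is set, stop handing out the device so no
 * new sends are started during teardown.
 */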
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = device->ext;

	if (net_device && net_device->destroy)
		net_device = NULL;
	return net_device;
}
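
/*
 * Inbound path: unlike the outbound side, keep returning the device while
 * sends are still outstanding so their completions can be processed.
 */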
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = device->ext;

	if (!net_device)
		return NULL;
	if (net_device->destroy &&
	    atomic_read(&net_device->num_outstanding_sends) == 0)
		net_device = NULL;
	return net_device;
}
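
/*
 * Undo netvsc_init_recv_buf() step by step: revoke the buffer from the
 * host, tear down the GPADL, then free the pages and the section table.
 */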
static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) and therefore we need
	 * to send a revoke msg here.
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type = NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.revoke_recv_buf.id =
			NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk.
		 */
		if (ret != 0) {
			dev_err(&net_device->dev->device, "unable to send "
				"revoke receive buffer to netvsp");
			return ret;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->recv_buf_gpadl_handle);
		/*
		 * If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk.
		 */
		if (ret != 0) {
			dev_err(&net_device->dev->device,
				"unable to teardown receive buffer's gpadl");
			return ret;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		free_pages((unsigned long)net_device->recv_buf,
			   get_order(net_device->recv_buf_size));
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	return ret;
}
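
/*
 * Set up the receive buffer: allocate it, establish a GPADL so the host
 * can map it, then hand the handle to NetVSP and parse the section layout
 * it returns.
 */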
static int netvsc_init_recv_buf(struct hv_device *device)
{
	int ret = 0;
	int t;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return -ENODEV;
	}

	net_device->recv_buf =
		(void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
				get_order(net_device->recv_buf_size));
	if (!net_device->recv_buf) {
		dev_err(&device->device, "unable to allocate receive "
			"buffer of size %d", net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		dev_err(&device->device,
			"unable to establish receive buffer's gpadl");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
		net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		dev_err(&device->device,
			"unable to send receive buffer's gpadl to netvsp");
		goto cleanup;
	}

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
		dev_err(&device->device, "Unable to complete receive buffer "
			"initialization with NetVsp - status %d",
			init_packet->msg.v1_msg.
			send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmalloc(net_device->recv_section_cnt
		* sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -EINVAL;
		goto cleanup;
	}

	memcpy(net_device->recv_section,
	       init_packet->msg.v1_msg.send_recv_buf_complete.sections,
	       net_device->recv_section_cnt *
	       sizeof(struct nvsp_1_receive_buffer_section));

	/*
	 * For the 1st release, there should only be 1 section that represents
	 * the entire receive buffer.
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_recv_buf(net_device);

exit:
	return ret;
}
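
/*
 * Negotiate with NetVSP: protocol version handshake first, then the NDIS
 * version message, and finally the receive buffer setup.
 */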
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret, t;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return -ENODEV;
	}

	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver =
		NVSP_MIN_PROTOCOL_VERSION;
	init_packet->msg.init_msg.init.max_protocol_ver =
		NVSP_MAX_PROTOCOL_VERSION;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS) {
		ret = -EINVAL;
		goto cleanup;
	}

	if (init_packet->msg.init_msg.init_complete.
	    negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
		ret = -EPROTO;
		goto cleanup;
	}

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	ndis_version = 0x00050000;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_major_ver =
		(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_minor_ver =
		ndis_version & 0xFFFF;

	/* Send the NDIS version message; no completion is requested */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	ret = netvsc_init_recv_buf(device);

cleanup:
	return ret;
}

static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_recv_buf(net_device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct hv_netvsc_packet *netvsc_packet, *pos;
	unsigned long flags;

	net_device = (struct netvsc_device *)device->ext;
	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	net_device->destroy = true;
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/* Wait for all send completions */
	while (atomic_read(&net_device->num_outstanding_sends)) {
		dev_err(&device->device,
			"waiting for %d requests to complete...",
			atomic_read(&net_device->num_outstanding_sends));
		udelay(100);
	}

	netvsc_disconnect_vsp(net_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device().
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */
	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	device->ext = NULL;
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/* At this point, no one should be accessing net_device except in here */
	dev_notice(&device->device, "net device safe to remove");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	list_for_each_entry_safe(netvsc_packet, pos,
				 &net_device->recv_pkt_list, list_ent) {
		list_del(&netvsc_packet->list_ent);
		kfree(netvsc_packet);
	}

	kfree(net_device);
	return 0;
}
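
/*
 * Handle VM_PKT_COMP descriptors from the host: channel-init replies wake
 * the waiter in netvsc_init_recv_buf()/netvsc_connect_vsp(); RNDIS send
 * completions are forwarded to the layer above.
 */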
static void netvsc_send_completion(struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct netvsc_device *net_device;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		/* Get the send context */
		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
			packet->trans_id;

		/* Notify the layer above us */
		nvsc_packet->completion.send.send_completion(
			nvsc_packet->completion.send.send_completion_ctx);

		atomic_dec(&net_device->num_outstanding_sends);
	} else {
		dev_err(&device->device, "Unknown send completion packet "
			"type %d received!!", nvsp_packet->hdr.msg_type);
	}
}
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet)
{
	struct netvsc_device *net_device;
	int ret = 0;
	struct nvsp_message sendMessage;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "net device (%p) shutting down..."
			"ignoring outbound packets", net_device);
		return -ENODEV;
	}

	sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (packet->is_data_pkt) {
		/* 0 is RMC_DATA; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	/* Not using send buffer section */
	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		0xFFFFFFFF;
	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;

	if (packet->page_buf_cnt) {
		ret = vmbus_sendpacket_pagebuffer(device->channel,
						  packet->page_buf,
						  packet->page_buf_cnt,
						  &sendMessage,
						  sizeof(struct nvsp_message),
						  (unsigned long)packet);
	} else {
		ret = vmbus_sendpacket(device->channel, &sendMessage,
				       sizeof(struct nvsp_message),
				       (unsigned long)packet,
				       VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret != 0)
		dev_err(&device->device, "Unable to send packet %p ret %d",
			packet, ret);
	else
		atomic_inc(&net_device->num_outstanding_sends);

	return ret;
}
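
/*
 * Acknowledge a transfer-page packet back to the host, retrying briefly
 * if the ring is full (-EAGAIN).
 */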
static void netvsc_send_recv_completion(struct hv_device *device,
					u64 transaction_id)
{
	struct nvsp_message recvcompMessage;
	int retries = 0;
	int ret;

	recvcompMessage.hdr.msg_type =
		NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	/* FIXME: Pass in the status */
	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
		NVSP_STAT_SUCCESS;

retry_send_cmplt:
	/* Send the completion */
	ret = vmbus_sendpacket(device->channel, &recvcompMessage,
			       sizeof(struct nvsp_message), transaction_id,
			       VM_PKT_COMP, 0);
	if (ret == 0) {
		/* success */
	} else if (ret == -EAGAIN) {
		/* no more room...wait a bit and attempt to retry 3 times */
		retries++;
		dev_err(&device->device, "unable to send receive completion pkt"
			" (tid %llx)...retrying %d", transaction_id, retries);

		if (retries < 4) {
			udelay(100);
			goto retry_send_cmplt;
		} else {
			dev_err(&device->device, "unable to send receive "
				"completion pkt (tid %llx)...give up retrying",
				transaction_id);
		}
	} else {
		dev_err(&device->device, "unable to send receive "
			"completion pkt - %llx", transaction_id);
	}
}

/* Send a receive completion packet to the RNDIS device (ie NetVsp) */
static void netvsc_receive_completion(void *context)
{
	struct hv_netvsc_packet *packet = context;
	struct hv_device *device = (struct hv_device *)packet->device;
	struct netvsc_device *net_device;
	u64 transaction_id = 0;
	bool fsend_receive_comp = false;
	unsigned long flags;

	/*
	 * Even though it seems logical to do a GetOutboundNetDevice() here to
	 * send out the receive completion, we are using GetInboundNetDevice()
	 * since we may have disabled outbound traffic already.
	 */
	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return;
	}

	/* Overloading use of the lock. */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);

	packet->xfer_page_pkt->count--;

	/*
	 * Last one in the line that represents 1 xfer page packet.
	 * Return the xfer page packet itself to the freelist.
	 */
	if (packet->xfer_page_pkt->count == 0) {
		fsend_receive_comp = true;
		transaction_id = packet->completion.recv.recv_completion_tid;
		list_add_tail(&packet->xfer_page_pkt->list_ent,
			      &net_device->recv_pkt_list);
	}

	/* Put the packet back */
	list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/* Send a receive completion for the xfer page packet */
	if (fsend_receive_comp)
		netvsc_send_recv_completion(device, transaction_id);
}
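
/*
 * Receive path: validate the transfer-page packet, pull enough packets
 * off the freelist to describe each RNDIS range, and pass them up to the
 * RNDIS filter.
 */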
static void netvsc_receive(struct hv_device *device,
			   struct vmpacket_descriptor *packet)
{
	struct netvsc_device *net_device;
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *netvsc_packet = NULL;
	unsigned long start;
	unsigned long end, end_virtual;
	struct xferpage_packet *xferpage_packet = NULL;
	int i, j;
	int count = 0, bytes_remain = 0;
	unsigned long flags;
	LIST_HEAD(listHead);

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return;
	}

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packets.
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		dev_err(&device->device, "Unknown packet type received - %d",
			packet->type);
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		dev_err(&device->device, "Unknown nvsp packet type received -"
			" %d", nvsp_packet->hdr.msg_type);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		dev_err(&device->device, "Invalid xfer page set id - "
			"expecting %x got %x", NETVSC_RECEIVE_BUFFER_ID,
			vmxferpage_packet->xfer_pageset_id);
		return;
	}

	/*
	 * Grab free packets (range count + 1) to represent this xfer
	 * page packet. +1 to represent the xfer page packet itself.
	 * We grab it here so that we know exactly how many we can
	 * fulfill.
	 */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
	while (!list_empty(&net_device->recv_pkt_list)) {
		list_move_tail(net_device->recv_pkt_list.next, &listHead);
		if (++count == vmxferpage_packet->range_cnt + 1)
			break;
	}
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/*
	 * We need at least 2 netvsc pkts (1 to represent the xfer
	 * page and at least 1 for the range), i.e. we can handle
	 * some of the xfer page packet ranges...
	 */
	if (count < 2) {
		dev_err(&device->device, "Got only %d netvsc pkt...needed "
			"%d pkts. Dropping this xfer page packet completely!",
			count, vmxferpage_packet->range_cnt + 1);

		/* Return it to the freelist */
		spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
		for (i = count; i != 0; i--) {
			list_move_tail(listHead.next,
				       &net_device->recv_pkt_list);
		}
		spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
				       flags);

		netvsc_send_recv_completion(device,
					    vmxferpage_packet->d.trans_id);
		return;
	}

	/* Remove the 1st packet to represent the xfer page packet itself */
	xferpage_packet = (struct xferpage_packet *)listHead.next;
	list_del(&xferpage_packet->list_ent);

	/* This is how much we can satisfy */
	xferpage_packet->count = count - 1;

	if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
		dev_err(&device->device, "Needed %d netvsc pkts to satisfy "
			"this xfer page...got %d",
			vmxferpage_packet->range_cnt, xferpage_packet->count);
	}

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < (count - 1); i++) {
		netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
		list_del(&netvsc_packet->list_ent);

		/* Initialize the netvsc packet */
		netvsc_packet->xfer_page_pkt = xferpage_packet;
		netvsc_packet->completion.recv.recv_completion =
					netvsc_receive_completion;
		netvsc_packet->completion.recv.recv_completion_ctx =
					netvsc_packet;
		netvsc_packet->device = device;
		/* Save this so that we can send it back */
		netvsc_packet->completion.recv.recv_completion_tid =
					vmxferpage_packet->d.trans_id;

		netvsc_packet->total_data_buflen =
					vmxferpage_packet->ranges[i].byte_count;
		netvsc_packet->page_buf_cnt = 1;

		netvsc_packet->page_buf[0].len =
					vmxferpage_packet->ranges[i].byte_count;

		start = virt_to_phys((void *)((unsigned long)net_device->
			recv_buf + vmxferpage_packet->ranges[i].byte_offset));

		netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
		end_virtual = (unsigned long)net_device->recv_buf
			+ vmxferpage_packet->ranges[i].byte_offset
			+ vmxferpage_packet->ranges[i].byte_count - 1;
		end = virt_to_phys((void *)end_virtual);

		/* Calculate the page relative offset */
		netvsc_packet->page_buf[0].offset =
			vmxferpage_packet->ranges[i].byte_offset &
			(PAGE_SIZE - 1);
		if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
			/* Handle frame across multiple pages: */
			netvsc_packet->page_buf[0].len =
				(netvsc_packet->page_buf[0].pfn <<
				 PAGE_SHIFT)
				+ PAGE_SIZE - start;
			bytes_remain = netvsc_packet->total_data_buflen -
					netvsc_packet->page_buf[0].len;
			for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
				netvsc_packet->page_buf[j].offset = 0;
				if (bytes_remain <= PAGE_SIZE) {
					netvsc_packet->page_buf[j].len =
						bytes_remain;
					bytes_remain = 0;
				} else {
					netvsc_packet->page_buf[j].len =
						PAGE_SIZE;
					bytes_remain -= PAGE_SIZE;
				}
				netvsc_packet->page_buf[j].pfn =
					virt_to_phys((void *)(end_virtual -
						bytes_remain)) >> PAGE_SHIFT;
				netvsc_packet->page_buf_cnt++;
				if (bytes_remain == 0)
					break;
			}
		}

		/* Pass it to the upper layer */
		rndis_filter_receive(device, netvsc_packet);

		netvsc_receive_completion(netvsc_packet->
				completion.recv.recv_completion_ctx);
	}
}
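
/*
 * Channel callback: drain the vmbus ring, dispatching completions and
 * data packets; fall back to a bigger temporary buffer for packets that
 * do not fit in NETVSC_PACKET_SIZE.
 */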
static void netvsc_channel_cb(void *context)
{
	int ret;
	struct hv_device *device = context;
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
	unsigned char *packet;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;

	packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
			 GFP_ATOMIC);
	if (!packet)
		return;
	buffer = packet;

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "net device (%p) shutting down..."
			"ignoring inbound packets", net_device);
		goto out;
	}

	do {
		ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
					   &bytes_recvd, &request_id);
		if (ret == 0) {
			if (bytes_recvd > 0) {
				desc = (struct vmpacket_descriptor *)buffer;
				switch (desc->type) {
				case VM_PKT_COMP:
					netvsc_send_completion(device, desc);
					break;

				case VM_PKT_DATA_USING_XFER_PAGES:
					netvsc_receive(device, desc);
					break;

				default:
					dev_err(&device->device,
						"unhandled packet type %d, "
						"tid %llx len %d\n",
						desc->type, request_id,
						bytes_recvd);
					break;
				}

				/* reset */
				if (bufferlen > NETVSC_PACKET_SIZE) {
					kfree(buffer);
					buffer = packet;
					bufferlen = NETVSC_PACKET_SIZE;
				}
			} else {
				/* reset */
				if (bufferlen > NETVSC_PACKET_SIZE) {
					kfree(buffer);
					buffer = packet;
					bufferlen = NETVSC_PACKET_SIZE;
				}

				break;
			}
		} else if (ret == -ENOBUFS) {
			/* Handle large packet */
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (buffer == NULL) {
				/* Try again next time around */
				dev_err(&device->device,
					"unable to allocate buffer of size "
					"(%d)!!", bytes_recvd);
				break;
			}

			bufferlen = bytes_recvd;
		}
	} while (1);

out:
	kfree(buffer);
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
	int ret = 0;
	int i;
	int ring_size =
		((struct netvsc_device_info *)additional_info)->ring_size;
	struct netvsc_device *net_device;
	struct hv_netvsc_packet *packet, *pos;

	net_device = alloc_net_device(device);
	if (!net_device) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Initialize the NetVSC channel extension */
	net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	spin_lock_init(&net_device->recv_pkt_list_lock);

	INIT_LIST_HEAD(&net_device->recv_pkt_list);

	for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
		packet = kzalloc(sizeof(struct hv_netvsc_packet) +
				 (NETVSC_RECEIVE_SG_COUNT *
				  sizeof(struct hv_page_buffer)), GFP_KERNEL);
		if (!packet)
			break;

		list_add_tail(&packet->list_ent,
			      &net_device->recv_pkt_list);
	}
	init_completion(&net_device->channel_init_wait);
890 /* Open the channel */
891 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
892 ring_size * PAGE_SIZE, NULL, 0,
893 netvsc_channel_cb, device);
896 dev_err(&device->device, "unable to open channel: %d", ret);
900 /* Channel is opened */
901 pr_info("hv_netvsc channel opened successfully");
903 /* Connect with the NetVsp */
904 ret = netvsc_connect_vsp(device);
906 dev_err(&device->device,
907 "unable to connect to NetVSP - %d", ret);
914 /* Now, we can close the channel safely */
915 vmbus_close(device->channel);
920 list_for_each_entry_safe(packet, pos,
921 &net_device->recv_pkt_list,
923 list_del(&packet->list_ent);