Pileus Git - ~andy/linux/blob - drivers/staging/hv/netvsc.c
Staging: hv: netvsc: Get rid of release_inbound_net_device() by inlining the code
[~andy/linux] / drivers / staging / hv / netvsc.c
1 /*
2  * Copyright (c) 2009, Microsoft Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Authors:
18  *   Haiyang Zhang <haiyangz@microsoft.com>
19  *   Hank Janssen  <hjanssen@microsoft.com>
20  */
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/wait.h>
26 #include <linux/mm.h>
27 #include <linux/delay.h>
28 #include <linux/io.h>
29 #include <linux/slab.h>
30
31 #include "hyperv.h"
32 #include "hyperv_net.h"
33
34
35 static struct netvsc_device *alloc_net_device(struct hv_device *device)
36 {
37         struct netvsc_device *net_device;
38
39         net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
40         if (!net_device)
41                 return NULL;
42
43         /* Set to 2 to allow both inbound and outbound traffic */
44         atomic_set(&net_device->refcnt, 2);
45
46         net_device->destroy = false;
47         net_device->dev = device;
48         device->ext = net_device;
49
50         return net_device;
51 }
52
53 /* Get the net device object iff exists and its refcount > 1 */
54 static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
55 {
56         struct netvsc_device *net_device;
57
58         net_device = device->ext;
59         if (net_device && (atomic_read(&net_device->refcnt) > 1) &&
60                 !net_device->destroy)
61                 atomic_inc(&net_device->refcnt);
62         else
63                 net_device = NULL;
64
65         return net_device;
66 }
67
68 /* Get the net device object iff exists and its refcount > 0 */
69 static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
70 {
71         struct netvsc_device *net_device;
72
73         net_device = device->ext;
74         if (net_device && atomic_read(&net_device->refcnt))
75                 atomic_inc(&net_device->refcnt);
76         else
77                 net_device = NULL;
78
79         return net_device;
80 }
81
82 static void put_net_device(struct hv_device *device)
83 {
84         struct netvsc_device *net_device;
85
86         net_device = device->ext;
87
88         atomic_dec(&net_device->refcnt);
89 }
90
91 static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
92 {
93         struct nvsp_message *revoke_packet;
94         int ret = 0;
95
96         /*
97          * If we got a section count, it means we received a
98          * SendReceiveBufferComplete msg (ie sent
99          * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
100          * to send a revoke msg here
101          */
102         if (net_device->recv_section_cnt) {
103                 /* Send the revoke receive buffer */
104                 revoke_packet = &net_device->revoke_packet;
105                 memset(revoke_packet, 0, sizeof(struct nvsp_message));
106
107                 revoke_packet->hdr.msg_type =
108                         NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
109                 revoke_packet->msg.v1_msg.
110                 revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
111
112                 ret = vmbus_sendpacket(net_device->dev->channel,
113                                        revoke_packet,
114                                        sizeof(struct nvsp_message),
115                                        (unsigned long)revoke_packet,
116                                        VM_PKT_DATA_INBAND, 0);
117                 /*
118                  * If we failed here, we might as well return and
119                  * have a leak rather than continue and a bugchk
120                  */
121                 if (ret != 0) {
122                         dev_err(&net_device->dev->device, "unable to send "
123                                 "revoke receive buffer to netvsp");
124                         return ret;
125                 }
126         }
127
128         /* Teardown the gpadl on the vsp end */
129         if (net_device->recv_buf_gpadl_handle) {
130                 ret = vmbus_teardown_gpadl(net_device->dev->channel,
131                            net_device->recv_buf_gpadl_handle);
132
133                 /* If we failed here, we might as well return and have a leak
134                  * rather than continue and a bugchk
135                  */
136                 if (ret != 0) {
137                         dev_err(&net_device->dev->device,
138                                    "unable to teardown receive buffer's gpadl");
139                         return -ret;
140                 }
141                 net_device->recv_buf_gpadl_handle = 0;
142         }
143
144         if (net_device->recv_buf) {
145                 /* Free up the receive buffer */
146                 free_pages((unsigned long)net_device->recv_buf,
147                         get_order(net_device->recv_buf_size));
148                 net_device->recv_buf = NULL;
149         }
150
151         if (net_device->recv_section) {
152                 net_device->recv_section_cnt = 0;
153                 kfree(net_device->recv_section);
154                 net_device->recv_section = NULL;
155         }
156
157         return ret;
158 }
159
160 static int netvsc_init_recv_buf(struct hv_device *device)
161 {
162         int ret = 0;
163         int t;
164         struct netvsc_device *net_device;
165         struct nvsp_message *init_packet;
166
167         net_device = get_outbound_net_device(device);
168         if (!net_device) {
169                 dev_err(&device->device, "unable to get net device..."
170                            "device being destroyed?");
171                 return -ENODEV;
172         }
173
174         net_device->recv_buf =
175                 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
176                                 get_order(net_device->recv_buf_size));
177         if (!net_device->recv_buf) {
178                 dev_err(&device->device, "unable to allocate receive "
179                         "buffer of size %d", net_device->recv_buf_size);
180                 ret = -ENOMEM;
181                 goto cleanup;
182         }
183
184         /*
185          * Establish the gpadl handle for this buffer on this
186          * channel.  Note: This call uses the vmbus connection rather
187          * than the channel to establish the gpadl handle.
188          */
189         ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
190                                     net_device->recv_buf_size,
191                                     &net_device->recv_buf_gpadl_handle);
192         if (ret != 0) {
193                 dev_err(&device->device,
194                         "unable to establish receive buffer's gpadl");
195                 goto cleanup;
196         }
197
198
199         /* Notify the NetVsp of the gpadl handle */
200         init_packet = &net_device->channel_init_pkt;
201
202         memset(init_packet, 0, sizeof(struct nvsp_message));
203
204         init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
205         init_packet->msg.v1_msg.send_recv_buf.
206                 gpadl_handle = net_device->recv_buf_gpadl_handle;
207         init_packet->msg.v1_msg.
208                 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
209
210         /* Send the gpadl notification request */
211         ret = vmbus_sendpacket(device->channel, init_packet,
212                                sizeof(struct nvsp_message),
213                                (unsigned long)init_packet,
214                                VM_PKT_DATA_INBAND,
215                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
216         if (ret != 0) {
217                 dev_err(&device->device,
218                         "unable to send receive buffer's gpadl to netvsp");
219                 goto cleanup;
220         }
221
222         t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
223         BUG_ON(t == 0);
224
225
226         /* Check the response */
227         if (init_packet->msg.v1_msg.
228             send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
229                 dev_err(&device->device, "Unable to complete receive buffer "
230                            "initialzation with NetVsp - status %d",
231                            init_packet->msg.v1_msg.
232                            send_recv_buf_complete.status);
233                 ret = -EINVAL;
234                 goto cleanup;
235         }
236
237         /* Parse the response */
238
239         net_device->recv_section_cnt = init_packet->msg.
240                 v1_msg.send_recv_buf_complete.num_sections;
241
242         net_device->recv_section = kmalloc(net_device->recv_section_cnt
243                 * sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
244         if (net_device->recv_section == NULL) {
245                 ret = -EINVAL;
246                 goto cleanup;
247         }
248
249         memcpy(net_device->recv_section,
250                 init_packet->msg.v1_msg.
251                send_recv_buf_complete.sections,
252                 net_device->recv_section_cnt *
253                sizeof(struct nvsp_1_receive_buffer_section));
254
255         /*
256          * For 1st release, there should only be 1 section that represents the
257          * entire receive buffer
258          */
259         if (net_device->recv_section_cnt != 1 ||
260             net_device->recv_section->offset != 0) {
261                 ret = -EINVAL;
262                 goto cleanup;
263         }
264
265         goto exit;
266
267 cleanup:
268         netvsc_destroy_recv_buf(net_device);
269
270 exit:
271         put_net_device(device);
272         return ret;
273 }
274
275
/*
 * netvsc_connect_vsp - Perform the NVSP handshake with the NetVSP on
 * the host.
 *
 * Sequence: (1) negotiate the protocol version, (2) send the NDIS
 * version, (3) post the receive buffer via netvsc_init_recv_buf().
 * The shared channel_init_pkt buffer is reused for each exchange, so
 * the ordering of memset / send / wait below must not change.
 * Returns 0 on success or a negative errno.
 */
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret, t;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			   "device being destroyed?");
		return -ENODEV;
	}

	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver =
		NVSP_MIN_PROTOCOL_VERSION;
	init_packet->msg.init_msg.init.max_protocol_ver =
		NVSP_MAX_PROTOCOL_VERSION;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	/* netvsc_send_completion() copies the host's reply into
	 * channel_init_pkt before signalling this completion. */
	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);

	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* This driver only speaks NVSP protocol version 1. */
	if (init_packet->msg.init_msg.init_complete.
	    negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
		ret = -EPROTO;
		goto cleanup;
	}
	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	/* NDIS version 5.0: major in the high 16 bits, minor in the low. */
	ndis_version = 0x00050000;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
				(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
				ndis_version & 0xFFFF;

	/* Send the init request; no completion wait for this message. */
	ret = vmbus_sendpacket(device->channel, init_packet,
				sizeof(struct nvsp_message),
				(unsigned long)init_packet,
				VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	ret = netvsc_init_recv_buf(device);

cleanup:
	put_net_device(device);
	return ret;
}
355
/*
 * netvsc_disconnect_vsp - Undo netvsc_connect_vsp(): tear down the
 * receive buffer state shared with the host.
 */
static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_recv_buf(net_device);
}
360
/*
 * netvsc_device_remove - Callback when the root bus device is removed
 *
 * Tears down the netvsc device: stops new outbound references, drains
 * outstanding sends, disconnects from the NetVSP, waits for the
 * refcount to drop to zero, closes the channel, and frees everything.
 * Always returns 0.
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct hv_netvsc_packet *netvsc_packet, *pos;
	unsigned long flags;

	net_device = (struct netvsc_device *)device->ext;
	/*
	 * Drop one of the two base references set in alloc_net_device()
	 * and mark the device as being destroyed; with refcnt no longer
	 * > 1, get_outbound_net_device() stops handing out references.
	 * inbound_lock publishes the destroy flag to the channel callback.
	 */
	atomic_dec(&net_device->refcnt);
	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	net_device->destroy = true;
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/* Wait for all send completions */
	while (atomic_read(&net_device->num_outstanding_sends)) {
		dev_err(&device->device,
			"waiting for %d requests to complete...",
			atomic_read(&net_device->num_outstanding_sends));
		udelay(100);
	}

	netvsc_disconnect_vsp(net_device);

	/* Drop the remaining base reference and detach from the device. */
	atomic_dec(&net_device->refcnt);
	device->ext = NULL;
	/*
	 * Wait until the ref cnt falls to 0.
	 * We have already stopped any new references
	 * for outgoing traffic. Also, at this point we don't have any
	 * incoming traffic as well. So this must be outgoing refrences
	 * established prior to marking the device as being destroyed.
	 * Since the send path is non-blocking, it is reasonable to busy
	 * wait here.
	 */
	while (atomic_read(&net_device->refcnt))
		udelay(100);

	/* At this point, no one should be accessing netDevice except in here */
	dev_notice(&device->device, "net device safe to remove");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	list_for_each_entry_safe(netvsc_packet, pos,
				 &net_device->recv_pkt_list, list_ent) {
		list_del(&netvsc_packet->list_ent);
		kfree(netvsc_packet);
	}

	kfree(net_device);
	return 0;
}
416
/*
 * netvsc_send_completion - Dispatch a completion packet from the host.
 *
 * Channel-setup replies (INIT_COMPLETE, SEND_RECV_BUF_COMPLETE,
 * SEND_SEND_BUF_COMPLETE) are copied into channel_init_pkt and wake the
 * waiter on channel_init_wait.  RNDIS send completions invoke the
 * originating packet's completion callback and decrement the
 * outstanding-send counter that netvsc_device_remove() drains.
 */
static void netvsc_send_completion(struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct netvsc_device *net_device;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			   "device being destroyed?");
		return;
	}

	/* The nvsp payload starts offset8 * 8 bytes into the descriptor. */
	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		/*
		 * Get the send context: trans_id round-trips the
		 * hv_netvsc_packet pointer that netvsc_send() passed
		 * as the request id.
		 */
		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
			packet->trans_id;

		/* Notify the layer above us */
		nvsc_packet->completion.send.send_completion(
			nvsc_packet->completion.send.send_completion_ctx);

		atomic_dec(&net_device->num_outstanding_sends);
	} else {
		dev_err(&device->device, "Unknown send completion packet type- "
			   "%d received!!", nvsp_packet->hdr.msg_type);
	}

	put_net_device(device);
}
461
462 int netvsc_send(struct hv_device *device,
463                         struct hv_netvsc_packet *packet)
464 {
465         struct netvsc_device *net_device;
466         int ret = 0;
467
468         struct nvsp_message sendMessage;
469
470         net_device = get_outbound_net_device(device);
471         if (!net_device) {
472                 dev_err(&device->device, "net device (%p) shutting down..."
473                            "ignoring outbound packets", net_device);
474                 return -ENODEV;
475         }
476
477         sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
478         if (packet->is_data_pkt) {
479                 /* 0 is RMC_DATA; */
480                 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
481         } else {
482                 /* 1 is RMC_CONTROL; */
483                 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
484         }
485
486         /* Not using send buffer section */
487         sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
488                 0xFFFFFFFF;
489         sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
490
491         if (packet->page_buf_cnt) {
492                 ret = vmbus_sendpacket_pagebuffer(device->channel,
493                                                   packet->page_buf,
494                                                   packet->page_buf_cnt,
495                                                   &sendMessage,
496                                                   sizeof(struct nvsp_message),
497                                                   (unsigned long)packet);
498         } else {
499                 ret = vmbus_sendpacket(device->channel, &sendMessage,
500                                 sizeof(struct nvsp_message),
501                                 (unsigned long)packet,
502                                 VM_PKT_DATA_INBAND,
503                                 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
504
505         }
506
507         if (ret != 0)
508                 dev_err(&device->device, "Unable to send packet %p ret %d",
509                            packet, ret);
510
511         atomic_inc(&net_device->num_outstanding_sends);
512         put_net_device(device);
513         return ret;
514 }
515
516 static void netvsc_send_recv_completion(struct hv_device *device,
517                                         u64 transaction_id)
518 {
519         struct nvsp_message recvcompMessage;
520         int retries = 0;
521         int ret;
522
523         recvcompMessage.hdr.msg_type =
524                                 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
525
526         /* FIXME: Pass in the status */
527         recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
528                 NVSP_STAT_SUCCESS;
529
530 retry_send_cmplt:
531         /* Send the completion */
532         ret = vmbus_sendpacket(device->channel, &recvcompMessage,
533                                sizeof(struct nvsp_message), transaction_id,
534                                VM_PKT_COMP, 0);
535         if (ret == 0) {
536                 /* success */
537                 /* no-op */
538         } else if (ret == -EAGAIN) {
539                 /* no more room...wait a bit and attempt to retry 3 times */
540                 retries++;
541                 dev_err(&device->device, "unable to send receive completion pkt"
542                         " (tid %llx)...retrying %d", transaction_id, retries);
543
544                 if (retries < 4) {
545                         udelay(100);
546                         goto retry_send_cmplt;
547                 } else {
548                         dev_err(&device->device, "unable to send receive "
549                                 "completion pkt (tid %llx)...give up retrying",
550                                 transaction_id);
551                 }
552         } else {
553                 dev_err(&device->device, "unable to send receive "
554                         "completion pkt - %llx", transaction_id);
555         }
556 }
557
/* Send a receive completion packet to RNDIS device (ie NetVsp) */
/*
 * netvsc_receive_completion - Called once per received RNDIS packet when
 * the upper layer is done with it.  Returns the packet to the free list
 * and, when the last packet belonging to an xfer-page packet is returned,
 * sends the receive completion for the whole transfer back to the host.
 */
static void netvsc_receive_completion(void *context)
{
	struct hv_netvsc_packet *packet = context;
	struct hv_device *device = (struct hv_device *)packet->device;
	struct netvsc_device *net_device;
	u64 transaction_id = 0;
	bool fsend_receive_comp = false;
	unsigned long flags;

	/*
	 * Even though it seems logical to do a GetOutboundNetDevice() here to
	 * send out receive completion, we are using GetInboundNetDevice()
	 * since we may have disable outbound traffic already.
	 */
	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			   "device being destroyed?");
		return;
	}

	/* Overloading use of the lock: it also protects xfer_page_pkt->count. */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);

	packet->xfer_page_pkt->count--;

	/*
	 * Last one in the line that represent 1 xfer page packet.
	 * Return the xfer page packet itself to the freelist
	 */
	if (packet->xfer_page_pkt->count == 0) {
		fsend_receive_comp = true;
		transaction_id = packet->completion.recv.recv_completion_tid;
		list_add_tail(&packet->xfer_page_pkt->list_ent,
			      &net_device->recv_pkt_list);

	}

	/* Put the packet back */
	list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/* Send a receive completion for the xfer page packet (outside the
	 * lock, since vmbus_sendpacket may be slow). */
	if (fsend_receive_comp)
		netvsc_send_recv_completion(device, transaction_id);

	put_net_device(device);
}
607
/*
 * netvsc_receive - Handle an inbound xfer-page packet from the host.
 *
 * Each VM_PKT_DATA_USING_XFER_PAGES packet carries range_cnt RNDIS
 * packets located inside the shared receive buffer.  For each range a
 * free hv_netvsc_packet is taken from recv_pkt_list, its page_buf[] is
 * pointed at the data in place (no copy), and it is handed to
 * rndis_filter_receive().  One extra free packet represents the xfer-page
 * packet itself; the receive completion is sent back to the host only
 * after all ranges have been released (see netvsc_receive_completion()).
 */
static void netvsc_receive(struct hv_device *device,
			    struct vmpacket_descriptor *packet)
{
	struct netvsc_device *net_device;
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *netvsc_packet = NULL;
	unsigned long start;
	unsigned long end, end_virtual;
	/* struct netvsc_driver *netvscDriver; */
	struct xferpage_packet *xferpage_packet = NULL;
	int i, j;
	int count = 0, bytes_remain = 0;
	unsigned long flags;

	LIST_HEAD(listHead);

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			   "device being destroyed?");
		return;
	}

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		dev_err(&device->device, "Unknown packet type received - %d",
			   packet->type);
		put_net_device(device);
		return;
	}

	/* The nvsp payload starts offset8 * 8 bytes into the descriptor. */
	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		dev_err(&device->device, "Unknown nvsp packet type received-"
			" %d", nvsp_packet->hdr.msg_type);
		put_net_device(device);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		dev_err(&device->device, "Invalid xfer page set id - "
			   "expecting %x got %x", NETVSC_RECEIVE_BUFFER_ID,
			   vmxferpage_packet->xfer_pageset_id);
		put_net_device(device);
		return;
	}

	/*
	 * Grab free packets (range count + 1) to represent this xfer
	 * page packet. +1 to represent the xfer page packet itself.
	 * We grab it here so that we know exactly how many we can
	 * fulfil
	 */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
	while (!list_empty(&net_device->recv_pkt_list)) {
		list_move_tail(net_device->recv_pkt_list.next, &listHead);
		if (++count == vmxferpage_packet->range_cnt + 1)
			break;
	}
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/*
	 * We need at least 2 netvsc pkts (1 to represent the xfer
	 * page and at least 1 for the range) i.e. we can handled
	 * some of the xfer page packet ranges...
	 */
	if (count < 2) {
		dev_err(&device->device, "Got only %d netvsc pkt...needed "
			"%d pkts. Dropping this xfer page packet completely!",
			count, vmxferpage_packet->range_cnt + 1);

		/* Return it to the freelist */
		spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
		for (i = count; i != 0; i--) {
			list_move_tail(listHead.next,
				       &net_device->recv_pkt_list);
		}
		spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
				       flags);

		/* Complete the transfer immediately so the host can reuse
		 * the pages, since nothing was delivered upstream. */
		netvsc_send_recv_completion(device,
					    vmxferpage_packet->d.trans_id);

		put_net_device(device);
		return;
	}

	/* Remove the 1st packet to represent the xfer page packet itself */
	xferpage_packet = (struct xferpage_packet *)listHead.next;
	list_del(&xferpage_packet->list_ent);

	/* This is how much we can satisfy */
	xferpage_packet->count = count - 1;

	if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
		dev_err(&device->device, "Needed %d netvsc pkts to satisy "
			"this xfer page...got %d",
			vmxferpage_packet->range_cnt, xferpage_packet->count);
	}

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < (count - 1); i++) {
		netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
		list_del(&netvsc_packet->list_ent);

		/* Initialize the netvsc packet */
		netvsc_packet->xfer_page_pkt = xferpage_packet;
		netvsc_packet->completion.recv.recv_completion =
					netvsc_receive_completion;
		netvsc_packet->completion.recv.recv_completion_ctx =
					netvsc_packet;
		netvsc_packet->device = device;
		/* Save this so that we can send it back */
		netvsc_packet->completion.recv.recv_completion_tid =
					vmxferpage_packet->d.trans_id;

		netvsc_packet->total_data_buflen =
					vmxferpage_packet->ranges[i].byte_count;
		netvsc_packet->page_buf_cnt = 1;

		netvsc_packet->page_buf[0].len =
					vmxferpage_packet->ranges[i].byte_count;

		/* Physical address of the range's start within recv_buf. */
		start = virt_to_phys((void *)((unsigned long)net_device->
		recv_buf + vmxferpage_packet->ranges[i].byte_offset));

		netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
		end_virtual = (unsigned long)net_device->recv_buf
		    + vmxferpage_packet->ranges[i].byte_offset
		    + vmxferpage_packet->ranges[i].byte_count - 1;
		end = virt_to_phys((void *)end_virtual);

		/* Calculate the page relative offset */
		netvsc_packet->page_buf[0].offset =
			vmxferpage_packet->ranges[i].byte_offset &
			(PAGE_SIZE - 1);
		if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
			/*
			 * Handle frame across multiple pages: the first
			 * entry covers from start to the end of its page
			 * (pfn << PAGE_SHIFT is the page-aligned base, so
			 * base + PAGE_SIZE - start is the remaining bytes
			 * in that page); the rest is split page by page,
			 * each pfn derived by walking back from the end of
			 * the range by the bytes still remaining.
			 */
			netvsc_packet->page_buf[0].len =
				(netvsc_packet->page_buf[0].pfn <<
				 PAGE_SHIFT)
				+ PAGE_SIZE - start;
			bytes_remain = netvsc_packet->total_data_buflen -
					netvsc_packet->page_buf[0].len;
			for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
				netvsc_packet->page_buf[j].offset = 0;
				if (bytes_remain <= PAGE_SIZE) {
					netvsc_packet->page_buf[j].len =
						bytes_remain;
					bytes_remain = 0;
				} else {
					netvsc_packet->page_buf[j].len =
						PAGE_SIZE;
					bytes_remain -= PAGE_SIZE;
				}
				netvsc_packet->page_buf[j].pfn =
				    virt_to_phys((void *)(end_virtual -
						bytes_remain)) >> PAGE_SHIFT;
				netvsc_packet->page_buf_cnt++;
				if (bytes_remain == 0)
					break;
			}
		}

		/* Pass it to the upper layer */
		rndis_filter_receive(device, netvsc_packet);

		/* Release our reference on the range; the completion path
		 * counts down xfer_page_pkt->count. */
		netvsc_receive_completion(netvsc_packet->
				completion.recv.recv_completion_ctx);
	}

	put_net_device(device);
}
791
792 static void netvsc_channel_cb(void *context)
793 {
794         int ret;
795         struct hv_device *device = context;
796         struct netvsc_device *net_device;
797         u32 bytes_recvd;
798         u64 request_id;
799         unsigned char *packet;
800         struct vmpacket_descriptor *desc;
801         unsigned char *buffer;
802         int bufferlen = NETVSC_PACKET_SIZE;
803
804         packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
805                          GFP_ATOMIC);
806         if (!packet)
807                 return;
808         buffer = packet;
809
810         net_device = get_inbound_net_device(device);
811         if (!net_device) {
812                 dev_err(&device->device, "net device (%p) shutting down..."
813                            "ignoring inbound packets", net_device);
814                 goto out;
815         }
816
817         do {
818                 ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
819                                            &bytes_recvd, &request_id);
820                 if (ret == 0) {
821                         if (bytes_recvd > 0) {
822                                 desc = (struct vmpacket_descriptor *)buffer;
823                                 switch (desc->type) {
824                                 case VM_PKT_COMP:
825                                         netvsc_send_completion(device, desc);
826                                         break;
827
828                                 case VM_PKT_DATA_USING_XFER_PAGES:
829                                         netvsc_receive(device, desc);
830                                         break;
831
832                                 default:
833                                         dev_err(&device->device,
834                                                    "unhandled packet type %d, "
835                                                    "tid %llx len %d\n",
836                                                    desc->type, request_id,
837                                                    bytes_recvd);
838                                         break;
839                                 }
840
841                                 /* reset */
842                                 if (bufferlen > NETVSC_PACKET_SIZE) {
843                                         kfree(buffer);
844                                         buffer = packet;
845                                         bufferlen = NETVSC_PACKET_SIZE;
846                                 }
847                         } else {
848                                 /* reset */
849                                 if (bufferlen > NETVSC_PACKET_SIZE) {
850                                         kfree(buffer);
851                                         buffer = packet;
852                                         bufferlen = NETVSC_PACKET_SIZE;
853                                 }
854
855                                 break;
856                         }
857                 } else if (ret == -ENOBUFS) {
858                         /* Handle large packet */
859                         buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
860                         if (buffer == NULL) {
861                                 /* Try again next time around */
862                                 dev_err(&device->device,
863                                            "unable to allocate buffer of size "
864                                            "(%d)!!", bytes_recvd);
865                                 break;
866                         }
867
868                         bufferlen = bytes_recvd;
869                 }
870         } while (1);
871
872         put_net_device(device);
873 out:
874         kfree(buffer);
875         return;
876 }
877
878 /*
879  * netvsc_device_add - Callback when the device belonging to this
880  * driver is added
881  */
882 int netvsc_device_add(struct hv_device *device, void *additional_info)
883 {
884         int ret = 0;
885         int i;
886         int ring_size =
887         ((struct netvsc_device_info *)additional_info)->ring_size;
888         struct netvsc_device *net_device;
889         struct hv_netvsc_packet *packet, *pos;
890
891         net_device = alloc_net_device(device);
892         if (!net_device) {
893                 ret = -ENOMEM;
894                 goto cleanup;
895         }
896
897         /* Initialize the NetVSC channel extension */
898         net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
899         spin_lock_init(&net_device->recv_pkt_list_lock);
900
901         INIT_LIST_HEAD(&net_device->recv_pkt_list);
902
903         for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
904                 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
905                                  (NETVSC_RECEIVE_SG_COUNT *
906                                   sizeof(struct hv_page_buffer)), GFP_KERNEL);
907                 if (!packet)
908                         break;
909
910                 list_add_tail(&packet->list_ent,
911                               &net_device->recv_pkt_list);
912         }
913         init_completion(&net_device->channel_init_wait);
914
915         /* Open the channel */
916         ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
917                          ring_size * PAGE_SIZE, NULL, 0,
918                          netvsc_channel_cb, device);
919
920         if (ret != 0) {
921                 dev_err(&device->device, "unable to open channel: %d", ret);
922                 goto cleanup;
923         }
924
925         /* Channel is opened */
926         pr_info("hv_netvsc channel opened successfully");
927
928         /* Connect with the NetVsp */
929         ret = netvsc_connect_vsp(device);
930         if (ret != 0) {
931                 dev_err(&device->device,
932                         "unable to connect to NetVSP - %d", ret);
933                 goto close;
934         }
935
936         return ret;
937
938 close:
939         /* Now, we can close the channel safely */
940         vmbus_close(device->channel);
941
942 cleanup:
943
944         if (net_device) {
945                 list_for_each_entry_safe(packet, pos,
946                                          &net_device->recv_pkt_list,
947                                          list_ent) {
948                         list_del(&packet->list_ent);
949                         kfree(packet);
950                 }
951
952                 kfree(net_device);
953         }
954
955         return ret;
956 }