/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lu_object.h>
#include <linux/lnet/types.h>
#include "ptlrpc_internal.h"

/* The following are visible and mutable through /sys/module/ptlrpc */
int test_req_buffer_pressure = 0;
module_param(test_req_buffer_pressure, int, 0444);
MODULE_PARM_DESC(test_req_buffer_pressure, "set non-zero to put pressure on request buffer pools");
module_param(at_min, int, 0644);
MODULE_PARM_DESC(at_min, "Adaptive timeout minimum (sec)");
module_param(at_max, int, 0644);
MODULE_PARM_DESC(at_max, "Adaptive timeout maximum (sec)");
module_param(at_history, int, 0644);
MODULE_PARM_DESC(at_history,
                 "Adaptive timeouts remember the slowest event that took place within this period (sec)");
module_param(at_early_margin, int, 0644);
MODULE_PARM_DESC(at_early_margin, "How soon before an RPC deadline to send an early reply");
module_param(at_extra, int, 0644);
MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply");

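/*
 * Illustrative only: with the 0644 permissions above, the adaptive timeout
 * tunables can be read and written at runtime through sysfs (path follows
 * from the module name, assumed here):
 *
 *      # cat /sys/module/ptlrpc/parameters/at_min
 *      # echo 40 > /sys/module/ptlrpc/parameters/at_max
 */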

/* forward ref */
static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);

/** Holds a list of all PTLRPC services */
LIST_HEAD(ptlrpc_all_services);
/** Used to protect the \e ptlrpc_all_services list */
struct mutex ptlrpc_all_services_mutex;

struct ptlrpc_request_buffer_desc *
ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_service             *svc = svcpt->scp_service;
        struct ptlrpc_request_buffer_desc *rqbd;

        OBD_CPT_ALLOC_PTR(rqbd, svc->srv_cptable, svcpt->scp_cpt);
        if (rqbd == NULL)
                return NULL;

        rqbd->rqbd_svcpt = svcpt;
        rqbd->rqbd_refcount = 0;
        rqbd->rqbd_cbid.cbid_fn = request_in_callback;
        rqbd->rqbd_cbid.cbid_arg = rqbd;
        INIT_LIST_HEAD(&rqbd->rqbd_reqs);
        OBD_CPT_ALLOC_LARGE(rqbd->rqbd_buffer, svc->srv_cptable,
                            svcpt->scp_cpt, svc->srv_buf_size);
        if (rqbd->rqbd_buffer == NULL) {
                OBD_FREE_PTR(rqbd);
                return NULL;
        }

        spin_lock(&svcpt->scp_lock);
        list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
        svcpt->scp_nrqbds_total++;
        spin_unlock(&svcpt->scp_lock);

        return rqbd;
}

void
ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;

        LASSERT(rqbd->rqbd_refcount == 0);
        LASSERT(list_empty(&rqbd->rqbd_reqs));

        spin_lock(&svcpt->scp_lock);
        list_del(&rqbd->rqbd_list);
        svcpt->scp_nrqbds_total--;
        spin_unlock(&svcpt->scp_lock);

        OBD_FREE_LARGE(rqbd->rqbd_buffer, svcpt->scp_service->srv_buf_size);
        OBD_FREE_PTR(rqbd);
}

int
ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
{
        struct ptlrpc_service             *svc = svcpt->scp_service;
        struct ptlrpc_request_buffer_desc *rqbd;
        int                                rc = 0;
        int                                i;

        if (svcpt->scp_rqbd_allocating)
                goto try_post;

        spin_lock(&svcpt->scp_lock);
        /* check again with lock */
        if (svcpt->scp_rqbd_allocating) {
                /* NB: we might allow more than one thread in the future */
                LASSERT(svcpt->scp_rqbd_allocating == 1);
                spin_unlock(&svcpt->scp_lock);
                goto try_post;
        }

        svcpt->scp_rqbd_allocating++;
        spin_unlock(&svcpt->scp_lock);

        for (i = 0; i < svc->srv_nbuf_per_group; i++) {
                /* NB: another thread might have recycled enough rqbds, we
                 * need to make sure it wouldn't over-allocate, see LU-1212. */
                if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group)
                        break;

                rqbd = ptlrpc_alloc_rqbd(svcpt);

                if (rqbd == NULL) {
                        CERROR("%s: Can't allocate request buffer\n",
                               svc->srv_name);
                        rc = -ENOMEM;
                        break;
                }
        }

        spin_lock(&svcpt->scp_lock);

        LASSERT(svcpt->scp_rqbd_allocating == 1);
        svcpt->scp_rqbd_allocating--;

        spin_unlock(&svcpt->scp_lock);

        CDEBUG(D_RPCTRACE,
               "%s: allocate %d new %d-byte reqbufs (%d/%d left), rc = %d\n",
               svc->srv_name, i, svc->srv_buf_size, svcpt->scp_nrqbds_posted,
               svcpt->scp_nrqbds_total, rc);

 try_post:
        if (post && rc == 0)
                rc = ptlrpc_server_post_idle_rqbds(svcpt);

        return rc;
}

/**
 * Part of the Rep-Ack logic.
 * Puts a lock and its mode into the reply state associated with the
 * request reply.
 */
void
ptlrpc_save_lock(struct ptlrpc_request *req,
                 struct lustre_handle *lock, int mode, int no_ack)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        int                        idx;

        LASSERT(rs != NULL);
        LASSERT(rs->rs_nlocks < RS_MAX_LOCKS);

        if (req->rq_export->exp_disconnected) {
                ldlm_lock_decref(lock, mode);
        } else {
                idx = rs->rs_nlocks++;
                rs->rs_locks[idx] = *lock;
                rs->rs_modes[idx] = mode;
                rs->rs_difficult = 1;
                rs->rs_no_ack = !!no_ack;
        }
}
EXPORT_SYMBOL(ptlrpc_save_lock);

struct ptlrpc_hr_partition;

struct ptlrpc_hr_thread {
        int                             hrt_id;         /* thread ID */
        spinlock_t                      hrt_lock;
        wait_queue_head_t               hrt_waitq;
        struct list_head                hrt_queue;      /* RS queue */
        struct ptlrpc_hr_partition     *hrt_partition;
};

struct ptlrpc_hr_partition {
        /* # of started threads */
        atomic_t                        hrp_nstarted;
        /* # of stopped threads */
        atomic_t                        hrp_nstopped;
        /* cpu partition id */
        int                             hrp_cpt;
        /* round-robin rotor for choosing a thread */
        int                             hrp_rotor;
        /* total number of threads on this partition */
        int                             hrp_nthrs;
        /* threads table */
        struct ptlrpc_hr_thread        *hrp_thrs;
};

#define HRT_RUNNING 0
#define HRT_STOPPING 1

struct ptlrpc_hr_service {
        /* CPU partition table, it's just cfs_cpt_table for now */
        struct cfs_cpt_table           *hr_cpt_table;
        /** controller sleep waitq */
        wait_queue_head_t               hr_waitq;
        unsigned int                    hr_stopping;
        /** round-robin rotor for non-affinity services */
        unsigned int                    hr_rotor;
        /* partition data */
        struct ptlrpc_hr_partition    **hr_partitions;
};

struct rs_batch {
        struct list_head                rsb_replies;
        unsigned int                    rsb_n_replies;
        struct ptlrpc_service_part     *rsb_svcpt;
};

/** reply handling service. */
static struct ptlrpc_hr_service         ptlrpc_hr;

/**
 * maximum number of replies scheduled in one batch
 */
#define MAX_SCHEDULED 256

/**
 * Initialize a reply batch.
 *
 * \param b batch
 */
static void rs_batch_init(struct rs_batch *b)
{
        memset(b, 0, sizeof(*b));
        INIT_LIST_HEAD(&b->rsb_replies);
}

/**
 * Choose an hr thread to dispatch a reply to.
 */
static struct ptlrpc_hr_thread *
ptlrpc_hr_select(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_hr_partition      *hrp;
        unsigned int                     rotor;

        if (svcpt->scp_cpt >= 0 &&
            svcpt->scp_service->srv_cptable == ptlrpc_hr.hr_cpt_table) {
                /* directly match partition */
                hrp = ptlrpc_hr.hr_partitions[svcpt->scp_cpt];

        } else {
                rotor = ptlrpc_hr.hr_rotor++;
                rotor %= cfs_cpt_number(ptlrpc_hr.hr_cpt_table);

                hrp = ptlrpc_hr.hr_partitions[rotor];
        }

        rotor = hrp->hrp_rotor++;
        return &hrp->hrp_thrs[rotor % hrp->hrp_nthrs];
}
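
/*
 * Sketch of the selection above (illustrative numbers): with 2 partitions
 * of 4 threads each, a non-affinity service sees hr_rotor walk the
 * partitions 0,1,0,1,... and hrp_rotor walk each partition's threads
 * 0,1,2,3,0,... so consecutive replies spread across all 8 threads. An
 * affinity service skips the first rotor and always lands on the
 * partition matching its own CPT.
 */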

/**
 * Dispatch all replies accumulated in the batch to one of the
 * dedicated reply handling threads.
 *
 * \param b batch
 */
static void rs_batch_dispatch(struct rs_batch *b)
{
        if (b->rsb_n_replies != 0) {
                struct ptlrpc_hr_thread *hrt;

                hrt = ptlrpc_hr_select(b->rsb_svcpt);

                spin_lock(&hrt->hrt_lock);
                list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
                spin_unlock(&hrt->hrt_lock);

                wake_up(&hrt->hrt_waitq);
                b->rsb_n_replies = 0;
        }
}

/**
 * Add a reply to a batch.
 * Add one reply object to the batch; schedule the batched replies if the
 * batch is full.
 *
 * \param b batch
 * \param rs reply
 */
static void rs_batch_add(struct rs_batch *b, struct ptlrpc_reply_state *rs)
{
        struct ptlrpc_service_part *svcpt = rs->rs_svcpt;

        if (svcpt != b->rsb_svcpt || b->rsb_n_replies >= MAX_SCHEDULED) {
                if (b->rsb_svcpt != NULL) {
                        rs_batch_dispatch(b);
                        spin_unlock(&b->rsb_svcpt->scp_rep_lock);
                }
                spin_lock(&svcpt->scp_rep_lock);
                b->rsb_svcpt = svcpt;
        }
        spin_lock(&rs->rs_lock);
        rs->rs_scheduled_ever = 1;
        if (rs->rs_scheduled == 0) {
                list_move(&rs->rs_list, &b->rsb_replies);
                rs->rs_scheduled = 1;
                b->rsb_n_replies++;
        }
        rs->rs_committed = 1;
        spin_unlock(&rs->rs_lock);
}

/**
 * Reply batch finalization.
 * Dispatch any remaining replies from the batch and release the
 * remaining spinlock.
 *
 * \param b batch
 */
static void rs_batch_fini(struct rs_batch *b)
{
        if (b->rsb_svcpt != NULL) {
                rs_batch_dispatch(b);
                spin_unlock(&b->rsb_svcpt->scp_rep_lock);
        }
}

#define DECLARE_RS_BATCH(b)     struct rs_batch b

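/*
 * Typical use of the batch API above; this is the pattern
 * ptlrpc_commit_replies() below follows:
 *
 *      DECLARE_RS_BATCH(b);
 *
 *      rs_batch_init(&b);
 *      list_for_each_entry_safe(rs, nxt, ...)
 *              rs_batch_add(&b, rs);
 *      rs_batch_fini(&b);
 *
 * rs_batch_add() takes scp_rep_lock as needed, and rs_batch_fini()
 * dispatches any leftover replies and releases that lock.
 */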

/**
 * Put the reply state into a queue for processing because we received
 * an ACK from the client.
 */
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
{
        struct ptlrpc_hr_thread *hrt;

        LASSERT(list_empty(&rs->rs_list));

        hrt = ptlrpc_hr_select(rs->rs_svcpt);

        spin_lock(&hrt->hrt_lock);
        list_add_tail(&rs->rs_list, &hrt->hrt_queue);
        spin_unlock(&hrt->hrt_lock);

        wake_up(&hrt->hrt_waitq);
}

void
ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
{
        LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
        LASSERT(spin_is_locked(&rs->rs_lock));
        LASSERT(rs->rs_difficult);
        rs->rs_scheduled_ever = 1;      /* flag any notification attempt */

        if (rs->rs_scheduled) {         /* being set up or already notified */
                return;
        }

        rs->rs_scheduled = 1;
        list_del_init(&rs->rs_list);
        ptlrpc_dispatch_difficult_reply(rs);
}
EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);

void ptlrpc_commit_replies(struct obd_export *exp)
{
        struct ptlrpc_reply_state *rs, *nxt;
        DECLARE_RS_BATCH(batch);

        rs_batch_init(&batch);
        /* Find any replies that have been committed and have their service
         * attend to completing them. */

        /* CAVEAT EMPTOR: spinlock ordering!!! */
        spin_lock(&exp->exp_uncommitted_replies_lock);
        list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
                                 rs_obd_list) {
                LASSERT(rs->rs_difficult);
                /* VBR: per-export last_committed */
                LASSERT(rs->rs_export);
                if (rs->rs_transno <= exp->exp_last_committed) {
                        list_del_init(&rs->rs_obd_list);
                        rs_batch_add(&batch, rs);
                }
        }
        spin_unlock(&exp->exp_uncommitted_replies_lock);
        rs_batch_fini(&batch);
}
EXPORT_SYMBOL(ptlrpc_commit_replies);

static int
ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_request_buffer_desc *rqbd;
        int                                rc;
        int                                posted = 0;

        for (;;) {
                spin_lock(&svcpt->scp_lock);

                if (list_empty(&svcpt->scp_rqbd_idle)) {
                        spin_unlock(&svcpt->scp_lock);
                        return posted;
                }

                rqbd = list_entry(svcpt->scp_rqbd_idle.next,
                                  struct ptlrpc_request_buffer_desc,
                                  rqbd_list);
                list_del(&rqbd->rqbd_list);

                /* assume we will post successfully */
                svcpt->scp_nrqbds_posted++;
                list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);

                spin_unlock(&svcpt->scp_lock);

                rc = ptlrpc_register_rqbd(rqbd);
                if (rc != 0)
                        break;

                posted = 1;
        }

        spin_lock(&svcpt->scp_lock);

        svcpt->scp_nrqbds_posted--;
        list_del(&rqbd->rqbd_list);
        list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);

        /* Don't complain if no request buffers are posted right now; LNET
         * won't drop requests because we set the portal lazy! */

        spin_unlock(&svcpt->scp_lock);

        return -1;
}

static void ptlrpc_at_timer(unsigned long castmeharder)
{
        struct ptlrpc_service_part *svcpt;

        svcpt = (struct ptlrpc_service_part *)castmeharder;

        svcpt->scp_at_check = 1;
        svcpt->scp_at_checktime = cfs_time_current();
        wake_up(&svcpt->scp_waitq);
}

static void
ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
                             struct ptlrpc_service_conf *conf)
{
        struct ptlrpc_service_thr_conf  *tc = &conf->psc_thr;
        unsigned                        init;
        unsigned                        total;
        unsigned                        nthrs;
        int                             weight;

        /*
         * Common code for estimating and validating the number of threads.
         * A CPT-affinity service can have a per-CPT thread pool instead
         * of a global thread pool, which means the user might not always
         * get the number of threads requested in conf::tc_nthrs_user,
         * even if it was set. This is because we need to validate the
         * thread count for each CPT to guarantee each pool will have
         * enough threads to keep the service healthy.
         */
        init = PTLRPC_NTHRS_INIT + (svc->srv_ops.so_hpreq_handler != NULL);
        init = max_t(int, init, tc->tc_nthrs_init);

        /* NB: please see comments in lustre_lnet.h for definition
         * details of these members */
        LASSERT(tc->tc_nthrs_max != 0);

        if (tc->tc_nthrs_user != 0) {
                /* In case there is a reason to test a service with many
                 * threads, we give a less strict check here; it can
                 * be up to 8 * nthrs_max */
                total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user);
                nthrs = total / svc->srv_ncpts;
                init  = max(init, nthrs);
                goto out;
        }

        total = tc->tc_nthrs_max;
        if (tc->tc_nthrs_base == 0) {
                /* don't care about the base thread count per partition;
                 * this is mostly for non-affinity services */
                nthrs = total / svc->srv_ncpts;
                goto out;
        }

        nthrs = tc->tc_nthrs_base;
        if (svc->srv_ncpts == 1) {
                int     i;

                /* NB: Increase the base number if it's a single partition
                 * and the total number of cores/HTs is at least 4. The
                 * result will always be < 2 * nthrs_base */
                weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY);
                for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */
                            (tc->tc_nthrs_base >> i) != 0; i++)
                        nthrs += tc->tc_nthrs_base >> i;
        }

        if (tc->tc_thr_factor != 0) {
                int       factor = tc->tc_thr_factor;
                const int fade = 4;
                cpumask_t mask;

                /*
                 * The user wants to increase the number of threads with
                 * each CPU core/HT; most likely the factor is larger than
                 * one thread per core because service threads are supposed
                 * to be blocked on locks or waiting for IO.
                 */
                /*
                 * Amdahl's law says that adding processors won't give
                 * a linear increase in parallelism, so it's nonsense to
                 * have too many threads no matter how many cores/HTs
                 * there are.
                 */
                cpumask_copy(&mask, topology_thread_cpumask(0));
                if (cpus_weight(mask) > 1) {    /* weight is # of HTs */
                        /* depress thread factor for hyper-threading */
                        factor = factor - (factor >> 1) + (factor >> 3);
                }

                weight = cfs_cpt_weight(svc->srv_cptable, 0);
                LASSERT(weight > 0);

                for (; factor > 0 && weight > 0; factor--, weight -= fade)
                        nthrs += min(weight, fade) * factor;
        }

        if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
                nthrs = max(tc->tc_nthrs_base,
                            tc->tc_nthrs_max / svc->srv_ncpts);
        }
 out:
        nthrs = max(nthrs, tc->tc_nthrs_init);
        svc->srv_nthrs_cpt_limit = nthrs;
        svc->srv_nthrs_cpt_init = init;

        if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
                CDEBUG(D_OTHER, "%s: This service may have more threads (%d) than the given soft limit (%d)\n",
                       svc->srv_name, nthrs * svc->srv_ncpts,
                       tc->tc_nthrs_max);
        }
}
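
/*
 * Worked example of the estimate above (illustrative numbers): for a
 * single-partition service with tc_nthrs_base = 8 on a node with 16
 * cores/HTs, the loop adds 8>>1 + 8>>2 + 8>>3 = 4 + 2 + 1, giving
 * nthrs = 15, which stays below the 2 * nthrs_base bound the comment
 * promises. tc_thr_factor then adds min(weight, fade) * factor per
 * step, with the factor damped on hyper-threaded CPUs.
 */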

/**
 * Initialize per-CPT data for a service.
 */
static int
ptlrpc_service_part_init(struct ptlrpc_service *svc,
                         struct ptlrpc_service_part *svcpt, int cpt)
{
        struct ptlrpc_at_array  *array;
        int                     size;
        int                     index;
        int                     rc;

        svcpt->scp_cpt = cpt;
        INIT_LIST_HEAD(&svcpt->scp_threads);

        /* rqbd and incoming request queue */
        spin_lock_init(&svcpt->scp_lock);
        INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
        INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
        INIT_LIST_HEAD(&svcpt->scp_req_incoming);
        init_waitqueue_head(&svcpt->scp_waitq);
        /* history request & rqbd list */
        INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
        INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);

        /* active requests and hp requests */
        spin_lock_init(&svcpt->scp_req_lock);

        /* reply states */
        spin_lock_init(&svcpt->scp_rep_lock);
        INIT_LIST_HEAD(&svcpt->scp_rep_active);
        INIT_LIST_HEAD(&svcpt->scp_rep_idle);
        init_waitqueue_head(&svcpt->scp_rep_waitq);
        atomic_set(&svcpt->scp_nreps_difficult, 0);

        /* adaptive timeout */
        spin_lock_init(&svcpt->scp_at_lock);
        array = &svcpt->scp_at_array;

        size = at_est2timeout(at_max);
        array->paa_size     = size;
        array->paa_count    = 0;
        array->paa_deadline = -1;

        /* allocate memory for scp_at_array (ptlrpc_at_array) */
        OBD_CPT_ALLOC(array->paa_reqs_array,
                      svc->srv_cptable, cpt, sizeof(struct list_head) * size);
        if (array->paa_reqs_array == NULL)
                return -ENOMEM;

        for (index = 0; index < size; index++)
                INIT_LIST_HEAD(&array->paa_reqs_array[index]);

        OBD_CPT_ALLOC(array->paa_reqs_count,
                      svc->srv_cptable, cpt, sizeof(__u32) * size);
        if (array->paa_reqs_count == NULL)
                goto failed;

        cfs_timer_init(&svcpt->scp_at_timer, ptlrpc_at_timer, svcpt);
        /* At SOW, service time should be quick; 10s seems generous. If client
         * timeout is less than this, we'll be sending an early reply. */
        at_init(&svcpt->scp_at_estimate, 10, 0);

        /* assign this before calling ptlrpc_grow_req_bufs() */
        svcpt->scp_service = svc;
        /* Now allocate the request buffers, but don't post them now */
        rc = ptlrpc_grow_req_bufs(svcpt, 0);
        /* We shouldn't be under memory pressure at startup, so
         * fail if we can't allocate all our buffers at this time. */
        if (rc != 0)
                goto failed;

        return 0;

 failed:
        if (array->paa_reqs_count != NULL) {
                OBD_FREE(array->paa_reqs_count, sizeof(__u32) * size);
                array->paa_reqs_count = NULL;
        }

        if (array->paa_reqs_array != NULL) {
                OBD_FREE(array->paa_reqs_array,
                         sizeof(struct list_head) * array->paa_size);
                array->paa_reqs_array = NULL;
        }

        return -ENOMEM;
}
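
/*
 * Note on the scp_at_array sizing above: paa_size = at_est2timeout(at_max)
 * buckets, and ptlrpc_at_add_timed() files each request under
 * rq_deadline % paa_size, so the array behaves like a timing wheel wide
 * enough to cover (roughly) the largest possible adaptive timeout.
 */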

/**
 * Initialize a service on the given portal.
 * This includes starting service threads, allocating and posting rqbds,
 * and so on.
 */
struct ptlrpc_service *
ptlrpc_register_service(struct ptlrpc_service_conf *conf,
                        struct proc_dir_entry *proc_entry)
{
        struct ptlrpc_service_cpt_conf  *cconf = &conf->psc_cpt;
        struct ptlrpc_service           *service;
        struct ptlrpc_service_part      *svcpt;
        struct cfs_cpt_table            *cptable;
        __u32                           *cpts = NULL;
        int                             ncpts;
        int                             cpt;
        int                             rc;
        int                             i;

        LASSERT(conf->psc_buf.bc_nbufs > 0);
        LASSERT(conf->psc_buf.bc_buf_size >=
                conf->psc_buf.bc_req_max_size + SPTLRPC_MAX_PAYLOAD);
        LASSERT(conf->psc_thr.tc_ctx_tags != 0);

        cptable = cconf->cc_cptable;
        if (cptable == NULL)
                cptable = cfs_cpt_table;

        if (!conf->psc_thr.tc_cpu_affinity) {
                ncpts = 1;
        } else {
                ncpts = cfs_cpt_number(cptable);
                if (cconf->cc_pattern != NULL) {
                        struct cfs_expr_list    *el;

                        rc = cfs_expr_list_parse(cconf->cc_pattern,
                                                 strlen(cconf->cc_pattern),
                                                 0, ncpts - 1, &el);
                        if (rc != 0) {
                                CERROR("%s: invalid CPT pattern string: %s\n",
                                       conf->psc_name, cconf->cc_pattern);
                                return ERR_PTR(-EINVAL);
                        }

                        rc = cfs_expr_list_values(el, ncpts, &cpts);
                        cfs_expr_list_free(el);
                        if (rc <= 0) {
                                CERROR("%s: failed to parse CPT array %s: %d\n",
                                       conf->psc_name, cconf->cc_pattern, rc);
                                if (cpts != NULL)
                                        OBD_FREE(cpts, sizeof(*cpts) * ncpts);
                                return ERR_PTR(rc < 0 ? rc : -EINVAL);
                        }
                        ncpts = rc;
                }
        }

        OBD_ALLOC(service, offsetof(struct ptlrpc_service, srv_parts[ncpts]));
        if (service == NULL) {
                if (cpts != NULL)
                        OBD_FREE(cpts, sizeof(*cpts) * ncpts);
                return ERR_PTR(-ENOMEM);
        }

        service->srv_cptable            = cptable;
        service->srv_cpts               = cpts;
        service->srv_ncpts              = ncpts;

        service->srv_cpt_bits = 0;      /* it's zero already, easy to read... */
        while ((1 << service->srv_cpt_bits) < cfs_cpt_number(cptable))
                service->srv_cpt_bits++;

        /* public members */
        spin_lock_init(&service->srv_lock);
        service->srv_name               = conf->psc_name;
        service->srv_watchdog_factor    = conf->psc_watchdog_factor;
        INIT_LIST_HEAD(&service->srv_list);     /* for safety of cleanup */

        /* buffer configuration */
        service->srv_nbuf_per_group     = test_req_buffer_pressure ?
                                          1 : conf->psc_buf.bc_nbufs;
        service->srv_max_req_size       = conf->psc_buf.bc_req_max_size +
                                          SPTLRPC_MAX_PAYLOAD;
        service->srv_buf_size           = conf->psc_buf.bc_buf_size;
        service->srv_rep_portal         = conf->psc_buf.bc_rep_portal;
        service->srv_req_portal         = conf->psc_buf.bc_req_portal;

        /* Increase max reply size to next power of two */
        service->srv_max_reply_size = 1;
        while (service->srv_max_reply_size <
               conf->psc_buf.bc_rep_max_size + SPTLRPC_MAX_PAYLOAD)
                service->srv_max_reply_size <<= 1;

        service->srv_thread_name        = conf->psc_thr.tc_thr_name;
        service->srv_ctx_tags           = conf->psc_thr.tc_ctx_tags;
        service->srv_hpreq_ratio        = PTLRPC_SVC_HP_RATIO;
        service->srv_ops                = conf->psc_ops;

        for (i = 0; i < ncpts; i++) {
                if (!conf->psc_thr.tc_cpu_affinity)
                        cpt = CFS_CPT_ANY;
                else
                        cpt = cpts != NULL ? cpts[i] : i;

                OBD_CPT_ALLOC(svcpt, cptable, cpt, sizeof(*svcpt));
                if (svcpt == NULL)
                        GOTO(failed, rc = -ENOMEM);

                service->srv_parts[i] = svcpt;
                rc = ptlrpc_service_part_init(service, svcpt, cpt);
                if (rc != 0)
                        GOTO(failed, rc);
        }

        ptlrpc_server_nthreads_check(service, conf);

        rc = LNetSetLazyPortal(service->srv_req_portal);
        LASSERT(rc == 0);

        mutex_lock(&ptlrpc_all_services_mutex);
        list_add(&service->srv_list, &ptlrpc_all_services);
        mutex_unlock(&ptlrpc_all_services_mutex);

        if (proc_entry != NULL)
                ptlrpc_lprocfs_register_service(proc_entry, service);

        rc = ptlrpc_service_nrs_setup(service);
        if (rc != 0)
                GOTO(failed, rc);

        CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
               service->srv_name, service->srv_req_portal);

        rc = ptlrpc_start_threads(service);
        if (rc != 0) {
                CERROR("Failed to start threads for service %s: %d\n",
                       service->srv_name, rc);
                GOTO(failed, rc);
        }

        return service;
failed:
        ptlrpc_unregister_service(service);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL(ptlrpc_register_service);
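
/*
 * Minimal registration sketch (hypothetical names/values; real callers
 * fill in their own portals, sizes, and handlers). Note the LASSERTs
 * above: bc_buf_size must be at least bc_req_max_size +
 * SPTLRPC_MAX_PAYLOAD and tc_ctx_tags must be non-zero.
 *
 *      static struct ptlrpc_service_conf conf = {
 *              .psc_name       = "example",
 *              .psc_buf        = { .bc_nbufs = ..., ... },
 *              .psc_thr        = { .tc_thr_name = "example", ... },
 *              .psc_ops        = { ... },
 *      };
 *      struct ptlrpc_service *svc;
 *
 *      svc = ptlrpc_register_service(&conf, NULL);
 *      if (IS_ERR(svc))
 *              return PTR_ERR(svc);
 */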

/**
 * To actually free the request, this must be called without holding
 * svc_lock. Note it is the caller's responsibility to unlink
 * req->rq_list.
 */
static void ptlrpc_server_free_request(struct ptlrpc_request *req)
{
        LASSERT(atomic_read(&req->rq_refcount) == 0);
        LASSERT(list_empty(&req->rq_timed_list));

        /* DEBUG_REQ() assumes the reply state of a request with a valid
         * ref will not be destroyed until that reference is dropped. */
        ptlrpc_req_drop_rs(req);

        sptlrpc_svc_ctx_decref(req);

        if (req != &req->rq_rqbd->rqbd_req) {
                /* NB request buffers use an embedded
                 * req if the incoming req unlinked the
                 * MD; this isn't one of them! */
                OBD_FREE(req, sizeof(*req));
        }
}

/**
 * Drop a reference count of the request; if it reaches 0, we either
 * put it into the history list or free it immediately.
 */
void ptlrpc_server_drop_request(struct ptlrpc_request *req)
{
        struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
        struct ptlrpc_service_part        *svcpt = rqbd->rqbd_svcpt;
        struct ptlrpc_service             *svc = svcpt->scp_service;
        int                                refcount;
        struct list_head                  *tmp;
        struct list_head                  *nxt;

        if (!atomic_dec_and_test(&req->rq_refcount))
                return;

        if (req->rq_at_linked) {
                spin_lock(&svcpt->scp_at_lock);
                /* recheck with lock, in case it's unlinked by
                 * ptlrpc_at_check_timed() */
                if (likely(req->rq_at_linked))
                        ptlrpc_at_remove_timed(req);
                spin_unlock(&svcpt->scp_at_lock);
        }

        LASSERT(list_empty(&req->rq_timed_list));

        /* finalize request */
        if (req->rq_export) {
                class_export_put(req->rq_export);
                req->rq_export = NULL;
        }

        spin_lock(&svcpt->scp_lock);

        list_add(&req->rq_list, &rqbd->rqbd_reqs);

        refcount = --(rqbd->rqbd_refcount);
        if (refcount == 0) {
                /* request buffer is now idle: add to history */
                list_del(&rqbd->rqbd_list);

                list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
                svcpt->scp_hist_nrqbds++;

                /* cull some history?
                 * I expect only about 1 or 2 rqbds need to be recycled here */
                while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
                        rqbd = list_entry(svcpt->scp_hist_rqbds.next,
                                          struct ptlrpc_request_buffer_desc,
                                          rqbd_list);

                        list_del(&rqbd->rqbd_list);
                        svcpt->scp_hist_nrqbds--;

                        /* remove rqbd's reqs from svc's req history while
                         * I've got the service lock */
                        list_for_each(tmp, &rqbd->rqbd_reqs) {
                                req = list_entry(tmp, struct ptlrpc_request,
                                                 rq_list);
                                /* Track the highest culled req seq */
                                if (req->rq_history_seq >
                                    svcpt->scp_hist_seq_culled) {
                                        svcpt->scp_hist_seq_culled =
                                                req->rq_history_seq;
                                }
                                list_del(&req->rq_history_list);
                        }

                        spin_unlock(&svcpt->scp_lock);

                        list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
                                req = list_entry(rqbd->rqbd_reqs.next,
                                                 struct ptlrpc_request,
                                                 rq_list);
                                list_del(&req->rq_list);
                                ptlrpc_server_free_request(req);
                        }

                        spin_lock(&svcpt->scp_lock);
                        /*
                         * now all reqs including the embedded req have been
                         * disposed of, schedule the request buffer for re-use.
                         */
                        LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) ==
                                0);
                        list_add_tail(&rqbd->rqbd_list,
                                      &svcpt->scp_rqbd_idle);
                }

                spin_unlock(&svcpt->scp_lock);
        } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
                /* If we are low on memory, we are not interested in history */
                list_del(&req->rq_list);
                list_del_init(&req->rq_history_list);

                /* Track the highest culled req seq */
                if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
                        svcpt->scp_hist_seq_culled = req->rq_history_seq;

                spin_unlock(&svcpt->scp_lock);

                ptlrpc_server_free_request(req);
        } else {
                spin_unlock(&svcpt->scp_lock);
        }
}

/** Change the request export, and move a hp request from the old export
 * to the new one. */
void ptlrpc_request_change_export(struct ptlrpc_request *req,
                                  struct obd_export *export)
{
        if (req->rq_export != NULL) {
                if (!list_empty(&req->rq_exp_list)) {
                        /* remove rq_exp_list from the last export */
                        spin_lock_bh(&req->rq_export->exp_rpc_lock);
                        list_del_init(&req->rq_exp_list);
                        spin_unlock_bh(&req->rq_export->exp_rpc_lock);

                        /* export has one reference already, so it's safe to
                         * add req to the export queue here and get another
                         * reference for the request later */
                        spin_lock_bh(&export->exp_rpc_lock);
                        list_add(&req->rq_exp_list, &export->exp_hp_rpcs);
                        spin_unlock_bh(&export->exp_rpc_lock);
                }
                class_export_rpc_dec(req->rq_export);
                class_export_put(req->rq_export);
        }

        /* request takes one export refcount */
        req->rq_export = class_export_get(export);
        class_export_rpc_inc(export);
}

/**
 * To finish a request: stop sending more early replies, and release
 * the request.
 */
static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
                                         struct ptlrpc_request *req)
{
        ptlrpc_server_hpreq_fini(req);

        ptlrpc_server_drop_request(req);
}

/**
 * To finish an active request: stop sending more early replies, and release
 * the request. Should be called after we have finished handling the request.
 */
static void ptlrpc_server_finish_active_request(
                                        struct ptlrpc_service_part *svcpt,
                                        struct ptlrpc_request *req)
{
        spin_lock(&svcpt->scp_req_lock);
        ptlrpc_nrs_req_stop_nolock(req);
        svcpt->scp_nreqs_active--;
        if (req->rq_hp)
                svcpt->scp_nhreqs_active--;
        spin_unlock(&svcpt->scp_req_lock);

        ptlrpc_nrs_req_finalize(req);

        if (req->rq_export != NULL)
                class_export_rpc_dec(req->rq_export);

        ptlrpc_server_finish_request(svcpt, req);
}

/**
 * This function makes sure dead exports are evicted in a timely manner.
 * It is only called when some export receives a message (i.e.,
 * the network is up.)
 */
static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
{
        struct obd_export *oldest_exp;
        time_t oldest_time, new_time;

        LASSERT(exp);

        /* Compensate for slow machines, etc, by faking our request time
           into the future.  Although this can break the strict time-ordering
           of the list, we can be really lazy here - we don't have to evict
           at the exact right moment.  Eventually, all silent exports
           will make it to the top of the list. */

        /* Do not pay attention to renewals of 1 sec or less. */
        new_time = cfs_time_current_sec() + extra_delay;
        if (exp->exp_last_request_time + 1 /* second */ >= new_time)
                return;

        exp->exp_last_request_time = new_time;
        CDEBUG(D_HA, "updating export %s at "CFS_TIME_T" exp %p\n",
               exp->exp_client_uuid.uuid,
               exp->exp_last_request_time, exp);

        /* exports may get disconnected from the chain even though the
           export has references, so we must keep the spin lock while
           manipulating the lists */
        spin_lock(&exp->exp_obd->obd_dev_lock);

        if (list_empty(&exp->exp_obd_chain_timed)) {
                /* this one is not timed */
                spin_unlock(&exp->exp_obd->obd_dev_lock);
                return;
        }

        list_move_tail(&exp->exp_obd_chain_timed,
                       &exp->exp_obd->obd_exports_timed);

        oldest_exp = list_entry(exp->exp_obd->obd_exports_timed.next,
                                struct obd_export, exp_obd_chain_timed);
        oldest_time = oldest_exp->exp_last_request_time;
        spin_unlock(&exp->exp_obd->obd_dev_lock);

        if (exp->exp_obd->obd_recovering) {
                /* be nice to everyone during recovery */
                return;
        }

        /* Note - racing to start/reset the obd_eviction timer is safe */
        if (exp->exp_obd->obd_eviction_timer == 0) {
                /* Check if the oldest entry is expired. */
                if (cfs_time_current_sec() > (oldest_time + PING_EVICT_TIMEOUT +
                                              extra_delay)) {
                        /* We need a second timer, in case the net was down and
                         * it just came back. Since the pinger may skip every
                         * other PING_INTERVAL (see note in ptlrpc_pinger_main),
                         * we better wait for 3. */
                        exp->exp_obd->obd_eviction_timer =
                                cfs_time_current_sec() + 3 * PING_INTERVAL;
                        CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n",
                               exp->exp_obd->obd_name,
                               obd_export_nid2str(oldest_exp), oldest_time);
                }
        } else {
                if (cfs_time_current_sec() >
                    (exp->exp_obd->obd_eviction_timer + extra_delay)) {
                        /* The evictor won't evict anyone whom we've heard from
                         * recently, so we don't have to check before we start
                         * it. */
                        if (!ping_evictor_wake(exp))
                                exp->exp_obd->obd_eviction_timer = 0;
                }
        }
}
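
/*
 * Timeline sketch for the logic above (illustrative): if the oldest export
 * last spoke more than PING_EVICT_TIMEOUT + extra_delay seconds ago, the
 * eviction timer is armed 3 * PING_INTERVAL in the future; only when that
 * second deadline has also passed is the ping evictor actually woken.
 */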

/**
 * Sanity check request \a req.
 * Return 0 if all is ok, error code otherwise.
 */
static int ptlrpc_check_req(struct ptlrpc_request *req)
{
        int rc = 0;

        if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) <
                     req->rq_export->exp_conn_cnt)) {
                DEBUG_REQ(D_RPCTRACE, req,
                          "DROPPING req from old connection %d < %d",
                          lustre_msg_get_conn_cnt(req->rq_reqmsg),
                          req->rq_export->exp_conn_cnt);
                return -EEXIST;
        }
        if (unlikely(req->rq_export->exp_obd &&
                     req->rq_export->exp_obd->obd_fail)) {
                /*
                 * Failing over, don't handle any more reqs, send
                 * error response instead.
                 */
                CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
                       req, req->rq_export->exp_obd->obd_name);
                rc = -ENODEV;
        } else if (lustre_msg_get_flags(req->rq_reqmsg) &
                   (MSG_REPLAY | MSG_REQ_REPLAY_DONE) &&
                   !(req->rq_export->exp_obd->obd_recovering)) {
                DEBUG_REQ(D_ERROR, req,
                          "Invalid replay without recovery");
                class_fail_export(req->rq_export);
                rc = -ENODEV;
        } else if (lustre_msg_get_transno(req->rq_reqmsg) != 0 &&
                   !(req->rq_export->exp_obd->obd_recovering)) {
                DEBUG_REQ(D_ERROR, req, "Invalid req with transno "
                          LPU64" without recovery",
                          lustre_msg_get_transno(req->rq_reqmsg));
                class_fail_export(req->rq_export);
                rc = -ENODEV;
        }

        if (unlikely(rc < 0)) {
                req->rq_status = rc;
                ptlrpc_error(req);
        }
        return rc;
}

static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_at_array *array = &svcpt->scp_at_array;
        __s32 next;

        if (array->paa_count == 0) {
                cfs_timer_disarm(&svcpt->scp_at_timer);
                return;
        }

        /* Set timer for closest deadline */
        next = (__s32)(array->paa_deadline - cfs_time_current_sec() -
                       at_early_margin);
        if (next <= 0) {
                ptlrpc_at_timer((unsigned long)svcpt);
        } else {
                cfs_timer_arm(&svcpt->scp_at_timer, cfs_time_shift(next));
                CDEBUG(D_INFO, "armed %s at %+ds\n",
                       svcpt->scp_service->srv_name, next);
        }
}

/* Add rpc to early reply check list */
static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
{
        struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
        struct ptlrpc_at_array *array = &svcpt->scp_at_array;
        struct ptlrpc_request *rq = NULL;
        __u32 index;

        if (AT_OFF)
                return 0;

        if (req->rq_no_reply)
                return 0;

        if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
                return -ENOSYS;

        spin_lock(&svcpt->scp_at_lock);
        LASSERT(list_empty(&req->rq_timed_list));

        index = (unsigned long)req->rq_deadline % array->paa_size;
        if (array->paa_reqs_count[index] > 0) {
                /* latest rpcs will have the latest deadlines in the list,
                 * so search backward. */
                list_for_each_entry_reverse(rq,
                                            &array->paa_reqs_array[index],
                                            rq_timed_list) {
                        if (req->rq_deadline >= rq->rq_deadline) {
                                list_add(&req->rq_timed_list,
                                         &rq->rq_timed_list);
                                break;
                        }
                }
        }

        /* Add the request at the head of the list */
        if (list_empty(&req->rq_timed_list))
                list_add(&req->rq_timed_list,
                         &array->paa_reqs_array[index]);

        spin_lock(&req->rq_lock);
        req->rq_at_linked = 1;
        spin_unlock(&req->rq_lock);
        req->rq_at_index = index;
        array->paa_reqs_count[index]++;
        array->paa_count++;
        if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
                array->paa_deadline = req->rq_deadline;
                ptlrpc_at_set_timer(svcpt);
        }
        spin_unlock(&svcpt->scp_at_lock);

        return 0;
}
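
/*
 * Illustrative example of the indexing above: with paa_size = 600,
 * requests with deadlines 1000 and 1600 hash to the same bucket
 * (index 400). The reverse walk keeps each bucket sorted by deadline,
 * and since newer RPCs normally have later deadlines the insertion
 * point is usually found quickly.
 */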

static void
ptlrpc_at_remove_timed(struct ptlrpc_request *req)
{
        struct ptlrpc_at_array *array;

        array = &req->rq_rqbd->rqbd_svcpt->scp_at_array;

        /* NB: must be called while holding svcpt::scp_at_lock */
        LASSERT(!list_empty(&req->rq_timed_list));
        list_del_init(&req->rq_timed_list);

        spin_lock(&req->rq_lock);
        req->rq_at_linked = 0;
        spin_unlock(&req->rq_lock);

        array->paa_reqs_count[req->rq_at_index]--;
        array->paa_count--;
}

static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
{
        struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
        struct ptlrpc_request *reqcopy;
        struct lustre_msg *reqmsg;
        cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
        time_t newdl;
        int rc;

        /* deadline is when the client expects us to reply, margin is the
           difference between clients' and servers' expectations */
        DEBUG_REQ(D_ADAPTTO, req,
                  "%ssending early reply (deadline %+lds, margin %+lds) for "
                  "%d+%d", AT_OFF ? "AT off - not " : "",
                  olddl, olddl - at_get(&svcpt->scp_at_estimate),
                  at_get(&svcpt->scp_at_estimate), at_extra);

        if (AT_OFF)
                return 0;

        if (olddl < 0) {
                DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
                          "not sending early reply. Consider increasing "
                          "at_early_margin (%d)?", olddl, at_early_margin);

                /* Return an error so we're not re-added to the timed list. */
                return -ETIMEDOUT;
        }

        if (!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
                DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, "
                          "but no AT support");
                return -ENOSYS;
        }

        if (req->rq_export &&
            lustre_msg_get_flags(req->rq_reqmsg) &
            (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
                /* During recovery, we don't want to send too many early
                 * replies, but on the other hand we want to make sure the
                 * client has enough time to resend if the rpc is lost. So
                 * during the recovery period send at least 4 early replies,
                 * spacing them every at_extra if we can. at_estimate should
                 * always equal this fixed value during recovery. */
                at_measured(&svcpt->scp_at_estimate, min(at_extra,
                            req->rq_export->exp_obd->obd_recovery_timeout / 4));
        } else {
                /* Fake our processing time into the future to ask the clients
                 * for some extra amount of time */
                at_measured(&svcpt->scp_at_estimate, at_extra +
                            cfs_time_current_sec() -
                            req->rq_arrival_time.tv_sec);

                /* Check to see if we've actually increased the deadline -
                 * we may be past adaptive_max */
                if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
                    at_get(&svcpt->scp_at_estimate)) {
                        DEBUG_REQ(D_WARNING, req, "Couldn't add any time "
                                  "(%ld/%ld), not sending early reply\n",
                                  olddl, req->rq_arrival_time.tv_sec +
                                  at_get(&svcpt->scp_at_estimate) -
                                  cfs_time_current_sec());
                        return -ETIMEDOUT;
                }
        }
        newdl = cfs_time_current_sec() + at_get(&svcpt->scp_at_estimate);

        OBD_ALLOC(reqcopy, sizeof(*reqcopy));
        if (reqcopy == NULL)
                return -ENOMEM;
        OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
        if (!reqmsg) {
                OBD_FREE(reqcopy, sizeof(*reqcopy));
                return -ENOMEM;
        }

        *reqcopy = *req;
        reqcopy->rq_reply_state = NULL;
        reqcopy->rq_rep_swab_mask = 0;
        reqcopy->rq_pack_bulk = 0;
        reqcopy->rq_pack_udesc = 0;
        reqcopy->rq_packed_final = 0;
        sptlrpc_svc_ctx_addref(reqcopy);
        /* We only need the reqmsg for the magic */
        reqcopy->rq_reqmsg = reqmsg;
        memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);

        LASSERT(atomic_read(&req->rq_refcount));
        /** if it is the last refcount then an early reply isn't needed */
        if (atomic_read(&req->rq_refcount) == 1) {
                DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
                          "abort sending early reply\n");
                GOTO(out, rc = -EINVAL);
        }

        /* Connection ref */
        reqcopy->rq_export = class_conn2export(
                                     lustre_msg_get_handle(reqcopy->rq_reqmsg));
        if (reqcopy->rq_export == NULL)
                GOTO(out, rc = -ENODEV);

        /* RPC ref */
        class_export_rpc_inc(reqcopy->rq_export);
        if (reqcopy->rq_export->exp_obd &&
            reqcopy->rq_export->exp_obd->obd_fail)
                GOTO(out_put, rc = -ENODEV);

        rc = lustre_pack_reply_flags(reqcopy, 1, NULL, NULL, LPRFL_EARLY_REPLY);
        if (rc)
                GOTO(out_put, rc);

        rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);

        if (!rc) {
                /* Adjust our own deadline to what we told the client */
                req->rq_deadline = newdl;
                req->rq_early_count++;  /* number sent, server side */
        } else {
                DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
        }

        /* Free the (early) reply state from lustre_pack_reply.
           (ptlrpc_send_reply takes its own rs ref, so this is safe here) */
        ptlrpc_req_drop_rs(reqcopy);

out_put:
        class_export_rpc_dec(reqcopy->rq_export);
        class_export_put(reqcopy->rq_export);
out:
        sptlrpc_svc_ctx_decref(reqcopy);
        OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
        OBD_FREE(reqcopy, sizeof(*reqcopy));
        return rc;
}
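
/*
 * Deadline arithmetic above, by example (illustrative numbers): a request
 * arrived at t=100 with rq_deadline 130 (olddl +30). If at_extra stretches
 * scp_at_estimate to 45, the early reply promises newdl = now + 45; the
 * server only adopts newdl once the early reply has actually been sent.
 */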
1378
1379 /* Send early replies to everybody expiring within at_early_margin
1380    asking for at_extra time */
1381 static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
1382 {
1383         struct ptlrpc_at_array *array = &svcpt->scp_at_array;
1384         struct ptlrpc_request *rq, *n;
1385         struct list_head work_list;
1386         __u32  index, count;
1387         time_t deadline;
1388         time_t now = cfs_time_current_sec();
1389         cfs_duration_t delay;
1390         int first, counter = 0;
1391
1392         spin_lock(&svcpt->scp_at_lock);
1393         if (svcpt->scp_at_check == 0) {
1394                 spin_unlock(&svcpt->scp_at_lock);
1395                 return 0;
1396         }
1397         delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
1398         svcpt->scp_at_check = 0;
1399
1400         if (array->paa_count == 0) {
1401                 spin_unlock(&svcpt->scp_at_lock);
1402                 return 0;
1403         }
1404
1405         /* The timer went off, but maybe the nearest rpc already completed. */
1406         first = array->paa_deadline - now;
1407         if (first > at_early_margin) {
1408                 /* We've still got plenty of time.  Reset the timer. */
1409                 ptlrpc_at_set_timer(svcpt);
1410                 spin_unlock(&svcpt->scp_at_lock);
1411                 return 0;
1412         }
1413
1414         /* We're close to a timeout, and we don't know how much longer the
1415            server will take. Send early replies to everyone expiring soon. */
1416         INIT_LIST_HEAD(&work_list);
1417         deadline = -1;
1418         index = (unsigned long)array->paa_deadline % array->paa_size;
1419         count = array->paa_count;
1420         while (count > 0) {
1421                 count -= array->paa_reqs_count[index];
1422                 list_for_each_entry_safe(rq, n,
1423                                              &array->paa_reqs_array[index],
1424                                              rq_timed_list) {
1425                         if (rq->rq_deadline > now + at_early_margin) {
1426                                 /* update the earliest deadline */
1427                                 if (deadline == -1 ||
1428                                     rq->rq_deadline < deadline)
1429                                         deadline = rq->rq_deadline;
1430                                 break;
1431                         }
1432
1433                         ptlrpc_at_remove_timed(rq);
1434                         /*
1435                          * ptlrpc_server_drop_request() may already have
1436                          * dropped the refcount to 0; check for that and
1437                          * don't add such an entry to work_list.
1438                          */
1439                         if (likely(atomic_inc_not_zero(&rq->rq_refcount)))
1440                                 list_add(&rq->rq_timed_list, &work_list);
1441                         counter++;
1442                 }
1443
1444                 if (++index >= array->paa_size)
1445                         index = 0;
1446         }
1447         array->paa_deadline = deadline;
1448         /* we have a new earliest deadline, restart the timer */
1449         ptlrpc_at_set_timer(svcpt);
1450
1451         spin_unlock(&svcpt->scp_at_lock);
1452
1453         CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
1454                "replies\n", first, at_extra, counter);
1455         if (first < 0) {
1456                 /* We're already past request deadlines before we even get a
1457                    chance to send early replies */
1458                 LCONSOLE_WARN("%s: This server is not able to keep up with "
1459                               "request traffic (cpu-bound).\n",
1460                               svcpt->scp_service->srv_name);
1461                 CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
1462                       "delay="CFS_DURATION_T"(jiff)\n",
1463                       counter, svcpt->scp_nreqs_incoming,
1464                       svcpt->scp_nreqs_active,
1465                       at_get(&svcpt->scp_at_estimate), delay);
1466         }
1467
1468         /* we took an additional refcount, so entries can't be deleted from the
1469          * list; no locking is needed */
1470         while (!list_empty(&work_list)) {
1471                 rq = list_entry(work_list.next, struct ptlrpc_request,
1472                                     rq_timed_list);
1473                 list_del_init(&rq->rq_timed_list);
1474
1475                 if (ptlrpc_at_send_early_reply(rq) == 0)
1476                         ptlrpc_at_add_timed(rq);
1477
1478                 ptlrpc_server_drop_request(rq);
1479         }
1480
1481         return 1; /* return "did_something" for liblustre */
1482 }
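
/*
 * Note on the scan above: scp_at_array is a circular array of request
 * lists bucketed by deadline (one second per slot), so the walk starts at
 *
 *      index = (unsigned long)array->paa_deadline % array->paa_size;
 *
 * and wraps around until paa_count requests have been accounted for.
 * Each bucket's list is evidently kept in deadline order (hence the early
 * break), so a request whose deadline is more than at_early_margin
 * seconds away ends the scan of that bucket.
 */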
1483
1484 /**
1485  * Put the request on the export's list if the request may become
1486  * a high-priority one.
1487  */
1488 static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
1489                                     struct ptlrpc_request *req)
1490 {
1491         int rc = 0;
1492
1493         if (svcpt->scp_service->srv_ops.so_hpreq_handler) {
1494                 rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
1495                 if (rc < 0)
1496                         return rc;
1497                 LASSERT(rc == 0);
1498         }
1499         if (req->rq_export && req->rq_ops) {
1500                 /* Perform a request-specific check. We should do this check
1501                  * before the request is added to the exp_hp_rpcs list, otherwise
1502                  * it may hit the swab race described in LU-1044. */
1503                 if (req->rq_ops->hpreq_check) {
1504                         rc = req->rq_ops->hpreq_check(req);
1505                         /**
1506                          * XXX: Out of all current
1507                          * ptlrpc_hpreq_ops::hpreq_check(), only
1508                          * ldlm_cancel_hpreq_check() can return an error code;
1509                          * other functions assert in similar places, which seems
1510                          * odd. What also does not seem right is that handlers
1511                          * for those RPCs do not assert on the same checks, but
1512                          * rather handle the error cases. e.g. see
1513                          * ost_rw_hpreq_check(), and ost_brw_read(),
1514                          * ost_brw_write().
1515                          */
1516                         if (rc < 0)
1517                                 return rc;
1518                         LASSERT(rc == 0 || rc == 1);
1519                 }
1520
1521                 spin_lock_bh(&req->rq_export->exp_rpc_lock);
1522                 list_add(&req->rq_exp_list,
1523                              &req->rq_export->exp_hp_rpcs);
1524                 spin_unlock_bh(&req->rq_export->exp_rpc_lock);
1525         }
1526
1527         ptlrpc_nrs_req_initialize(svcpt, req, rc);
1528
1529         return rc;
1530 }
1531
1532 /** Remove the request from the export list. */
1533 static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
1534 {
1535         if (req->rq_export && req->rq_ops) {
1536                 /* refresh the lock timeout again so that the client has
1537                  * more room to send a lock cancel RPC. */
1538                 if (req->rq_ops->hpreq_fini)
1539                         req->rq_ops->hpreq_fini(req);
1540
1541                 spin_lock_bh(&req->rq_export->exp_rpc_lock);
1542                 list_del_init(&req->rq_exp_list);
1543                 spin_unlock_bh(&req->rq_export->exp_rpc_lock);
1544         }
1545 }
1546
1547 static int ptlrpc_hpreq_check(struct ptlrpc_request *req)
1548 {
1549         return 1;
1550 }
1551
1552 static struct ptlrpc_hpreq_ops ptlrpc_hpreq_common = {
1553         .hpreq_check       = ptlrpc_hpreq_check,
1554 };
1555
1556 /* Hi-Priority RPC check by RPC operation code. */
1557 int ptlrpc_hpreq_handler(struct ptlrpc_request *req)
1558 {
1559         int opc = lustre_msg_get_opc(req->rq_reqmsg);
1560
1561         /* Check the export so that only reconnects for a not yet evicted
1562          * export can become HP RPCs. */
1563         if ((req->rq_export != NULL) &&
1564             (opc == OBD_PING || opc == MDS_CONNECT || opc == OST_CONNECT))
1565                 req->rq_ops = &ptlrpc_hpreq_common;
1566
1567         return 0;
1568 }
1569 EXPORT_SYMBOL(ptlrpc_hpreq_handler);
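
/*
 * For illustration only (a hypothetical sketch, not part of this file):
 * a service that wants its own promotion policy installs a
 * ptlrpc_hpreq_ops from its so_hpreq_handler callback, mirroring
 * ptlrpc_hpreq_common above:
 *
 *      static int my_hpreq_check(struct ptlrpc_request *req)
 *      {
 *              return 1;       (1 == high priority, 0 == normal)
 *      }
 *      static struct ptlrpc_hpreq_ops my_hpreq_ops = {
 *              .hpreq_check = my_hpreq_check,
 *      };
 *      ...
 *      req->rq_ops = &my_hpreq_ops;    (inside the so_hpreq_handler)
 *
 * hpreq_check may also return a negative errno to reject the request;
 * see the LU-1044 note in ptlrpc_server_hpreq_init() above.
 */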
1570
1571 static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
1572                                      struct ptlrpc_request *req)
1573 {
1574         int     rc;
1575
1576         rc = ptlrpc_server_hpreq_init(svcpt, req);
1577         if (rc < 0)
1578                 return rc;
1579
1580         ptlrpc_nrs_req_add(svcpt, req, !!rc);
1581
1582         return 0;
1583 }
1584
1585 /**
1586  * Decide whether handling a high-priority request is allowed.
1587  * Callers may invoke this without any lock, but must hold
1588  * ptlrpc_service_part::scp_req_lock to get a reliable result.
1589  */
1590 static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
1591                                      bool force)
1592 {
1593         int running = svcpt->scp_nthrs_running;
1594
1595         if (!nrs_svcpt_has_hp(svcpt))
1596                 return false;
1597
1598         if (force)
1599                 return true;
1600
1601         if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
1602                      CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
1603                 /* leave just 1 thread for normal RPCs */
1604                 running = PTLRPC_NTHRS_INIT;
1605                 if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
1606                         running += 1;
1607         }
1608
1609         if (svcpt->scp_nreqs_active >= running - 1)
1610                 return false;
1611
1612         if (svcpt->scp_nhreqs_active == 0)
1613                 return true;
1614
1615         return !ptlrpc_nrs_req_pending_nolock(svcpt, false) ||
1616                svcpt->scp_hreq_count < svcpt->scp_service->srv_hpreq_ratio;
1617 }
1618
1619 static bool ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt,
1620                                        bool force)
1621 {
1622         return ptlrpc_server_allow_high(svcpt, force) &&
1623                ptlrpc_nrs_req_pending_nolock(svcpt, true);
1624 }
1625
1626 /**
1627  * Only allow normal priority requests on a service that has a high-priority
1628  * queue if forced (i.e. cleanup), if there are other high priority requests
1629  * already being processed (i.e. those threads can service more high-priority
1630  * requests), or if there are enough idle threads that a later thread can do
1631  * a high priority request.
1632  * Callers may invoke this without any lock, but must hold
1633  * ptlrpc_service_part::scp_req_lock to get a reliable result.
1634  */
1635 static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
1636                                        bool force)
1637 {
1638         int running = svcpt->scp_nthrs_running;
1639         if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
1640                      CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
1641                 /* leave just 1 thread for normal RPCs */
1642                 running = PTLRPC_NTHRS_INIT;
1643                 if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
1644                         running += 1;
1645         }
1646
1647         if (force ||
1648             svcpt->scp_nreqs_active < running - 2)
1649                 return true;
1650
1651         if (svcpt->scp_nreqs_active >= running - 1)
1652                 return false;
1653
1654         return svcpt->scp_nhreqs_active > 0 || !nrs_svcpt_has_hp(svcpt);
1655 }
1656
1657 static bool ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt,
1658                                          bool force)
1659 {
1660         return ptlrpc_server_allow_normal(svcpt, force) &&
1661                ptlrpc_nrs_req_pending_nolock(svcpt, false);
1662 }
1663
1664 /**
1665  * Returns true if there are requests available in incoming
1666  * request queue for processing and it is allowed to fetch them.
1667  * Callers may invoke this without any lock, but must hold
1668  * ptlrpc_service_part::scp_req_lock to get a reliable result.
1669  * \see ptlrpc_server_allow_normal
1670  * \see ptlrpc_server_allow_high
1671  */
1672 static inline bool
1673 ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, bool force)
1674 {
1675         return ptlrpc_server_high_pending(svcpt, force) ||
1676                ptlrpc_server_normal_pending(svcpt, force);
1677 }
1678
1679 /**
1680  * Fetch a request for processing from the queue of unprocessed requests,
1681  * favoring high-priority requests.
1682  * Returns a pointer to the fetched request, or NULL if none is pending.
1683  */
1684 static struct ptlrpc_request *
1685 ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
1686 {
1687         struct ptlrpc_request *req = NULL;
1688
1689         spin_lock(&svcpt->scp_req_lock);
1690
1691         if (ptlrpc_server_high_pending(svcpt, force)) {
1692                 req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
1693                 if (req != NULL) {
1694                         svcpt->scp_hreq_count++;
1695                         goto got_request;
1696                 }
1697         }
1698
1699         if (ptlrpc_server_normal_pending(svcpt, force)) {
1700                 req = ptlrpc_nrs_req_get_nolock(svcpt, false, force);
1701                 if (req != NULL) {
1702                         svcpt->scp_hreq_count = 0;
1703                         goto got_request;
1704                 }
1705         }
1706
1707         spin_unlock(&svcpt->scp_req_lock);
1708         return NULL;
1709
1710 got_request:
1711         svcpt->scp_nreqs_active++;
1712         if (req->rq_hp)
1713                 svcpt->scp_nhreqs_active++;
1714
1715         spin_unlock(&svcpt->scp_req_lock);
1716
1717         if (likely(req->rq_export))
1718                 class_export_rpc_inc(req->rq_export);
1719
1720         return req;
1721 }
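
/*
 * Note on scp_hreq_count above: it counts consecutive high-priority
 * requests handed out.  Once it reaches srv_hpreq_ratio while normal
 * requests are pending, ptlrpc_server_allow_high() starts refusing
 * further HP requests, and taking a normal request resets the count to 0.
 * This bounds how long normal requests can be starved by a stream of HP
 * ones.
 */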
1722
1723 /**
1724  * Handle freshly incoming reqs: add them to the timed early-reply list
1725  * and pass them on to the regular request queue.
1726  * All incoming requests pass through here before getting into
1727  * ptlrpc_server_handle_request later on.
1728  */
1729 static int
1730 ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
1731                             struct ptlrpc_thread *thread)
1732 {
1733         struct ptlrpc_service   *svc = svcpt->scp_service;
1734         struct ptlrpc_request   *req;
1735         __u32                   deadline;
1736         int                     rc;
1737
1738         spin_lock(&svcpt->scp_lock);
1739         if (list_empty(&svcpt->scp_req_incoming)) {
1740                 spin_unlock(&svcpt->scp_lock);
1741                 return 0;
1742         }
1743
1744         req = list_entry(svcpt->scp_req_incoming.next,
1745                              struct ptlrpc_request, rq_list);
1746         list_del_init(&req->rq_list);
1747         svcpt->scp_nreqs_incoming--;
1748         /* Consider this still a "queued" request as far as stats are
1749          * concerned */
1750         spin_unlock(&svcpt->scp_lock);
1751
1752         /* go through security check/transform */
1753         rc = sptlrpc_svc_unwrap_request(req);
1754         switch (rc) {
1755         case SECSVC_OK:
1756                 break;
1757         case SECSVC_COMPLETE:
1758                 target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
1759                 goto err_req;
1760         case SECSVC_DROP:
1761                 goto err_req;
1762         default:
1763                 LBUG();
1764         }
1765
1766         /*
1767          * for a null-flavored rpc, the msg has been unpacked by sptlrpc,
1768          * although redoing it wouldn't be harmful.
1769          */
1770         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
1771                 rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen);
1772                 if (rc != 0) {
1773                         CERROR("error unpacking request: ptl %d from %s "
1774                                "x"LPU64"\n", svc->srv_req_portal,
1775                                libcfs_id2str(req->rq_peer), req->rq_xid);
1776                         goto err_req;
1777                 }
1778         }
1779
1780         rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
1781         if (rc) {
1782                 CERROR("error unpacking ptlrpc body: ptl %d from %s x"
1783                        LPU64"\n", svc->srv_req_portal,
1784                        libcfs_id2str(req->rq_peer), req->rq_xid);
1785                 goto err_req;
1786         }
1787
1788         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
1789             lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) {
1790                 CERROR("drop incoming rpc opc %u, x"LPU64"\n",
1791                        cfs_fail_val, req->rq_xid);
1792                 goto err_req;
1793         }
1794
1795         rc = -EINVAL;
1796         if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) {
1797                 CERROR("wrong packet type received (type=%u) from %s\n",
1798                        lustre_msg_get_type(req->rq_reqmsg),
1799                        libcfs_id2str(req->rq_peer));
1800                 goto err_req;
1801         }
1802
1803         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1804         case MDS_WRITEPAGE:
1805         case OST_WRITE:
1806                 req->rq_bulk_write = 1;
1807                 break;
1808         case MDS_READPAGE:
1809         case OST_READ:
1810         case MGS_CONFIG_READ:
1811                 req->rq_bulk_read = 1;
1812                 break;
1813         }
1814
1815         CDEBUG(D_RPCTRACE, "got req x"LPU64"\n", req->rq_xid);
1816
1817         req->rq_export = class_conn2export(
1818                 lustre_msg_get_handle(req->rq_reqmsg));
1819         if (req->rq_export) {
1820                 rc = ptlrpc_check_req(req);
1821                 if (rc == 0) {
1822                         rc = sptlrpc_target_export_check(req->rq_export, req);
1823                         if (rc)
1824                                 DEBUG_REQ(D_ERROR, req, "DROPPING req with "
1825                                           "illegal security flavor,");
1826                 }
1827
1828                 if (rc)
1829                         goto err_req;
1830                 ptlrpc_update_export_timer(req->rq_export, 0);
1831         }
1832
1833         /* req_in handling should/must be fast */
1834         if (cfs_time_current_sec() - req->rq_arrival_time.tv_sec > 5)
1835                 DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
1836                           cfs_time_sub(cfs_time_current_sec(),
1837                                        req->rq_arrival_time.tv_sec));
1838
1839         /* Set rpc server deadline and add it to the timed list */
1840         deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
1841                     MSGHDR_AT_SUPPORT) ?
1842                    /* The max time the client expects us to take */
1843                    lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout;
1844         req->rq_deadline = req->rq_arrival_time.tv_sec + deadline;
1845         if (unlikely(deadline == 0)) {
1846                 DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout");
1847                 goto err_req;
1848         }
1849
1850         req->rq_svc_thread = thread;
1851
1852         ptlrpc_at_add_timed(req);
1853
1854         /* Move it over to the request processing queue */
1855         rc = ptlrpc_server_request_add(svcpt, req);
1856         if (rc)
1857                 GOTO(err_req, rc);
1858
1859         wake_up(&svcpt->scp_waitq);
1860         return 1;
1861
1862 err_req:
1863         ptlrpc_server_finish_request(svcpt, req);
1864
1865         return 1;
1866 }
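
/*
 * Deadline bookkeeping for the function above: if the client advertised
 * MSGHDR_AT_SUPPORT, the timeout it packed into the message is used,
 * otherwise the static obd_timeout applies, so effectively
 *
 *      rq_deadline = rq_arrival_time.tv_sec +
 *                    (AT supported ? client timeout : obd_timeout);
 *
 * A request advertising a zero timeout is treated as malformed and
 * dropped.
 */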
1867
1868 /**
1869  * Main incoming request handling logic.
1870  * Calls the service's handler function to do the actual processing.
1871  */
1872 static int
1873 ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
1874                              struct ptlrpc_thread *thread)
1875 {
1876         struct ptlrpc_service *svc = svcpt->scp_service;
1877         struct ptlrpc_request *request;
1878         struct timeval   work_start;
1879         struct timeval   work_end;
1880         long               timediff;
1881         int                 rc;
1882         int                 fail_opc = 0;
1883
1884         request = ptlrpc_server_request_get(svcpt, false);
1885         if (request == NULL)
1886                 return 0;
1887
1888         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
1889                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
1890         else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
1891                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT;
1892
1893         if (unlikely(fail_opc)) {
1894                 if (request->rq_export && request->rq_ops)
1895                         OBD_FAIL_TIMEOUT(fail_opc, 4);
1896         }
1897
1898         ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
1899
1900         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
1901                 libcfs_debug_dumplog();
1902
1903         do_gettimeofday(&work_start);
1904         timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time, NULL);
1905         if (likely(svc->srv_stats != NULL)) {
1906                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
1907                                     timediff);
1908                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
1909                                     svcpt->scp_nreqs_incoming);
1910                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
1911                                     svcpt->scp_nreqs_active);
1912                 lprocfs_counter_add(svc->srv_stats, PTLRPC_TIMEOUT,
1913                                     at_get(&svcpt->scp_at_estimate));
1914         }
1915
1916         rc = lu_context_init(&request->rq_session, LCT_SESSION | LCT_NOREF);
1917         if (rc) {
1918                 CERROR("Failure to initialize session: %d\n", rc);
1919                 goto out_req;
1920         }
1921         request->rq_session.lc_thread = thread;
1922         request->rq_session.lc_cookie = 0x5;
1923         lu_context_enter(&request->rq_session);
1924
1925         CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
1926
1927         request->rq_svc_thread = thread;
1928         if (thread)
1929                 request->rq_svc_thread->t_env->le_ses = &request->rq_session;
1930
1931         if (likely(request->rq_export)) {
1932                 if (unlikely(ptlrpc_check_req(request)))
1933                         goto put_conn;
1934                 ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
1935         }
1936
1937         /* Discard requests queued for longer than the deadline.
1938            The deadline is increased if we send an early reply. */
1939         if (cfs_time_current_sec() > request->rq_deadline) {
1940                 DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
1941                           ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n",
1942                           libcfs_id2str(request->rq_peer),
1943                           cfs_time_sub(request->rq_deadline,
1944                           request->rq_arrival_time.tv_sec),
1945                           cfs_time_sub(cfs_time_current_sec(),
1946                           request->rq_deadline));
1947                 goto put_conn;
1948         }
1949
1950         CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
1951                "%s:%s+%d:%d:x"LPU64":%s:%d\n", current_comm(),
1952                (request->rq_export ?
1953                 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
1954                (request->rq_export ?
1955                 atomic_read(&request->rq_export->exp_refcount) : -99),
1956                lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
1957                libcfs_id2str(request->rq_peer),
1958                lustre_msg_get_opc(request->rq_reqmsg));
1959
1960         if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
1961                 CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
1962
1963         rc = svc->srv_ops.so_req_handler(request);
1964
1965         ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
1966
1967 put_conn:
1968         lu_context_exit(&request->rq_session);
1969         lu_context_fini(&request->rq_session);
1970
1971         if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
1972                 DEBUG_REQ(D_WARNING, request,
1973                           "Request took longer than estimated ("
1974                                 CFS_DURATION_T":"CFS_DURATION_T
1975                                 "s); client may timeout.",
1976                           cfs_time_sub(request->rq_deadline,
1977                                        request->rq_arrival_time.tv_sec),
1978                           cfs_time_sub(cfs_time_current_sec(),
1979                                        request->rq_deadline));
1980         }
1981
1982         do_gettimeofday(&work_end);
1983         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
1984         CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
1985                "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
1986                "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
1987                 current_comm(),
1988                 (request->rq_export ?
1989                  (char *)request->rq_export->exp_client_uuid.uuid : "0"),
1990                 (request->rq_export ?
1991                  atomic_read(&request->rq_export->exp_refcount) : -99),
1992                 lustre_msg_get_status(request->rq_reqmsg),
1993                 request->rq_xid,
1994                 libcfs_id2str(request->rq_peer),
1995                 lustre_msg_get_opc(request->rq_reqmsg),
1996                 timediff,
1997                 cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
1998                 (request->rq_repmsg ?
1999                  lustre_msg_get_transno(request->rq_repmsg) :
2000                  request->rq_transno),
2001                 request->rq_status,
2002                 (request->rq_repmsg ?
2003                  lustre_msg_get_status(request->rq_repmsg) : -999));
2004         if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
2005                 __u32 op = lustre_msg_get_opc(request->rq_reqmsg);
2006                 int opc = opcode_offset(op);
2007                 if (opc > 0 && !(op == LDLM_ENQUEUE || op == MDS_REINT)) {
2008                         LASSERT(opc < LUSTRE_MAX_OPCODES);
2009                         lprocfs_counter_add(svc->srv_stats,
2010                                             opc + EXTRA_MAX_OPCODES,
2011                                             timediff);
2012                 }
2013         }
2014         if (unlikely(request->rq_early_count)) {
2015                 DEBUG_REQ(D_ADAPTTO, request,
2016                           "sent %d early replies before finishing in "
2017                           CFS_DURATION_T"s",
2018                           request->rq_early_count,
2019                           cfs_time_sub(work_end.tv_sec,
2020                           request->rq_arrival_time.tv_sec));
2021         }
2022
2023 out_req:
2024         ptlrpc_server_finish_active_request(svcpt, request);
2025
2026         return 1;
2027 }
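
/*
 * Two details of the function above worth noting: timediff is measured in
 * microseconds (from cfs_timeval_sub()), so the "timediff >> 19" passed
 * to ptlrpc_update_export_timer() is a cheap division by ~524k, i.e.
 * roughly the elapsed time in half-second units; and a request already
 * past rq_deadline on dequeue is dropped without being handled,
 * presumably because the client has given up on it (and likely resent)
 * by then.
 */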
2028
2029 /**
2030  * An internal function to process a single reply state object.
2031  */
2032 static int
2033 ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
2034 {
2035         struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
2036         struct ptlrpc_service     *svc = svcpt->scp_service;
2037         struct obd_export        *exp;
2038         int                     nlocks;
2039         int                     been_handled;
2040
2041         exp = rs->rs_export;
2042
2043         LASSERT(rs->rs_difficult);
2044         LASSERT(rs->rs_scheduled);
2045         LASSERT(list_empty(&rs->rs_list));
2046
2047         spin_lock(&exp->exp_lock);
2048         /* Noop if removed already */
2049         list_del_init(&rs->rs_exp_list);
2050         spin_unlock(&exp->exp_lock);
2051
2052         /* The disk commit callback holds exp_uncommitted_replies_lock while it
2053          * iterates over newly committed replies, removing them from
2054          * exp_uncommitted_replies.  It then drops this lock and schedules the
2055          * replies it found for handling here.
2056          *
2057          * We can avoid contention for exp_uncommitted_replies_lock between the
2058          * HRT threads and further commit callbacks by checking rs_committed
2059          * which is set in the commit callback while it holds both
2060          * rs_lock and exp_uncommitted_replies_lock.
2061          *
2062          * If we see rs_committed clear, the commit callback _may_ not have
2063          * handled this reply yet and we race with it to grab
2064          * exp_uncommitted_replies_lock before removing the reply from
2065          * exp_uncommitted_replies.  Note that if we lose the race and the
2066          * reply has already been removed, list_del_init() is a noop.
2067          *
2068          * If we see rs_committed set, we know the commit callback is handling,
2069          * or has handled, this reply, since store reordering might allow us to
2070          * see rs_committed set out of sequence.  But since this is done
2071          * holding rs_lock, we can be sure it has all completed once we hold
2072          * rs_lock, which we do right next.
2073          */
2074         if (!rs->rs_committed) {
2075                 spin_lock(&exp->exp_uncommitted_replies_lock);
2076                 list_del_init(&rs->rs_obd_list);
2077                 spin_unlock(&exp->exp_uncommitted_replies_lock);
2078         }
2079
2080         spin_lock(&rs->rs_lock);
2081
2082         been_handled = rs->rs_handled;
2083         rs->rs_handled = 1;
2084
2085         nlocks = rs->rs_nlocks;          /* atomic "steal", but */
2086         rs->rs_nlocks = 0;                    /* locks still on rs_locks! */
2087
2088         if (nlocks == 0 && !been_handled) {
2089                 /* If we see this, we should already have seen the warning
2090                  * in mds_steal_ack_locks()  */
2091                 CDEBUG(D_HA, "All locks stolen from rs %p x"LPD64".t"LPD64
2092                        " o%d NID %s\n",
2093                        rs,
2094                        rs->rs_xid, rs->rs_transno, rs->rs_opc,
2095                        libcfs_nid2str(exp->exp_connection->c_peer.nid));
2096         }
2097
2098         if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
2099                 spin_unlock(&rs->rs_lock);
2100
2101                 if (!been_handled && rs->rs_on_net) {
2102                         LNetMDUnlink(rs->rs_md_h);
2103                         /* Ignore return code; we're racing with completion */
2104                 }
2105
2106                 while (nlocks-- > 0)
2107                         ldlm_lock_decref(&rs->rs_locks[nlocks],
2108                                          rs->rs_modes[nlocks]);
2109
2110                 spin_lock(&rs->rs_lock);
2111         }
2112
2113         rs->rs_scheduled = 0;
2114
2115         if (!rs->rs_on_net) {
2116                 /* Off the net */
2117                 spin_unlock(&rs->rs_lock);
2118
2119                 class_export_put(exp);
2120                 rs->rs_export = NULL;
2121                 ptlrpc_rs_decref(rs);
2122                 if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
2123                     svc->srv_is_stopping)
2124                         wake_up_all(&svcpt->scp_waitq);
2125                 return 1;
2126         }
2127
2128         /* still on the net; callback will schedule */
2129         spin_unlock(&rs->rs_lock);
2130         return 1;
2131 }
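
/*
 * Note on "difficult" replies like the one handled above: the reply state
 * may still hold LDLM lock references (rs_locks) and may still be on the
 * network (rs_on_net).  The locks are normally dropped here, once the
 * reply has been handled at least once, and the reply state itself is
 * freed only after the network is done with it; until then the LNet event
 * callback will reschedule it.
 */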
2132
2133
2134 static void
2135 ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt)
2136 {
2137         int avail = svcpt->scp_nrqbds_posted;
2138         int low_water = test_req_buffer_pressure ? 0 :
2139                         svcpt->scp_service->srv_nbuf_per_group / 2;
2140
2141         /* NB I'm not locking; just looking. */
2142
2143         /* CAVEAT EMPTOR: We might be allocating buffers here because we've
2144          * allowed the request history to grow out of control.  We could put a
2145          * sanity check on that here and cull some history if we need the
2146          * space. */
2147
2148         if (avail <= low_water)
2149                 ptlrpc_grow_req_bufs(svcpt, 1);
2150
2151         if (svcpt->scp_service->srv_stats) {
2152                 lprocfs_counter_add(svcpt->scp_service->srv_stats,
2153                                     PTLRPC_REQBUF_AVAIL_CNTR, avail);
2154         }
2155 }
2156
2157 static int
2158 ptlrpc_retry_rqbds(void *arg)
2159 {
2160         struct ptlrpc_service_part *svcpt = (struct ptlrpc_service_part *)arg;
2161
2162         svcpt->scp_rqbd_timeout = 0;
2163         return -ETIMEDOUT;
2164 }
2165
2166 static inline int
2167 ptlrpc_threads_enough(struct ptlrpc_service_part *svcpt)
2168 {
2169         return svcpt->scp_nreqs_active <
2170                svcpt->scp_nthrs_running - 1 -
2171                (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL);
2172 }
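
/*
 * The arithmetic above appears to reserve one running thread for taking
 * in new requests, plus one more for high-priority requests when the
 * service defines an so_hpreq_handler; only the remainder is considered
 * "enough" to absorb the currently active requests.
 */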
2173
2174 /**
2175  * Whether we are allowed to create more threads.
2176  * Callers may invoke this without any lock, but must hold
2177  * ptlrpc_service_part::scp_lock to get a reliable result.
2178  */
2179 static inline int
2180 ptlrpc_threads_increasable(struct ptlrpc_service_part *svcpt)
2181 {
2182         return svcpt->scp_nthrs_running +
2183                svcpt->scp_nthrs_starting <
2184                svcpt->scp_service->srv_nthrs_cpt_limit;
2185 }
2186
2187 /**
2188  * Too many requests are queued and we are allowed to create more threads.
2189  */
2190 static inline int
2191 ptlrpc_threads_need_create(struct ptlrpc_service_part *svcpt)
2192 {
2193         return !ptlrpc_threads_enough(svcpt) &&
2194                 ptlrpc_threads_increasable(svcpt);
2195 }
2196
2197 static inline int
2198 ptlrpc_thread_stopping(struct ptlrpc_thread *thread)
2199 {
2200         return thread_is_stopping(thread) ||
2201                thread->t_svcpt->scp_service->srv_is_stopping;
2202 }
2203
2204 static inline int
2205 ptlrpc_rqbd_pending(struct ptlrpc_service_part *svcpt)
2206 {
2207         return !list_empty(&svcpt->scp_rqbd_idle) &&
2208                svcpt->scp_rqbd_timeout == 0;
2209 }
2210
2211 static inline int
2212 ptlrpc_at_check(struct ptlrpc_service_part *svcpt)
2213 {
2214         return svcpt->scp_at_check;
2215 }
2216
2217 /**
2218  * Whether requests are waiting for preprocessing.
2219  * Callers may invoke this without any lock, but must hold
2220  * ptlrpc_service_part::scp_lock to get a reliable result.
2221  */
2222 static inline int
2223 ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt)
2224 {
2225         return !list_empty(&svcpt->scp_req_incoming);
2226 }
2227
2228 static __attribute__((__noinline__)) int
2229 ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
2230                   struct ptlrpc_thread *thread)
2231 {
2232         /* Don't exit while there are replies to be handled */
2233         struct l_wait_info lwi = LWI_TIMEOUT(svcpt->scp_rqbd_timeout,
2234                                              ptlrpc_retry_rqbds, svcpt);
2235
2236         /* XXX: Add this back when libcfs watchdog is merged upstream
2237         lc_watchdog_disable(thread->t_watchdog);
2238          */
2239
2240         cond_resched();
2241
2242         l_wait_event_exclusive_head(svcpt->scp_waitq,
2243                                 ptlrpc_thread_stopping(thread) ||
2244                                 ptlrpc_server_request_incoming(svcpt) ||
2245                                 ptlrpc_server_request_pending(svcpt, false) ||
2246                                 ptlrpc_rqbd_pending(svcpt) ||
2247                                 ptlrpc_at_check(svcpt), &lwi);
2248
2249         if (ptlrpc_thread_stopping(thread))
2250                 return -EINTR;
2251
2252         /*
2253         lc_watchdog_touch(thread->t_watchdog,
2254                           ptlrpc_server_get_timeout(svcpt));
2255          */
2256         return 0;
2257 }
2258
2259 /**
2260  * Main thread body for service threads.
2261  * Waits in a loop for new requests to process to appear.
2262  * Every time an incoming request is added to its queue, a waitq
2263  * is woken up and one of the threads will handle it.
2264  */
2265 static int ptlrpc_main(void *arg)
2266 {
2267         struct ptlrpc_thread            *thread = (struct ptlrpc_thread *)arg;
2268         struct ptlrpc_service_part      *svcpt = thread->t_svcpt;
2269         struct ptlrpc_service           *svc = svcpt->scp_service;
2270         struct ptlrpc_reply_state       *rs;
2271 #ifdef WITH_GROUP_INFO
2272         struct group_info *ginfo = NULL;
2273 #endif
2274         struct lu_env *env;
2275         int counter = 0, rc = 0;
2276
2277         thread->t_pid = current_pid();
2278         unshare_fs_struct();
2279
2280         /* NB: we will call cfs_cpt_bind() for all threads, because we
2281          * might want to run the lustre server only on a subset of system
2282          * CPUs; in that case ->scp_cpt is CFS_CPT_ANY */
2283         rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt);
2284         if (rc != 0) {
2285                 CWARN("%s: failed to bind %s on CPT %d\n",
2286                       svc->srv_name, thread->t_name, svcpt->scp_cpt);
2287         }
2288
2289 #ifdef WITH_GROUP_INFO
2290         ginfo = groups_alloc(0);
2291         if (!ginfo) {
2292                 rc = -ENOMEM;
2293                 goto out;
2294         }
2295
2296         set_current_groups(ginfo);
2297         put_group_info(ginfo);
2298 #endif
2299
2300         if (svc->srv_ops.so_thr_init != NULL) {
2301                 rc = svc->srv_ops.so_thr_init(thread);
2302                 if (rc)
2303                         goto out;
2304         }
2305
2306         OBD_ALLOC_PTR(env);
2307         if (env == NULL) {
2308                 rc = -ENOMEM;
2309                 goto out_srv_fini;
2310         }
2311
2312         rc = lu_context_init(&env->le_ctx,
2313                              svc->srv_ctx_tags|LCT_REMEMBER|LCT_NOREF);
2314         if (rc)
2315                 goto out_srv_fini;
2316
2317         thread->t_env = env;
2318         env->le_ctx.lc_thread = thread;
2319         env->le_ctx.lc_cookie = 0x6;
2320
2321         while (!list_empty(&svcpt->scp_rqbd_idle)) {
2322                 rc = ptlrpc_server_post_idle_rqbds(svcpt);
2323                 if (rc >= 0)
2324                         continue;
2325
2326                 CERROR("Failed to post rqbd for %s on CPT %d: %d\n",
2327                         svc->srv_name, svcpt->scp_cpt, rc);
2328                 goto out_srv_fini;
2329         }
2330
2331         /* Alloc reply state structure for this one */
2332         OBD_ALLOC_LARGE(rs, svc->srv_max_reply_size);
2333         if (!rs) {
2334                 rc = -ENOMEM;
2335                 goto out_srv_fini;
2336         }
2337
2338         spin_lock(&svcpt->scp_lock);
2339
2340         LASSERT(thread_is_starting(thread));
2341         thread_clear_flags(thread, SVC_STARTING);
2342
2343         LASSERT(svcpt->scp_nthrs_starting == 1);
2344         svcpt->scp_nthrs_starting--;
2345
2346         /* SVC_STOPPING may already be set here if someone else is trying
2347          * to stop the service while this new thread has been dynamically
2348          * forked. We still set SVC_RUNNING to let our creator know that
2349          * we are now running, however we will exit as soon as possible */
2350         thread_add_flags(thread, SVC_RUNNING);
2351         svcpt->scp_nthrs_running++;
2352         spin_unlock(&svcpt->scp_lock);
2353
2354         /* wake up our creator in case he's still waiting. */
2355         wake_up(&thread->t_ctl_waitq);
2356
2357         /*
2358         thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
2359                                              NULL, NULL);
2360          */
2361
2362         spin_lock(&svcpt->scp_rep_lock);
2363         list_add(&rs->rs_list, &svcpt->scp_rep_idle);
2364         wake_up(&svcpt->scp_rep_waitq);
2365         spin_unlock(&svcpt->scp_rep_lock);
2366
2367         CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
2368                svcpt->scp_nthrs_running);
2369
2370         /* XXX maintain a list of all managed devices: insert here */
2371         while (!ptlrpc_thread_stopping(thread)) {
2372                 if (ptlrpc_wait_event(svcpt, thread))
2373                         break;
2374
2375                 ptlrpc_check_rqbd_pool(svcpt);
2376
2377                 if (ptlrpc_threads_need_create(svcpt)) {
2378                         /* Ignore return code - we tried... */
2379                         ptlrpc_start_thread(svcpt, 0);
2380                 }
2381
2382                 /* Process all incoming reqs before handling any */
2383                 if (ptlrpc_server_request_incoming(svcpt)) {
2384                         lu_context_enter(&env->le_ctx);
2385                         env->le_ses = NULL;
2386                         ptlrpc_server_handle_req_in(svcpt, thread);
2387                         lu_context_exit(&env->le_ctx);
2388
2389                         /* but limit ourselves in case of flood */
2390                         if (counter++ < 100)
2391                                 continue;
2392                         counter = 0;
2393                 }
2394
2395                 if (ptlrpc_at_check(svcpt))
2396                         ptlrpc_at_check_timed(svcpt);
2397
2398                 if (ptlrpc_server_request_pending(svcpt, false)) {
2399                         lu_context_enter(&env->le_ctx);
2400                         ptlrpc_server_handle_request(svcpt, thread);
2401                         lu_context_exit(&env->le_ctx);
2402                 }
2403
2404                 if (ptlrpc_rqbd_pending(svcpt) &&
2405                     ptlrpc_server_post_idle_rqbds(svcpt) < 0) {
2406                         /* I just failed to repost request buffers.
2407                          * Wait for a timeout (unless something else
2408                          * happens) before I try again */
2409                         svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10;
2410                         CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
2411                                svcpt->scp_nrqbds_posted);
2412                 }
2413         }
2414
2415         /*
2416         lc_watchdog_delete(thread->t_watchdog);
2417         thread->t_watchdog = NULL;
2418         */
2419
2420 out_srv_fini:
2421         /*
2422          * deconstruct service specific state created by ptlrpc_start_thread()
2423          */
2424         if (svc->srv_ops.so_thr_done != NULL)
2425                 svc->srv_ops.so_thr_done(thread);
2426
2427         if (env != NULL) {
2428                 lu_context_fini(&env->le_ctx);
2429                 OBD_FREE_PTR(env);
2430         }
2431 out:
2432         CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
2433                thread, thread->t_pid, thread->t_id, rc);
2434
2435         spin_lock(&svcpt->scp_lock);
2436         if (thread_test_and_clear_flags(thread, SVC_STARTING))
2437                 svcpt->scp_nthrs_starting--;
2438
2439         if (thread_test_and_clear_flags(thread, SVC_RUNNING)) {
2440                 /* must know immediately */
2441                 svcpt->scp_nthrs_running--;
2442         }
2443
2444         thread->t_id = rc;
2445         thread_add_flags(thread, SVC_STOPPED);
2446
2447         wake_up(&thread->t_ctl_waitq);
2448         spin_unlock(&svcpt->scp_lock);
2449
2450         return rc;
2451 }
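
/*
 * Per-iteration order of work in the main loop above: replenish the
 * request-buffer pool, fork another thread if needed, drain up to 100
 * consecutive incoming requests before falling through (flood control),
 * run the adaptive-timeout check, handle at most one queued request, and
 * finally repost idle request buffer descriptors, retrying after ~0.1s
 * on failure.
 */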
2452
2453 static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt,
2454                           struct list_head *replies)
2455 {
2456         int result;
2457
2458         spin_lock(&hrt->hrt_lock);
2459
2460         list_splice_init(&hrt->hrt_queue, replies);
2461         result = ptlrpc_hr.hr_stopping || !list_empty(replies);
2462
2463         spin_unlock(&hrt->hrt_lock);
2464         return result;
2465 }
2466
2467 /**
2468  * Main body of "handle reply" function.
2469  * It processes ACKed reply states.
2470  */
2471 static int ptlrpc_hr_main(void *arg)
2472 {
2473         struct ptlrpc_hr_thread         *hrt = (struct ptlrpc_hr_thread *)arg;
2474         struct ptlrpc_hr_partition      *hrp = hrt->hrt_partition;
2475         LIST_HEAD(replies);
2476         char                            threadname[20];
2477         int                             rc;
2478
2479         snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
2480                  hrp->hrp_cpt, hrt->hrt_id);
2481         unshare_fs_struct();
2482
2483         rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
2484         if (rc != 0) {
2485                 CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n",
2486                       threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc);
2487         }
2488
2489         atomic_inc(&hrp->hrp_nstarted);
2490         wake_up(&ptlrpc_hr.hr_waitq);
2491
2492         while (!ptlrpc_hr.hr_stopping) {
2493                 l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));
2494
2495                 while (!list_empty(&replies)) {
2496                         struct ptlrpc_reply_state *rs;
2497
2498                         rs = list_entry(replies.prev,
2499                                             struct ptlrpc_reply_state,
2500                                             rs_list);
2501                         list_del_init(&rs->rs_list);
2502                         ptlrpc_handle_rs(rs);
2503                 }
2504         }
2505
2506         atomic_inc(&hrp->hrp_nstopped);
2507         wake_up(&ptlrpc_hr.hr_waitq);
2508
2509         return 0;
2510 }
2511
2512 static void ptlrpc_stop_hr_threads(void)
2513 {
2514         struct ptlrpc_hr_partition      *hrp;
2515         int                             i;
2516         int                             j;
2517
2518         ptlrpc_hr.hr_stopping = 1;
2519
2520         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2521                 if (hrp->hrp_thrs == NULL)
2522                         continue; /* uninitialized */
2523                 for (j = 0; j < hrp->hrp_nthrs; j++)
2524                         wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
2525         }
2526
2527         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2528                 if (hrp->hrp_thrs == NULL)
2529                         continue; /* uninitialized */
2530                 wait_event(ptlrpc_hr.hr_waitq,
2531                                atomic_read(&hrp->hrp_nstopped) ==
2532                                atomic_read(&hrp->hrp_nstarted));
2533         }
2534 }
2535
2536 static int ptlrpc_start_hr_threads(void)
2537 {
2538         struct ptlrpc_hr_partition      *hrp;
2539         int                             i;
2540         int                             j;
2541
2542         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
2543                 int     rc = 0;
2544
2545                 for (j = 0; j < hrp->hrp_nthrs; j++) {
2546                         struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
2547                         rc = PTR_ERR(kthread_run(ptlrpc_hr_main,
2548                                                  &hrp->hrp_thrs[j],
2549                                                  "ptlrpc_hr%02d_%03d",
2550                                                  hrp->hrp_cpt,
2551                                                  hrt->hrt_id));
2552                         if (IS_ERR_VALUE(rc))
2553                                 break;
2554                 }
2555                 wait_event(ptlrpc_hr.hr_waitq,
2556                                atomic_read(&hrp->hrp_nstarted) == j);
2557                 if (!IS_ERR_VALUE(rc))
2558                         continue;
2559
2560                 CERROR("Reply handling thread %d:%d Failed on starting: "
2561                        "rc = %d\n", i, j, rc);
2562                 ptlrpc_stop_hr_threads();
2563                 return rc;
2564         }
2565         return 0;
2566 }
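
/*
 * Note on the error handling above: kthread_run() returns a task_struct
 * pointer on success or an ERR_PTR()-encoded errno on failure, so
 *
 *      rc = PTR_ERR(kthread_run(...));
 *      if (IS_ERR_VALUE(rc))
 *              (failed to spawn)
 *
 * deliberately discards the task pointer and keeps only the errno-range
 * check; the threads announce themselves via hrp_nstarted instead.
 */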
2567
2568 static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
2569 {
2570         struct l_wait_info      lwi = { 0 };
2571         struct ptlrpc_thread    *thread;
2572         LIST_HEAD(zombie);
2573
2574         CDEBUG(D_INFO, "Stopping threads for service %s\n",
2575                svcpt->scp_service->srv_name);
2576
2577         spin_lock(&svcpt->scp_lock);
2578         /* let the threads know that we would like them to stop asap */
2579         list_for_each_entry(thread, &svcpt->scp_threads, t_link) {
2580                 CDEBUG(D_INFO, "Stopping thread %s #%u\n",
2581                        svcpt->scp_service->srv_thread_name, thread->t_id);
2582                 thread_add_flags(thread, SVC_STOPPING);
2583         }
2584
2585         wake_up_all(&svcpt->scp_waitq);
2586
2587         while (!list_empty(&svcpt->scp_threads)) {
2588                 thread = list_entry(svcpt->scp_threads.next,
2589                                         struct ptlrpc_thread, t_link);
2590                 if (thread_is_stopped(thread)) {
2591                         list_del(&thread->t_link);
2592                         list_add(&thread->t_link, &zombie);
2593                         continue;
2594                 }
2595                 spin_unlock(&svcpt->scp_lock);
2596
2597                 CDEBUG(D_INFO, "waiting for stopping-thread %s #%u\n",
2598                        svcpt->scp_service->srv_thread_name, thread->t_id);
2599                 l_wait_event(thread->t_ctl_waitq,
2600                              thread_is_stopped(thread), &lwi);
2601
2602                 spin_lock(&svcpt->scp_lock);
2603         }
2604
2605         spin_unlock(&svcpt->scp_lock);
2606
2607         while (!list_empty(&zombie)) {
2608                 thread = list_entry(zombie.next,
2609                                         struct ptlrpc_thread, t_link);
2610                 list_del(&thread->t_link);
2611                 OBD_FREE_PTR(thread);
2612         }
2613 }
2614
2615 /**
2616  * Stops all threads of a particular service \a svc
2617  */
2618 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
2619 {
2620         struct ptlrpc_service_part *svcpt;
2621         int                        i;
2622
2623         ptlrpc_service_for_each_part(svcpt, i, svc) {
2624                 if (svcpt->scp_service != NULL)
2625                         ptlrpc_svcpt_stop_threads(svcpt);
2626         }
2627 }
2628 EXPORT_SYMBOL(ptlrpc_stop_all_threads);
2629
2630 int ptlrpc_start_threads(struct ptlrpc_service *svc)
2631 {
2632         int     rc = 0;
2633         int     i;
2634         int     j;
2635
2636         /* We require 2 threads min, see note in ptlrpc_server_handle_request */
2637         LASSERT(svc->srv_nthrs_cpt_init >= PTLRPC_NTHRS_INIT);
2638
2639         for (i = 0; i < svc->srv_ncpts; i++) {
2640                 for (j = 0; j < svc->srv_nthrs_cpt_init; j++) {
2641                         rc = ptlrpc_start_thread(svc->srv_parts[i], 1);
2642                         if (rc == 0)
2643                                 continue;
2644
2645                         if (rc != -EMFILE)
2646                                 goto failed;
2647                         /* We have enough threads, don't start more. b=15759 */
2648                         break;
2649                 }
2650         }
2651
2652         return 0;
2653  failed:
2654         CERROR("cannot start %s thread #%d_%d: rc %d\n",
2655                svc->srv_thread_name, i, j, rc);
2656         ptlrpc_stop_all_threads(svc);
2657         return rc;
2658 }
2659 EXPORT_SYMBOL(ptlrpc_start_threads);
2660
2661 int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
2662 {
2663         struct l_wait_info      lwi = { 0 };
2664         struct ptlrpc_thread    *thread;
2665         struct ptlrpc_service   *svc;
2666         int                     rc;
2667
2668         LASSERT(svcpt != NULL);
2669
2670         svc = svcpt->scp_service;
2671
2672         CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n",
2673                svc->srv_name, svcpt->scp_cpt, svcpt->scp_nthrs_running,
2674                svc->srv_nthrs_cpt_init, svc->srv_nthrs_cpt_limit);
2675
2676  again:
2677         if (unlikely(svc->srv_is_stopping))
2678                 return -ESRCH;
2679
2680         if (!ptlrpc_threads_increasable(svcpt) ||
2681             (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
2682              svcpt->scp_nthrs_running == svc->srv_nthrs_cpt_init - 1))
2683                 return -EMFILE;
2684
2685         OBD_CPT_ALLOC_PTR(thread, svc->srv_cptable, svcpt->scp_cpt);
2686         if (thread == NULL)
2687                 return -ENOMEM;
2688         init_waitqueue_head(&thread->t_ctl_waitq);
2689
2690         spin_lock(&svcpt->scp_lock);
2691         if (!ptlrpc_threads_increasable(svcpt)) {
2692                 spin_unlock(&svcpt->scp_lock);
2693                 OBD_FREE_PTR(thread);
2694                 return -EMFILE;
2695         }
2696
2697         if (svcpt->scp_nthrs_starting != 0) {
2698                 /* serialize starting because some modules (obdfilter)
2699                  * might require unique and contiguous t_id values */
2700                 LASSERT(svcpt->scp_nthrs_starting == 1);
2701                 spin_unlock(&svcpt->scp_lock);
2702                 OBD_FREE_PTR(thread);
2703                 if (wait) {
2704                         CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n",
2705                                svc->srv_thread_name, svcpt->scp_thr_nextid);
2706                         schedule();
2707                         goto again;
2708                 }
2709
2710                 CDEBUG(D_INFO, "Creating thread %s #%d race, retry later\n",
2711                        svc->srv_thread_name, svcpt->scp_thr_nextid);
2712                 return -EAGAIN;
2713         }
2714
2715         svcpt->scp_nthrs_starting++;
2716         thread->t_id = svcpt->scp_thr_nextid++;
2717         thread_add_flags(thread, SVC_STARTING);
2718         thread->t_svcpt = svcpt;
2719
2720         list_add(&thread->t_link, &svcpt->scp_threads);
2721         spin_unlock(&svcpt->scp_lock);
2722
2723         if (svcpt->scp_cpt >= 0) {
2724                 snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d",
2725                          svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
2726         } else {
2727                 snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d",
2728                          svc->srv_thread_name, thread->t_id);
2729         }
2730
2731         CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
2732         rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name));
2733         if (IS_ERR_VALUE(rc)) {
2734                 CERROR("cannot start thread '%s': rc %d\n",
2735                        thread->t_name, rc);
2736                 spin_lock(&svcpt->scp_lock);
2737                 --svcpt->scp_nthrs_starting;
2738                 if (thread_is_stopping(thread)) {
2739                         /* this ptlrpc_thread is being handled
2740                          * by ptlrpc_svcpt_stop_threads now
2741                          */
2742                         thread_add_flags(thread, SVC_STOPPED);
2743                         wake_up(&thread->t_ctl_waitq);
2744                         spin_unlock(&svcpt->scp_lock);
2745                 } else {
2746                         list_del(&thread->t_link);
2747                         spin_unlock(&svcpt->scp_lock);
2748                         OBD_FREE_PTR(thread);
2749                 }
2750                 return rc;
2751         }
2752
2753         if (!wait)
2754                 return 0;
2755
2756         l_wait_event(thread->t_ctl_waitq,
2757                      thread_is_running(thread) || thread_is_stopped(thread),
2758                      &lwi);
2759
2760         rc = thread_is_stopped(thread) ? thread->t_id : 0;
2761         return rc;
2762 }

int ptlrpc_hr_init(void)
{
        cpumask_t                       mask;
        struct ptlrpc_hr_partition      *hrp;
        struct ptlrpc_hr_thread         *hrt;
        int                             rc;
        int                             i;
        int                             j;
        int                             weight;

        memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
        ptlrpc_hr.hr_cpt_table = cfs_cpt_table;

        ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
                                                   sizeof(*hrp));
        if (ptlrpc_hr.hr_partitions == NULL)
                return -ENOMEM;

        init_waitqueue_head(&ptlrpc_hr.hr_waitq);

        cpumask_copy(&mask, topology_thread_cpumask(0));
        weight = cpus_weight(mask);

        cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
                hrp->hrp_cpt = i;

                atomic_set(&hrp->hrp_nstarted, 0);
                atomic_set(&hrp->hrp_nstopped, 0);

                hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
                hrp->hrp_nthrs /= weight;

                LASSERT(hrp->hrp_nthrs > 0);
                OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, i,
                              hrp->hrp_nthrs * sizeof(*hrt));
                if (hrp->hrp_thrs == NULL)
                        GOTO(out, rc = -ENOMEM);

                for (j = 0; j < hrp->hrp_nthrs; j++) {
                        hrt = &hrp->hrp_thrs[j];

                        hrt->hrt_id = j;
                        hrt->hrt_partition = hrp;
                        init_waitqueue_head(&hrt->hrt_waitq);
                        spin_lock_init(&hrt->hrt_lock);
                        INIT_LIST_HEAD(&hrt->hrt_queue);
                }
        }

        rc = ptlrpc_start_hr_threads();
out:
        if (rc != 0)
                ptlrpc_hr_fini();
        return rc;
}
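
/*
 * Worked example for the thread-count computation above (an assumed
 * topology, for illustration only): with two hyperthreads per core,
 * topology_thread_cpumask(0) has weight 2.  If a CPT covers one 8-core
 * socket, cfs_cpt_weight() returns 16, so that partition gets
 * 16 / 2 = 8 reply handling threads, i.e. one per physical core.  The
 * LASSERT above encodes the assumption that no CPT is narrower than a
 * single core's sibling mask, which would compute zero threads.
 */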

void ptlrpc_hr_fini(void)
{
        struct ptlrpc_hr_partition      *hrp;
        int                             i;

        if (ptlrpc_hr.hr_partitions == NULL)
                return;

        ptlrpc_stop_hr_threads();

        cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
                if (hrp->hrp_thrs != NULL) {
                        OBD_FREE(hrp->hrp_thrs,
                                 hrp->hrp_nthrs * sizeof(hrp->hrp_thrs[0]));
                }
        }

        cfs_percpt_free(ptlrpc_hr.hr_partitions);
        ptlrpc_hr.hr_partitions = NULL;
}

/**
 * Wait until all already scheduled replies are processed.
 */
static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
{
        while (1) {
                int rc;
                struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
                                                     NULL, NULL);

                rc = l_wait_event(svcpt->scp_waitq,
                                  atomic_read(&svcpt->scp_nreps_difficult) == 0,
                                  &lwi);
                if (rc == 0)
                        break;
                CWARN("Unexpectedly long timeout waiting for replies: %s %p\n",
                      svcpt->scp_service->srv_name, svcpt->scp_service);
        }
}

static void
ptlrpc_service_del_atimer(struct ptlrpc_service *svc)
{
        struct ptlrpc_service_part      *svcpt;
        int                             i;

        /* disarm the AT timer on each partition early... */
        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (svcpt->scp_service != NULL)
                        cfs_timer_disarm(&svcpt->scp_at_timer);
        }
}

static void
ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
{
        struct ptlrpc_service_part        *svcpt;
        struct ptlrpc_request_buffer_desc *rqbd;
        struct l_wait_info                lwi;
        int                               rc;
        int                               i;

        /* All history will be culled when the next request buffer is
         * freed in ptlrpc_service_purge_all() */
        svc->srv_hist_nrqbds_cpt_max = 0;

        rc = LNetClearLazyPortal(svc->srv_req_portal);
        LASSERT(rc == 0);

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (svcpt->scp_service == NULL)
                        break;

                /* Unlink all the request buffers.  This forces a 'final'
                 * event with its 'unlink' flag set for each posted rqbd */
                list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
                                    rqbd_list) {
                        rc = LNetMDUnlink(rqbd->rqbd_md_h);
                        LASSERT(rc == 0 || rc == -ENOENT);
                }
        }

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (svcpt->scp_service == NULL)
                        break;

                /* Wait for the network to release any buffers
                 * it's currently filling */
                spin_lock(&svcpt->scp_lock);
                while (svcpt->scp_nrqbds_posted != 0) {
                        spin_unlock(&svcpt->scp_lock);
                        /* Network access will complete in finite time but
                         * the HUGE timeout lets us CWARN for visibility
                         * of sluggish NALs */
                        lwi = LWI_TIMEOUT_INTERVAL(
                                        cfs_time_seconds(LONG_UNLINK),
                                        cfs_time_seconds(1), NULL, NULL);
                        rc = l_wait_event(svcpt->scp_waitq,
                                          svcpt->scp_nrqbds_posted == 0, &lwi);
                        if (rc == -ETIMEDOUT) {
                                CWARN("Service %s waiting for request buffers\n",
                                      svcpt->scp_service->srv_name);
                        }
                        spin_lock(&svcpt->scp_lock);
                }
                spin_unlock(&svcpt->scp_lock);
        }
}

static void
ptlrpc_service_purge_all(struct ptlrpc_service *svc)
{
        struct ptlrpc_service_part              *svcpt;
        struct ptlrpc_request_buffer_desc       *rqbd;
        struct ptlrpc_request                   *req;
        struct ptlrpc_reply_state               *rs;
        int                                     i;

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (svcpt->scp_service == NULL)
                        break;

                spin_lock(&svcpt->scp_rep_lock);
                while (!list_empty(&svcpt->scp_rep_active)) {
                        rs = list_entry(svcpt->scp_rep_active.next,
                                        struct ptlrpc_reply_state, rs_list);
                        spin_lock(&rs->rs_lock);
                        ptlrpc_schedule_difficult_reply(rs);
                        spin_unlock(&rs->rs_lock);
                }
                spin_unlock(&svcpt->scp_rep_lock);

                /* purge the request queue.  NB No new replies (rqbds
                 * all unlinked) and no service threads, so I'm the only
                 * thread noodling the request queue now */
                while (!list_empty(&svcpt->scp_req_incoming)) {
                        req = list_entry(svcpt->scp_req_incoming.next,
                                         struct ptlrpc_request, rq_list);

                        list_del(&req->rq_list);
                        svcpt->scp_nreqs_incoming--;
                        ptlrpc_server_finish_request(svcpt, req);
                }

                while (ptlrpc_server_request_pending(svcpt, true)) {
                        req = ptlrpc_server_request_get(svcpt, true);
                        ptlrpc_server_finish_active_request(svcpt, req);
                }

                LASSERT(list_empty(&svcpt->scp_rqbd_posted));
                LASSERT(svcpt->scp_nreqs_incoming == 0);
                LASSERT(svcpt->scp_nreqs_active == 0);
                /* history should have been culled by
                 * ptlrpc_server_finish_request */
                LASSERT(svcpt->scp_hist_nrqbds == 0);

                /* Now free all the request buffers since nothing
                 * references them any more... */

                while (!list_empty(&svcpt->scp_rqbd_idle)) {
                        rqbd = list_entry(svcpt->scp_rqbd_idle.next,
                                          struct ptlrpc_request_buffer_desc,
                                          rqbd_list);
                        ptlrpc_free_rqbd(rqbd);
                }
                ptlrpc_wait_replies(svcpt);

                while (!list_empty(&svcpt->scp_rep_idle)) {
                        rs = list_entry(svcpt->scp_rep_idle.next,
                                        struct ptlrpc_reply_state,
                                        rs_list);
                        list_del(&rs->rs_list);
                        OBD_FREE_LARGE(rs, svc->srv_max_reply_size);
                }
        }
}

static void
ptlrpc_service_free(struct ptlrpc_service *svc)
{
        struct ptlrpc_service_part      *svcpt;
        struct ptlrpc_at_array          *array;
        int                             i;

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (svcpt->scp_service == NULL)
                        break;

                /* In case somebody rearmed this in the meantime */
                cfs_timer_disarm(&svcpt->scp_at_timer);
                array = &svcpt->scp_at_array;

                if (array->paa_reqs_array != NULL) {
                        OBD_FREE(array->paa_reqs_array,
                                 sizeof(struct list_head) * array->paa_size);
                        array->paa_reqs_array = NULL;
                }

                if (array->paa_reqs_count != NULL) {
                        OBD_FREE(array->paa_reqs_count,
                                 sizeof(__u32) * array->paa_size);
                        array->paa_reqs_count = NULL;
                }
        }

        ptlrpc_service_for_each_part(svcpt, i, svc)
                OBD_FREE_PTR(svcpt);

        if (svc->srv_cpts != NULL)
                cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts);

        OBD_FREE(svc, offsetof(struct ptlrpc_service,
                               srv_parts[svc->srv_ncpts]));
}
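
/*
 * Sizing note: srv_parts[] is a variable-length array at the tail of
 * struct ptlrpc_service, so the allocation and free sizes are computed
 * with offsetof() rather than sizeof().  A minimal sketch of the idiom,
 * using a hypothetical structure for illustration:
 *
 *      struct demo {
 *              int              d_ncpts;
 *              struct part     *d_parts[0];    (flexible tail array)
 *      };
 *
 *      size = offsetof(struct demo, d_parts[n]);
 *      OBD_ALLOC(d, size);
 *      ...
 *      OBD_FREE(d, size);      (must match the allocated size)
 */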

int ptlrpc_unregister_service(struct ptlrpc_service *service)
{
        CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);

        service->srv_is_stopping = 1;

        mutex_lock(&ptlrpc_all_services_mutex);
        list_del_init(&service->srv_list);
        mutex_unlock(&ptlrpc_all_services_mutex);

        ptlrpc_service_del_atimer(service);
        ptlrpc_stop_all_threads(service);

        ptlrpc_service_unlink_rqbd(service);
        ptlrpc_service_purge_all(service);
        ptlrpc_service_nrs_cleanup(service);

        ptlrpc_lprocfs_unregister_service(service);

        ptlrpc_service_free(service);

        return 0;
}
EXPORT_SYMBOL(ptlrpc_unregister_service);

/**
 * Returns 0 if the service partition is healthy.
 *
 * Right now, it just checks to make sure that requests aren't languishing
 * in the queue.  We'll use this health check to govern whether a node needs
 * to be shot, so it's intentionally non-aggressive.
 */
int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
{
        struct ptlrpc_request           *request = NULL;
        struct timeval                  right_now;
        long                            timediff;

        do_gettimeofday(&right_now);

        spin_lock(&svcpt->scp_req_lock);
        /* How long has the next entry been waiting? */
        if (ptlrpc_server_high_pending(svcpt, true))
                request = ptlrpc_nrs_req_peek_nolock(svcpt, true);
        else if (ptlrpc_server_normal_pending(svcpt, true))
                request = ptlrpc_nrs_req_peek_nolock(svcpt, false);

        if (request == NULL) {
                spin_unlock(&svcpt->scp_req_lock);
                return 0;
        }

        timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
        spin_unlock(&svcpt->scp_req_lock);

        if ((timediff / ONE_MILLION) >
            (AT_OFF ? obd_timeout * 3 / 2 : at_max)) {
                CERROR("%s: unhealthy - request has been waiting %lds\n",
                       svcpt->scp_service->srv_name, timediff / ONE_MILLION);
                return -1;
        }

        return 0;
}
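
/*
 * Worked example of the cutoff above (illustrative values only):
 * cfs_timeval_sub() reports the wait in microseconds, hence the
 * division by ONE_MILLION.  With adaptive timeouts enabled and
 * at_max == 600, a queued request may wait up to 600s before the
 * partition is declared unhealthy; with AT off and obd_timeout == 100,
 * the cutoff is 100 * 3 / 2 = 150s.
 */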

int
ptlrpc_service_health_check(struct ptlrpc_service *svc)
{
        struct ptlrpc_service_part      *svcpt;
        int                             i;

        if (svc == NULL)
                return 0;

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                int rc = ptlrpc_svcpt_health_check(svcpt);

                if (rc != 0)
                        return rc;
        }
        return 0;
}
EXPORT_SYMBOL(ptlrpc_service_health_check);
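
/*
 * Illustrative use (hypothetical caller, not part of this file): an
 * OBD-level health check can fold the per-service result into its own
 * status, since the first unhealthy partition fails the whole service:
 *
 *      if (ptlrpc_service_health_check(svc) != 0)
 *              healthy = false;
 */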