Correct grammar/typos in dprintks
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 23165aef59d94e9b3d996508882eb273aab7e96b..d8e8d79a84514aebcd003b65ef2cda5002bc391c 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -18,6 +18,7 @@
 #include <linux/skbuff.h>
 #include <linux/file.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <net/sock.h>
 #include <net/checksum.h>
 #include <net/ip.h>
@@ -29,7 +30,6 @@
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/xdr.h>
-#include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/svc_xprt.h>
 
@@ -113,6 +113,34 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
 }
 EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
 
+/*
+ * Format the transport list for printing
+ */
+int svc_print_xprts(char *buf, int maxlen)
+{
+       struct list_head *le;
+       char tmpstr[80];
+       int len = 0;
+       buf[0] = '\0';
+
+       spin_lock(&svc_xprt_class_lock);
+       list_for_each(le, &svc_xprt_class_list) {
+               int slen;
+               struct svc_xprt_class *xcl =
+                       list_entry(le, struct svc_xprt_class, xcl_list);
+
+               sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
+               slen = strlen(tmpstr);
+               if (len + slen > maxlen)
+                       break;
+               len += slen;
+               strcat(buf, tmpstr);
+       }
+       spin_unlock(&svc_xprt_class_lock);
+
+       return len;
+}
+
 static void svc_xprt_free(struct kref *kref)
 {
        struct svc_xprt *xprt =
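For reference, svc_print_xprts() above writes one "name max_payload" pair per registered transport class into the caller's buffer. With the usual udp and tcp classes registered, the result would look roughly like this (the payload sizes are illustrative socket-transport defaults, not values taken from this patch):

    udp 32768
    tcp 1048576
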
@@ -158,7 +186,7 @@ int svc_create_xprt(struct svc_serv *serv, char *xprt_name, unsigned short port,
        struct svc_xprt_class *xcl;
        struct sockaddr_in sin = {
                .sin_family             = AF_INET,
-               .sin_addr.s_addr        = INADDR_ANY,
+               .sin_addr.s_addr        = htonl(INADDR_ANY),
                .sin_port               = htons(port),
        };
        dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
@@ -408,6 +436,7 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
                svc_xprt_enqueue(xprt);
        }
 }
+EXPORT_SYMBOL(svc_reserve);
 
 static void svc_xprt_release(struct svc_rqst *rqstp)
 {
@@ -465,6 +494,7 @@ void svc_wake_up(struct svc_serv *serv)
                spin_unlock_bh(&pool->sp_lock);
        }
 }
+EXPORT_SYMBOL(svc_wake_up);
 
 int svc_port_is_privileged(struct sockaddr *sin)
 {
@@ -557,8 +587,12 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
                while (rqstp->rq_pages[i] == NULL) {
                        struct page *p = alloc_page(GFP_KERNEL);
                        if (!p) {
-                               int j = msecs_to_jiffies(500);
-                               schedule_timeout_uninterruptible(j);
+                               set_current_state(TASK_INTERRUPTIBLE);
+                               if (signalled() || kthread_should_stop()) {
+                                       set_current_state(TASK_RUNNING);
+                                       return -EINTR;
+                               }
+                               schedule_timeout(msecs_to_jiffies(500));
                        }
                        rqstp->rq_pages[i] = p;
                }
@@ -578,7 +612,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 
        try_to_freeze();
        cond_resched();
-       if (signalled())
+       if (signalled() || kthread_should_stop())
                return -EINTR;
 
        spin_lock_bh(&pool->sp_lock);
@@ -597,6 +631,20 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
                 * to bring down the daemons ...
                 */
                set_current_state(TASK_INTERRUPTIBLE);
+
+               /*
+                * checking kthread_should_stop() here allows us to avoid
+                * locking and signalling when stopping kthreads that call
+                * svc_recv. If the thread has already been woken up, then
+                * we can exit here without sleeping. If not, then it'll
+                * be woken up quickly during the schedule_timeout.
+                */
+               if (kthread_should_stop()) {
+                       set_current_state(TASK_RUNNING);
+                       spin_unlock_bh(&pool->sp_lock);
+                       return -EINTR;
+               }
+
                add_wait_queue(&rqstp->rq_wait, &wait);
                spin_unlock_bh(&pool->sp_lock);
 
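To make the new kthread_should_stop() handling concrete, here is a minimal sketch of a kthread-style service loop built on top of svc_recv(). It is not part of this patch and the function name is hypothetical; it only illustrates why svc_recv() has to return -EINTR once a stop is requested, so the loop can fall out of its while condition instead of going back to sleep:

static int example_svc_thread(void *data)
{
	struct svc_rqst *rqstp = data;
	int err;

	while (!kthread_should_stop()) {
		/* wait up to an hour for a request */
		err = svc_recv(rqstp, 60 * 60 * HZ);
		if (err == -EINTR)
			continue;	/* signal or stop request; the loop condition re-checks */
		if (err < 0)
			continue;	/* e.g. -EAGAIN: nothing to process yet */
		svc_process(rqstp);
	}
	return 0;
}
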
@@ -612,7 +660,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
                        svc_thread_dequeue(pool, rqstp);
                        spin_unlock_bh(&pool->sp_lock);
                        dprintk("svc: server %p, no data yet\n", rqstp);
-                       return signalled()? -EINTR : -EAGAIN;
+                       if (signalled() || kthread_should_stop())
+                               return -EINTR;
+                       else
+                               return -EAGAIN;
                }
        }
        spin_unlock_bh(&pool->sp_lock);
@@ -675,6 +726,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
                serv->sv_stats->netcnt++;
        return len;
 }
+EXPORT_SYMBOL(svc_recv);
 
 /*
  * Drop request
@@ -684,6 +736,7 @@ void svc_drop(struct svc_rqst *rqstp)
        dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
        svc_xprt_release(rqstp);
 }
+EXPORT_SYMBOL(svc_drop);
 
 /*
  * Return reply to client.
@@ -815,6 +868,7 @@ void svc_close_xprt(struct svc_xprt *xprt)
        clear_bit(XPT_BUSY, &xprt->xpt_flags);
        svc_xprt_put(xprt);
 }
+EXPORT_SYMBOL_GPL(svc_close_xprt);
 
 void svc_close_all(struct list_head *xprt_list)
 {
@@ -859,10 +913,18 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
        svc_xprt_put(xprt);
 }
 
+/*
+ * Save the request off for later processing. The request buffer looks
+ * like this:
+ *
+ * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
+ *
+ * This code can only handle requests that consist of an xprt-header
+ * and rpc-header.
+ */
 static struct cache_deferred_req *svc_defer(struct cache_req *req)
 {
        struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
-       int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
        struct svc_deferred_req *dr;
 
        if (rqstp->rq_arg.page_len)
@@ -871,8 +933,10 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
                dr = rqstp->rq_deferred;
                rqstp->rq_deferred = NULL;
        } else {
-               int skip  = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
+               size_t skip;
+               size_t size;
                /* FIXME maybe discard if size too large */
+               size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
                dr = kmalloc(size, GFP_KERNEL);
                if (dr == NULL)
                        return NULL;
@@ -883,8 +947,12 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
                dr->addrlen = rqstp->rq_addrlen;
                dr->daddr = rqstp->rq_daddr;
                dr->argslen = rqstp->rq_arg.len >> 2;
-               memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip,
-                      dr->argslen<<2);
+               dr->xprt_hlen = rqstp->rq_xprt_hlen;
+
+               /* back up head to the start of the buffer and copy */
+               skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
+               memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
+                      dr->argslen << 2);
        }
        svc_xprt_get(rqstp->rq_xprt);
        dr->xprt = rqstp->rq_xprt;
@@ -900,16 +968,21 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
 {
        struct svc_deferred_req *dr = rqstp->rq_deferred;
 
-       rqstp->rq_arg.head[0].iov_base = dr->args;
-       rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
+       /* setup iov_base past transport header */
+       rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
+       /* The iov_len does not include the transport header bytes */
+       rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
        rqstp->rq_arg.page_len = 0;
-       rqstp->rq_arg.len = dr->argslen<<2;
+       /* The rq_arg.len includes the transport header bytes */
+       rqstp->rq_arg.len     = dr->argslen<<2;
        rqstp->rq_prot        = dr->prot;
        memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
        rqstp->rq_addrlen     = dr->addrlen;
+       /* Save off transport header len in case we get deferred again */
+       rqstp->rq_xprt_hlen   = dr->xprt_hlen;
        rqstp->rq_daddr       = dr->daddr;
        rqstp->rq_respages    = rqstp->rq_pages;
-       return dr->argslen<<2;
+       return (dr->argslen<<2) - dr->xprt_hlen;
 }
 
 
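As a worked example of the byte accounting above (the numbers are purely illustrative): suppose the deferred request had rq_arg.len == 128 bytes, of which rq_xprt_hlen == 24 bytes were transport header (the socket transports typically leave rq_xprt_hlen at 0; a non-zero value would come from a transport such as RDMA). svc_defer() then stores argslen = 128 >> 2 == 32 words plus xprt_hlen = 24. On replay, svc_deferred_recv() points iov_base at dr->args + (24 >> 2), i.e. six words past the saved transport header, sets iov_len to (32 << 2) - 24 == 104 bytes, restores rq_arg.len to the full 128 bytes, and returns 104, the RPC message length without the transport header.
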
@@ -931,3 +1004,74 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
        spin_unlock(&xprt->xpt_lock);
        return dr;
 }
+
+/*
+ * Return the transport instance pointer for the endpoint accepting
+ * connections/peer traffic from the specified transport class,
+ * address family and port.
+ *
+ * Specifying 0 for the address family or port is effectively a
+ * wild-card, and will result in matching the first transport in the
+ * service's list that has a matching class name.
+ */
+struct svc_xprt *svc_find_xprt(struct svc_serv *serv, char *xcl_name,
+                              int af, int port)
+{
+       struct svc_xprt *xprt;
+       struct svc_xprt *found = NULL;
+
+       /* Sanity check the args */
+       if (!serv || !xcl_name)
+               return found;
+
+       spin_lock_bh(&serv->sv_lock);
+       list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
+               if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
+                       continue;
+               if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
+                       continue;
+               if (port && port != svc_xprt_local_port(xprt))
+                       continue;
+               found = xprt;
+               svc_xprt_get(xprt);
+               break;
+       }
+       spin_unlock_bh(&serv->sv_lock);
+       return found;
+}
+EXPORT_SYMBOL_GPL(svc_find_xprt);
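A hypothetical caller (not part of this patch) might use svc_find_xprt() as below to find the port of a service's first TCP listener. Note that the function takes a reference on the transport it returns, which the caller has to drop:

static unsigned short example_get_tcp_port(struct svc_serv *serv)
{
	struct svc_xprt *xprt;
	unsigned short port = 0;

	/* class name "tcp", any address family, any port */
	xprt = svc_find_xprt(serv, "tcp", AF_UNSPEC, 0);
	if (xprt) {
		port = svc_xprt_local_port(xprt);
		svc_xprt_put(xprt);	/* drop the reference taken by svc_find_xprt() */
	}
	return port;
}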
+
+/*
+ * Format a buffer with a list of the active transports. A zero for
+ * the buflen parameter disables target buffer overflow checking.
+ */
+int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen)
+{
+       struct svc_xprt *xprt;
+       char xprt_str[64];
+       int totlen = 0;
+       int len;
+
+       /* Sanity check args */
+       if (!serv)
+               return 0;
+
+       spin_lock_bh(&serv->sv_lock);
+       list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
+               len = snprintf(xprt_str, sizeof(xprt_str),
+                              "%s %d\n", xprt->xpt_class->xcl_name,
+                              svc_xprt_local_port(xprt));
+               /* If the string was truncated, replace with error string */
+               if (len >= sizeof(xprt_str))
+                       strcpy(xprt_str, "name-too-long\n");
+               /* Don't overflow buffer */
+               len = strlen(xprt_str);
+               if (buflen && (len + totlen >= buflen))
+                       break;
+               strcpy(buf+totlen, xprt_str);
+               totlen += len;
+       }
+       spin_unlock_bh(&serv->sv_lock);
+       return totlen;
+}
+EXPORT_SYMBOL_GPL(svc_xprt_names);
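Similarly, a hypothetical sketch of how svc_xprt_names() might be consumed, for example by a procfs-style read handler that reports the permanent listeners one "name port" line at a time:

static int example_portlist_read(struct svc_serv *serv, char *page)
{
	/* fill at most one page; returns the number of bytes written */
	return svc_xprt_names(serv, page, PAGE_SIZE);
}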