path: root/net/sunrpc/svcsock.c
author	Greg Banks <gnb@melbourne.sgi.com>	2006-10-02 02:18:01 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-02 07:57:20 -0700
commit	bfd241600a3b0db4fe43c859f1460d0a958d924a (patch)
tree	7f04604adee7249e686d1db0cac93f1fee8bc5b6	/net/sunrpc/svcsock.c
parent	eec09661dc82e90a31051d045a94026a91aceb82 (diff)
[PATCH] knfsd: make rpc threads pools numa aware
Actually implement multiple pools. On NUMA machines, allocate a svc_pool per NUMA node; on SMP machines, a svc_pool per CPU; otherwise a single global pool. Enqueue sockets on the svc_pool corresponding to the CPU on which the socket bh is run (i.e. the NIC interrupt CPU). Threads have their cpu mask set to limit them to the CPUs in the svc_pool that owns them.

This is the patch that allows an Altix to scale NFS traffic linearly beyond 4 CPUs and 4 NICs.

Incorporates changes and feedback from Neil Brown, Trond Myklebust, and Christoph Hellwig.

Signed-off-by: Greg Banks <gnb@melbourne.sgi.com>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
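For context, the pool selection that the hunk below performs via svc_pool_for_cpu() can be pictured as a precomputed CPU-to-pool lookup: each possible CPU is mapped once, at pool-creation time, to the pool that owns it (one pool per NUMA node, per CPU, or a single global pool). The snippet below is only an illustrative sketch; the pool_map structure and the names nr_pools, cpu_to_pool and sketch_pool_for_cpu are assumptions made here, not code taken from this patch series.

/*
 * Illustrative sketch only -- pool_map, nr_pools and cpu_to_pool are
 * hypothetical names, not from the actual patch series.  The idea is
 * that cpu_to_pool[] is filled when the pools are created, so picking
 * the pool for the current CPU is a constant-time array lookup.
 */
struct pool_map {
	unsigned int	nr_pools;	/* 1 on UP, else one per CPU or per node */
	unsigned int	*cpu_to_pool;	/* indexed by CPU number */
};

static struct svc_pool *
sketch_pool_for_cpu(struct svc_serv *serv, struct pool_map *map, int cpu)
{
	if (map->nr_pools <= 1)		/* single global pool */
		return &serv->sv_pools[0];
	return &serv->sv_pools[map->cpu_to_pool[cpu]];
}

As in the hunk below, a caller would bracket the lookup with get_cpu()/put_cpu() so the CPU number cannot change underneath it while the pool is being chosen.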
Diffstat (limited to 'net/sunrpc/svcsock.c')
-rw-r--r--	net/sunrpc/svcsock.c	7
1 files changed, 6 insertions(+), 1 deletions(-)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index b78659adeff..cba85d19522 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -151,8 +151,9 @@ static void
 svc_sock_enqueue(struct svc_sock *svsk)
 {
 	struct svc_serv *serv = svsk->sk_server;
-	struct svc_pool *pool = &serv->sv_pools[0];
+	struct svc_pool *pool;
 	struct svc_rqst *rqstp;
+	int cpu;
 
 	if (!(svsk->sk_flags &
 	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
@@ -160,6 +161,10 @@ svc_sock_enqueue(struct svc_sock *svsk)
 	if (test_bit(SK_DEAD, &svsk->sk_flags))
 		return;
 
+	cpu = get_cpu();
+	pool = svc_pool_for_cpu(svsk->sk_server, cpu);
+	put_cpu();
+
 	spin_lock_bh(&pool->sp_lock);
 	if (!list_empty(&pool->sp_threads) &&