1 // SPDX-License-Identifier: BSD-3-Clause
3 * linux/net/sunrpc/auth_gss/auth_gss.c
5 * RPCSEC_GSS client authentication.
7 * Copyright (c) 2000 The Regents of the University of Michigan.
10 * Dug Song <dugsong@monkey.org>
11 * Andy Adamson <andros@umich.edu>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/sunrpc/clnt.h>
21 #include <linux/sunrpc/auth.h>
22 #include <linux/sunrpc/auth_gss.h>
23 #include <linux/sunrpc/gss_krb5.h>
24 #include <linux/sunrpc/svcauth_gss.h>
25 #include <linux/sunrpc/gss_err.h>
26 #include <linux/workqueue.h>
27 #include <linux/sunrpc/rpc_pipe_fs.h>
28 #include <linux/sunrpc/gss_api.h>
29 #include <linux/uaccess.h>
30 #include <linux/hashtable.h>
34 #include <trace/events/rpcgss.h>
/* Forward declarations of the auth/cred operation tables defined below. */
36 static const struct rpc_authops authgss_ops;
38 static const struct rpc_credops gss_credops;
/* Ops used for the "null" cred that carries the DESTROY-context call. */
39 static const struct rpc_credops gss_nullops;
/* Seconds to wait before retrying an upcall for an expired credential. */
41 #define GSS_RETRY_EXPIRED 5
42 static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;
/* Minutes before context expiry at which gss_key_timeout() reports -EACCES. */
44 #define GSS_KEY_EXPIRE_TIMEO 240
45 static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;
47 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
48 # define RPCDBG_FACILITY RPCDBG_AUTH
/* Send/receive buffer slack reserved for GSS credential material. */
51 #define GSS_CRED_SLACK (RPC_MAX_AUTH_SIZE * 2)
52 /* length of a krb5 verifier (48), plus data added before arguments when
53 * using integrity (two 4-byte integers): */
54 #define GSS_VERF_SLACK 100
/* Hash of active gss_auth instances, keyed by rpc_clnt (see gss_create_hashed). */
56 static DEFINE_HASHTABLE(gss_auth_hash_table, 4);
57 static DEFINE_SPINLOCK(gss_auth_hash_lock);
/* (excerpt: struct heads elided) fields of struct gss_pipe: */
60 struct rpc_pipe_dir_object pdo;
61 struct rpc_pipe *pipe;
62 struct rpc_clnt *clnt;
/* fields of struct gss_auth: */
69 struct hlist_node hash;
70 struct rpc_auth rpc_auth;
71 struct gss_api_mech *mech;
72 enum rpc_gss_svc service;
73 struct rpc_clnt *client;
76 * There are two upcall pipes; dentry[1], named "gssd", is used
77 * for the new text-based upcall; dentry[0] is named after the
78 * mechanism (for example, "krb5") and exists for
79 * backwards-compatibility with older gssd's.
81 struct gss_pipe *gss_pipe[2];
82 const char *target_name;
85 /* pipe_version >= 0 if and only if someone has a pipe open. */
86 static DEFINE_SPINLOCK(pipe_version_lock);
87 static struct rpc_wait_queue pipe_version_rpc_waitqueue;
88 static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
89 static void gss_put_auth(struct gss_auth *gss_auth);
91 static void gss_free_ctx(struct gss_cl_ctx *);
92 static const struct rpc_pipe_ops gss_upcall_ops_v0;
93 static const struct rpc_pipe_ops gss_upcall_ops_v1;
/* Take a reference on a GSS client context; pair with gss_put_ctx(). */
95 static inline struct gss_cl_ctx *
96 gss_get_ctx(struct gss_cl_ctx *ctx)
98 refcount_inc(&ctx->count);
/* Drop a reference; the final put frees the context (gss_free_ctx). */
103 gss_put_ctx(struct gss_cl_ctx *ctx)
105 if (refcount_dec_and_test(&ctx->count))
110 * called by gss_upcall_callback and gss_create_upcall in order
111 * to set the gss context. The actual exchange of an old context
112 * and a new one is protected by the pipe->lock.
115 gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
117 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
/* Only a cred still in the NEW state may have its context installed. */
119 if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
122 rcu_assign_pointer(gss_cred->gc_ctx, ctx);
123 set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
/* Ensure UPTODATE is visible before NEW is cleared (see test_bit above). */
124 smp_mb__before_atomic();
125 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
/* Bounds-checked read of len raw bytes from [p, end); returns the advanced
 * pointer, or ERR_PTR(-EFAULT) if the read would pass end or wrap. */
129 simple_get_bytes(const void *p, const void *end, void *res, size_t len)
131 const void *q = (const void *)((const char *)p + len);
132 if (unlikely(q > end || q < p))
133 return ERR_PTR(-EFAULT);
/* Read a length-prefixed blob from [p, end) into dest (data kmemdup'd);
 * returns the advanced pointer or an ERR_PTR on bounds/alloc failure. */
138 static inline const void *
139 simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
144 p = simple_get_bytes(p, end, &len, sizeof(len));
147 q = (const void *)((const char *)p + len);
148 if (unlikely(q > end || q < p))
149 return ERR_PTR(-EFAULT);
150 dest->data = kmemdup(p, len, GFP_NOFS);
151 if (unlikely(dest->data == NULL))
152 return ERR_PTR(-ENOMEM);
/* Return a referenced copy of the cred's current context, or NULL. */
157 static struct gss_cl_ctx *
158 gss_cred_get_ctx(struct rpc_cred *cred)
160 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
161 struct gss_cl_ctx *ctx = NULL;
164 ctx = rcu_dereference(gss_cred->gc_ctx);
/* Allocate a zeroed gss_cl_ctx with refcount 1 and default proc/seq. */
171 static struct gss_cl_ctx *
172 gss_alloc_context(void)
174 struct gss_cl_ctx *ctx;
176 ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
178 ctx->gc_proc = RPC_GSS_PROC_DATA;
179 ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */
180 spin_lock_init(&ctx->gc_seq_lock);
181 refcount_set(&ctx->count,1);
/* Floor (in seconds) applied to the context lifetime reported by gssd. */
186 #define GSSD_MIN_TIMEOUT (60 * 60)
/* Parse the downcall payload from gssd in [p, end) into ctx, importing the
 * security context for mechanism gm.  Returns the advanced pointer or an
 * ERR_PTR on parse/import failure. */
188 gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
192 unsigned int timeout;
193 unsigned long now = jiffies;
197 /* First unsigned int gives the remaining lifetime in seconds of the
198 * credential - e.g. the remaining TGT lifetime for Kerberos or
199 * the -t value passed to GSSD.
201 p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
205 timeout = GSSD_MIN_TIMEOUT;
206 ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
207 /* Sequence number window. Determines the maximum number of
208 * simultaneous requests
210 p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
213 ctx->gc_win = window_size;
214 /* gssd signals an error by passing ctx->gc_win = 0: */
215 if (ctx->gc_win == 0) {
217 * in which case, p points to an error code. Anything other
218 * than -EKEYEXPIRED gets converted to -EACCES.
220 p = simple_get_bytes(p, end, &ret, sizeof(ret));
222 p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
226 /* copy the opaque wire context */
227 p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
230 /* import the opaque security context */
231 p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
/* Bounds-check the seclen-sized blob before handing it to the mechanism. */
234 q = (const void *)((const char *)p + seclen);
235 if (unlikely(q > end || q < p)) {
236 p = ERR_PTR(-EFAULT);
239 ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
241 trace_rpcgss_import_ctx(ret);
246 /* is there any trailing data? */
252 /* pull in acceptor name (if there is one) */
253 p = simple_get_netobj(q, end, &ctx->gc_acceptor);
257 trace_rpcgss_context(ctx->gc_expiry, now, timeout,
258 ctx->gc_acceptor.len, ctx->gc_acceptor.data);
263 /* XXX: Need some documentation about why UPCALL_BUF_LEN is so small.
264 * Is user space expecting no more than UPCALL_BUF_LEN bytes?
265 * Note that there are now _two_ NI_MAXHOST sized data items
266 * being passed in this string.
268 #define UPCALL_BUF_LEN 256
/* One in-flight request to gssd; lives on pipe->in_downcall while queued. */
270 struct gss_upcall_msg {
273 const char *service_name;
274 struct rpc_pipe_msg msg;
275 struct list_head list;
276 struct gss_auth *auth;
277 struct rpc_pipe *pipe;
278 struct rpc_wait_queue rpc_waitqueue;
279 wait_queue_head_t waitqueue;
280 struct gss_cl_ctx *ctx;
/* Encoded upcall text/binary payload; msg.data points into this buffer. */
281 char databuf[UPCALL_BUF_LEN];
/* Take a pipe-user reference and return the current pipe version, or a
 * negative value if no gssd pipe is open (version still -1). */
284 static int get_pipe_version(struct net *net)
286 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
289 spin_lock(&pipe_version_lock);
290 if (sn->pipe_version >= 0) {
291 atomic_inc(&sn->pipe_users);
292 ret = sn->pipe_version;
295 spin_unlock(&pipe_version_lock);
/* Drop a pipe-user reference; the last user resets pipe_version to -1. */
299 static void put_pipe_version(struct net *net)
301 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
303 if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
304 sn->pipe_version = -1;
305 spin_unlock(&pipe_version_lock);
/* Drop a reference on gss_msg; the final put releases the pipe version,
 * the embedded context reference, the auth, and the message itself. */
310 gss_release_msg(struct gss_upcall_msg *gss_msg)
312 struct net *net = gss_msg->auth->net;
313 if (!refcount_dec_and_test(&gss_msg->count))
315 put_pipe_version(net);
316 BUG_ON(!list_empty(&gss_msg->list));
317 if (gss_msg->ctx != NULL)
318 gss_put_ctx(gss_msg->ctx);
319 rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
320 gss_put_auth(gss_msg->auth);
321 kfree_const(gss_msg->service_name);
/* Find a queued upcall matching uid (and, if auth != NULL, the same GSS
 * service); returns it with an extra reference, caller holds pipe->lock. */
325 static struct gss_upcall_msg *
326 __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
328 struct gss_upcall_msg *pos;
329 list_for_each_entry(pos, &pipe->in_downcall, list) {
330 if (!uid_eq(pos->uid, uid))
332 if (auth && pos->auth->service != auth->service)
334 refcount_inc(&pos->count);
340 /* Try to add an upcall to the pipefs queue.
341 * If an upcall owned by our uid already exists, then we return a reference
342 * to that upcall instead of adding the new upcall.
344 static inline struct gss_upcall_msg *
345 gss_add_msg(struct gss_upcall_msg *gss_msg)
347 struct rpc_pipe *pipe = gss_msg->pipe;
348 struct gss_upcall_msg *old;
350 spin_lock(&pipe->lock);
351 old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
353 refcount_inc(&gss_msg->count);
354 list_add(&gss_msg->list, &pipe->in_downcall);
357 spin_unlock(&pipe->lock);
/* Remove gss_msg from the downcall list and wake all waiters; caller holds
 * pipe->lock.  Drops the list's reference on the message. */
362 __gss_unhash_msg(struct gss_upcall_msg *gss_msg)
364 list_del_init(&gss_msg->list);
365 rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
366 wake_up_all(&gss_msg->waitqueue);
367 refcount_dec(&gss_msg->count);
/* Locked wrapper around __gss_unhash_msg(); cheap unlocked early-out if
 * the message is already off the list. */
371 gss_unhash_msg(struct gss_upcall_msg *gss_msg)
373 struct rpc_pipe *pipe = gss_msg->pipe;
375 if (list_empty(&gss_msg->list))
377 spin_lock(&pipe->lock);
378 if (!list_empty(&gss_msg->list))
379 __gss_unhash_msg(gss_msg);
380 spin_unlock(&pipe->lock);
/* Apply the downcall outcome to the cred: install the new context on
 * success, mark the cred NEGATIVE on failure, then wake waiting tasks.
 * Called with pipe->lock held. */
384 gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
386 switch (gss_msg->msg.errno) {
388 if (gss_msg->ctx == NULL)
390 clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
391 gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
394 set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
396 gss_cred->gc_upcall_timestamp = jiffies;
397 gss_cred->gc_upcall = NULL;
398 rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
/* rpc_task callback run when an async refresh upcall completes; propagates
 * msg.errno into the task and releases the callback's message reference. */
402 gss_upcall_callback(struct rpc_task *task)
404 struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
405 struct gss_cred, gc_base);
406 struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
407 struct rpc_pipe *pipe = gss_msg->pipe;
409 spin_lock(&pipe->lock);
410 gss_handle_downcall_result(gss_cred, gss_msg);
411 spin_unlock(&pipe->lock);
412 task->tk_status = gss_msg->msg.errno;
413 gss_release_msg(gss_msg);
/* Legacy v0 upcall format: the payload is just the (munged) uid. */
416 static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg,
417 const struct cred *cred)
419 struct user_namespace *userns = cred->user_ns;
421 uid_t uid = from_kuid_munged(userns, gss_msg->uid);
422 memcpy(gss_msg->databuf, &uid, sizeof(uid));
423 gss_msg->msg.data = gss_msg->databuf;
424 gss_msg->msg.len = sizeof(uid);
426 BUILD_BUG_ON(sizeof(uid) > sizeof(gss_msg->databuf));
/* pipe_ops upcall for the v0 (binary) pipe: encode lazily on first read. */
430 gss_v0_upcall(struct file *file, struct rpc_pipe_msg *msg,
431 char __user *buf, size_t buflen)
433 struct gss_upcall_msg *gss_msg = container_of(msg,
434 struct gss_upcall_msg,
436 if (msg->copied == 0)
437 gss_encode_v0_msg(gss_msg, file->f_cred);
438 return rpc_pipe_generic_upcall(file, msg, buf, buflen);
/* v1 (text) upcall format: "mech=... uid=... [target=...] [service=...
 * [srchost=...]] [enctypes=...]\n" built into databuf with scnprintf. */
441 static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
442 const char *service_name,
443 const char *target_name,
444 const struct cred *cred)
446 struct user_namespace *userns = cred->user_ns;
447 struct gss_api_mech *mech = gss_msg->auth->mech;
448 char *p = gss_msg->databuf;
449 size_t buflen = sizeof(gss_msg->databuf);
452 len = scnprintf(p, buflen, "mech=%s uid=%d", mech->gm_name,
453 from_kuid_munged(userns, gss_msg->uid));
456 gss_msg->msg.len = len;
459 * target= is a full service principal that names the remote
460 * identity that we are authenticating to.
463 len = scnprintf(p, buflen, " target=%s", target_name);
466 gss_msg->msg.len += len;
470 * gssd uses service= and srchost= to select a matching key from
471 * the system's keytab to use as the source principal.
473 * service= is the service name part of the source principal,
474 * or "*" (meaning choose any).
476 * srchost= is the hostname part of the source principal. When
477 * not provided, gssd uses the local hostname.
480 char *c = strchr(service_name, '@');
/* No '@': service only; otherwise split into service + srchost parts. */
483 len = scnprintf(p, buflen, " service=%s",
486 len = scnprintf(p, buflen,
487 " service=%.*s srchost=%s",
488 (int)(c - service_name),
489 service_name, c + 1);
492 gss_msg->msg.len += len;
495 if (mech->gm_upcall_enctypes) {
496 len = scnprintf(p, buflen, " enctypes=%s",
497 mech->gm_upcall_enctypes);
500 gss_msg->msg.len += len;
502 trace_rpcgss_upcall_msg(gss_msg->databuf);
503 len = scnprintf(p, buflen, "\n");
506 gss_msg->msg.len += len;
507 gss_msg->msg.data = gss_msg->databuf;
/* pipe_ops upcall for the v1 (text) pipe: encode lazily on first read. */
515 gss_v1_upcall(struct file *file, struct rpc_pipe_msg *msg,
516 char __user *buf, size_t buflen)
518 struct gss_upcall_msg *gss_msg = container_of(msg,
519 struct gss_upcall_msg,
522 if (msg->copied == 0) {
523 err = gss_encode_v1_msg(gss_msg,
524 gss_msg->service_name,
525 gss_msg->auth->target_name,
530 return rpc_pipe_generic_upcall(file, msg, buf, buflen);
/* Allocate and initialize an upcall message bound to the pipe matching the
 * current pipe version; takes a reference on gss_auth and a pipe user. */
533 static struct gss_upcall_msg *
534 gss_alloc_msg(struct gss_auth *gss_auth,
535 kuid_t uid, const char *service_name)
537 struct gss_upcall_msg *gss_msg;
541 gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
/* May fail with -EAGAIN while no gssd pipe is open yet. */
544 vers = get_pipe_version(gss_auth->net);
548 gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
549 INIT_LIST_HEAD(&gss_msg->list);
550 rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
551 init_waitqueue_head(&gss_msg->waitqueue);
552 refcount_set(&gss_msg->count, 1);
554 gss_msg->auth = gss_auth;
555 kref_get(&gss_auth->kref);
557 gss_msg->service_name = kstrdup_const(service_name, GFP_NOFS);
558 if (!gss_msg->service_name) {
560 goto err_put_pipe_version;
564 err_put_pipe_version:
565 put_pipe_version(gss_auth->net);
/* Create (or reuse) an upcall for cred and queue it toward gssd.  If this
 * caller's message was the one added, it is handed to rpc_queue_upcall();
 * on queueing failure the message is unhashed and an ERR_PTR returned. */
572 static struct gss_upcall_msg *
573 gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
575 struct gss_cred *gss_cred = container_of(cred,
576 struct gss_cred, gc_base);
577 struct gss_upcall_msg *gss_new, *gss_msg;
578 kuid_t uid = cred->cr_cred->fsuid;
580 gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
583 gss_msg = gss_add_msg(gss_new);
584 if (gss_msg == gss_new) {
/* Extra ref covers the time the message sits in the pipe queue. */
586 refcount_inc(&gss_msg->count);
587 res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
589 gss_unhash_msg(gss_new);
590 refcount_dec(&gss_msg->count);
591 gss_release_msg(gss_new);
592 gss_msg = ERR_PTR(res);
/* A matching upcall already existed; drop our newly-built duplicate. */
595 gss_release_msg(gss_new);
/* One-line hint for admins when gssd appears to be absent. */
599 static void warn_gssd(void)
601 dprintk("AUTH_GSS upcall failed. Please check user daemon is running.\n");
/* Async (rpc_task) path for refreshing a cred's GSS context: start or join
 * an upcall and put the task to sleep until gssd answers. */
605 gss_refresh_upcall(struct rpc_task *task)
607 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
608 struct gss_auth *gss_auth = container_of(cred->cr_auth,
609 struct gss_auth, rpc_auth);
610 struct gss_cred *gss_cred = container_of(cred,
611 struct gss_cred, gc_base);
612 struct gss_upcall_msg *gss_msg;
613 struct rpc_pipe *pipe;
616 gss_msg = gss_setup_upcall(gss_auth, cred);
617 if (PTR_ERR(gss_msg) == -EAGAIN) {
618 /* XXX: warning on the first, under the assumption we
619 * shouldn't normally hit this case on a refresh. */
/* No pipe open yet: sleep until gssd opens one (or 15s elapse). */
621 rpc_sleep_on_timeout(&pipe_version_rpc_waitqueue,
622 task, NULL, jiffies + (15 * HZ));
626 if (IS_ERR(gss_msg)) {
627 err = PTR_ERR(gss_msg);
630 pipe = gss_msg->pipe;
631 spin_lock(&pipe->lock);
632 if (gss_cred->gc_upcall != NULL)
633 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
634 else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
635 gss_cred->gc_upcall = gss_msg;
636 /* gss_upcall_callback will release the reference to gss_upcall_msg */
637 refcount_inc(&gss_msg->count);
638 rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
/* Downcall already completed: consume the result immediately. */
640 gss_handle_downcall_result(gss_cred, gss_msg);
641 err = gss_msg->msg.errno;
643 spin_unlock(&pipe->lock);
644 gss_release_msg(gss_msg);
646 trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
647 cred->cr_cred->fsuid), err);
/* Synchronous path (process context) for obtaining a cred's first GSS
 * context: queue an upcall and block killably until gssd responds. */
652 gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
654 struct net *net = gss_auth->net;
655 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
656 struct rpc_pipe *pipe;
657 struct rpc_cred *cred = &gss_cred->gc_base;
658 struct gss_upcall_msg *gss_msg;
664 /* if gssd is down, just skip upcalling altogether */
665 if (!gssd_running(net)) {
670 gss_msg = gss_setup_upcall(gss_auth, cred);
671 if (PTR_ERR(gss_msg) == -EAGAIN) {
672 err = wait_event_interruptible_timeout(pipe_version_waitqueue,
673 sn->pipe_version >= 0, 15 * HZ);
674 if (sn->pipe_version < 0) {
682 if (IS_ERR(gss_msg)) {
683 err = PTR_ERR(gss_msg);
686 pipe = gss_msg->pipe;
/* Wait for the downcall; only a fatal signal breaks out of the wait. */
688 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
689 spin_lock(&pipe->lock);
690 if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
693 spin_unlock(&pipe->lock);
694 if (fatal_signal_pending(current)) {
701 gss_cred_set_ctx(cred, gss_msg->ctx);
703 err = gss_msg->msg.errno;
704 spin_unlock(&pipe->lock);
706 finish_wait(&gss_msg->waitqueue, &wait);
707 gss_release_msg(gss_msg);
709 trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
710 cred->cr_cred->fsuid), err);
/* Upper bound on a single downcall write from gssd. */
714 #define MSG_BUF_MAXSIZE 1024
/* Handle a downcall write from gssd: copy the buffer in, parse the uid,
 * locate the matching queued upcall, and fill in its GSS context (or an
 * error).  Runs in the context of gssd's write(2). */
717 gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
721 struct gss_upcall_msg *gss_msg;
722 struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
723 struct gss_cl_ctx *ctx;
726 ssize_t err = -EFBIG;
728 if (mlen > MSG_BUF_MAXSIZE)
731 buf = kmalloc(mlen, GFP_NOFS);
736 if (copy_from_user(buf, src, mlen))
739 end = (const void *)((char *)buf + mlen);
740 p = simple_get_bytes(buf, end, &id, sizeof(id));
/* uid arrives in gssd's (current) user namespace; reject unmapped ids. */
746 uid = make_kuid(current_user_ns(), id);
747 if (!uid_valid(uid)) {
753 ctx = gss_alloc_context();
758 /* Find a matching upcall */
759 spin_lock(&pipe->lock);
760 gss_msg = __gss_find_upcall(pipe, uid, NULL);
761 if (gss_msg == NULL) {
762 spin_unlock(&pipe->lock);
765 list_del_init(&gss_msg->list);
766 spin_unlock(&pipe->lock);
768 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
/* Map parse failures onto the message's errno for the waiting task. */
774 gss_msg->msg.errno = err;
781 gss_msg->msg.errno = -EAGAIN;
784 printk(KERN_CRIT "%s: bad return from "
785 "gss_fill_context: %zd\n", __func__, err);
786 gss_msg->msg.errno = -EIO;
788 goto err_release_msg;
790 gss_msg->ctx = gss_get_ctx(ctx);
794 spin_lock(&pipe->lock);
795 __gss_unhash_msg(gss_msg);
796 spin_unlock(&pipe->lock);
797 gss_release_msg(gss_msg);
/* A gssd process opened one of our pipes: the first open in a netns fixes
 * the upcall version; later opens must agree with it. */
806 static int gss_pipe_open(struct inode *inode, int new_version)
808 struct net *net = inode->i_sb->s_fs_info;
809 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
812 spin_lock(&pipe_version_lock);
813 if (sn->pipe_version < 0) {
814 /* First open of any gss pipe determines the version: */
815 sn->pipe_version = new_version;
816 rpc_wake_up(&pipe_version_rpc_waitqueue);
817 wake_up(&pipe_version_waitqueue);
818 } else if (sn->pipe_version != new_version) {
819 /* Trying to open a pipe of a different version */
823 atomic_inc(&sn->pipe_users);
825 spin_unlock(&pipe_version_lock);
/* Open hooks for the legacy binary pipe (v0) and text pipe (v1). */
830 static int gss_pipe_open_v0(struct inode *inode)
832 return gss_pipe_open(inode, 0);
835 static int gss_pipe_open_v1(struct inode *inode)
837 return gss_pipe_open(inode, 1);
/* Last close of a gss pipe: fail every still-queued upcall with -EPIPE
 * and drop the pipe-version user count. */
841 gss_pipe_release(struct inode *inode)
843 struct net *net = inode->i_sb->s_fs_info;
844 struct rpc_pipe *pipe = RPC_I(inode)->pipe;
845 struct gss_upcall_msg *gss_msg;
848 spin_lock(&pipe->lock);
849 list_for_each_entry(gss_msg, &pipe->in_downcall, list) {
851 if (!list_empty(&gss_msg->msg.list))
853 gss_msg->msg.errno = -EPIPE;
854 refcount_inc(&gss_msg->count);
855 __gss_unhash_msg(gss_msg);
/* Drop the lock to release; restart the list walk afterwards. */
856 spin_unlock(&pipe->lock);
857 gss_release_msg(gss_msg);
860 spin_unlock(&pipe->lock);
862 put_pipe_version(net);
/* pipefs is discarding a queued message (e.g. timeout): fail waiters. */
866 gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
868 struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
870 if (msg->errno < 0) {
871 refcount_inc(&gss_msg->count);
872 gss_unhash_msg(gss_msg);
873 if (msg->errno == -ETIMEDOUT)
875 gss_release_msg(gss_msg);
877 gss_release_msg(gss_msg);
/* rpc_pipe_dir_object callbacks: create/remove the pipe's dentry when the
 * client directory appears/disappears in rpc_pipefs. */
880 static void gss_pipe_dentry_destroy(struct dentry *dir,
881 struct rpc_pipe_dir_object *pdo)
883 struct gss_pipe *gss_pipe = pdo->pdo_data;
884 struct rpc_pipe *pipe = gss_pipe->pipe;
886 if (pipe->dentry != NULL) {
887 rpc_unlink(pipe->dentry);
892 static int gss_pipe_dentry_create(struct dentry *dir,
893 struct rpc_pipe_dir_object *pdo)
895 struct gss_pipe *p = pdo->pdo_data;
896 struct dentry *dentry;
898 dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
900 return PTR_ERR(dentry);
901 p->pipe->dentry = dentry;
905 static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = {
906 .create = gss_pipe_dentry_create,
907 .destroy = gss_pipe_dentry_destroy,
/* Allocate a gss_pipe with its rpc_pipe data and dir-object initialized. */
910 static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt,
912 const struct rpc_pipe_ops *upcall_ops)
917 p = kmalloc(sizeof(*p), GFP_KERNEL);
920 p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
921 if (IS_ERR(p->pipe)) {
922 err = PTR_ERR(p->pipe);
923 goto err_free_gss_pipe;
928 rpc_init_pipe_dir_object(&p->pdo,
929 &gss_pipe_dir_object_ops,
/* Lookup key passed through rpc_find_or_alloc_pipe_dir_object(). */
938 struct gss_alloc_pdo {
939 struct rpc_clnt *clnt;
941 const struct rpc_pipe_ops *upcall_ops;
/* Match callback: same ops and same pipe name => reuse, taking a kref. */
944 static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data)
946 struct gss_pipe *gss_pipe;
947 struct gss_alloc_pdo *args = data;
949 if (pdo->pdo_ops != &gss_pipe_dir_object_ops)
951 gss_pipe = container_of(pdo, struct gss_pipe, pdo);
952 if (strcmp(gss_pipe->name, args->name) != 0)
954 if (!kref_get_unless_zero(&gss_pipe->kref))
/* Alloc callback: build a fresh gss_pipe when no existing one matched. */
959 static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data)
961 struct gss_pipe;
962 struct gss_alloc_pdo *args = data;
964 gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops);
965 if (!IS_ERR(gss_pipe))
966 return &gss_pipe->pdo;
/* Find an existing gss_pipe for (clnt, name) or allocate a new one. */
970 static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt,
972 const struct rpc_pipe_ops *upcall_ops)
974 struct net *net = rpc_net_ns(clnt);
975 struct rpc_pipe_dir_object *pdo;
976 struct gss_alloc_pdo args = {
979 .upcall_ops = upcall_ops,
982 pdo = rpc_find_or_alloc_pipe_dir_object(net,
983 &clnt->cl_pipedir_objects,
988 return container_of(pdo, struct gss_pipe, pdo);
989 return ERR_PTR(-ENOMEM);
/* Tear down a gss_pipe: unregister the dir object and free pipe data. */
992 static void __gss_pipe_free(struct gss_pipe *p)
994 struct rpc_clnt *clnt = p->clnt;
995 struct net *net = rpc_net_ns(clnt);
997 rpc_remove_pipe_dir_object(net,
998 &clnt->cl_pipedir_objects,
1000 rpc_destroy_pipe_data(p->pipe);
/* kref release: free the pipe once the last reference is dropped. */
1004 static void __gss_pipe_release(struct kref *kref)
1006 struct gss_pipe *p = container_of(kref, struct gss_pipe, kref);
1011 static void gss_pipe_free(struct gss_pipe *p)
1014 kref_put(&p->kref, __gss_pipe_release);
1018 * NOTE: we have the opportunity to use different
1019 * parameters based on the input flavor (which must be a pseudoflavor)
/* Build a fresh gss_auth for (args, clnt): resolve the mechanism and
 * service from the pseudoflavor, set up the cred cache, and register both
 * upcall pipes.  On any failure, unwinds in reverse order via the error
 * labels at the bottom. */
1021 static struct gss_auth *
1022 gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
1024 rpc_authflavor_t flavor = args->pseudoflavor;
1025 struct gss_auth *gss_auth;
1026 struct gss_pipe *gss_pipe;
1027 struct rpc_auth * auth;
1028 int err = -ENOMEM; /* XXX? */
1030 if (!try_module_get(THIS_MODULE))
1031 return ERR_PTR(err);
1032 if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
1034 INIT_HLIST_NODE(&gss_auth->hash);
1035 gss_auth->target_name = NULL;
1036 if (args->target_name) {
1037 gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL);
1038 if (gss_auth->target_name == NULL)
1041 gss_auth->client = clnt;
1042 gss_auth->net = get_net(rpc_net_ns(clnt));
1044 gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
1045 if (!gss_auth->mech)
1047 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
1048 if (gss_auth->service == 0)
1050 if (!gssd_running(gss_auth->net))
1052 auth = &gss_auth->rpc_auth;
/* Buffer-slack values are in 32-bit XDR words, hence the >> 2. */
1053 auth->au_cslack = GSS_CRED_SLACK >> 2;
1054 auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2;
1055 auth->au_verfsize = GSS_VERF_SLACK >> 2;
1056 auth->au_ralign = GSS_VERF_SLACK >> 2;
1058 auth->au_ops = &authgss_ops;
1059 auth->au_flavor = flavor;
1060 if (gss_pseudoflavor_to_datatouch(gss_auth->mech, flavor))
1061 auth->au_flags |= RPCAUTH_AUTH_DATATOUCH;
1062 refcount_set(&auth->au_count, 1);
1063 kref_init(&gss_auth->kref);
1065 err = rpcauth_init_credcache(auth);
1069 * Note: if we created the old pipe first, then someone who
1070 * examined the directory at the right moment might conclude
1071 * that we supported only the old pipe. So we instead create
1072 * the new pipe first.
1074 gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1);
1075 if (IS_ERR(gss_pipe)) {
1076 err = PTR_ERR(gss_pipe);
1077 goto err_destroy_credcache;
1079 gss_auth->gss_pipe[1] = gss_pipe;
1081 gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name,
1082 &gss_upcall_ops_v0);
1083 if (IS_ERR(gss_pipe)) {
1084 err = PTR_ERR(gss_pipe);
1085 goto err_destroy_pipe_1;
1087 gss_auth->gss_pipe[0] = gss_pipe;
/* Error unwind: release resources in reverse order of acquisition. */
1091 gss_pipe_free(gss_auth->gss_pipe[1]);
1092 err_destroy_credcache:
1093 rpcauth_destroy_credcache(auth);
1095 gss_mech_put(gss_auth->mech);
1097 put_net(gss_auth->net);
1099 kfree(gss_auth->target_name);
1102 module_put(THIS_MODULE);
1103 trace_rpcgss_createauth(flavor, err);
1104 return ERR_PTR(err);
/* Release everything gss_create_new() acquired, then drop the module ref. */
1108 gss_free(struct gss_auth *gss_auth)
1110 gss_pipe_free(gss_auth->gss_pipe[0]);
1111 gss_pipe_free(gss_auth->gss_pipe[1]);
1112 gss_mech_put(gss_auth->mech);
1113 put_net(gss_auth->net);
1114 kfree(gss_auth->target_name);
1117 module_put(THIS_MODULE);
/* kref release callback: invoked when the last gss_put_auth() runs. */
1121 gss_free_callback(struct kref *kref)
1123 struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);
1129 gss_put_auth(struct gss_auth *gss_auth)
1131 kref_put(&gss_auth->kref, gss_free_callback);
/* rpc_authops .destroy: unhash, drop both pipes and the cred cache, then
 * release our long-lived reference on the gss_auth. */
1135 gss_destroy(struct rpc_auth *auth)
1137 struct gss_auth *gss_auth = container_of(auth,
1138 struct gss_auth, rpc_auth);
1140 if (hash_hashed(&gss_auth->hash)) {
1141 spin_lock(&gss_auth_hash_lock);
1142 hash_del(&gss_auth->hash);
1143 spin_unlock(&gss_auth_hash_lock);
1146 gss_pipe_free(gss_auth->gss_pipe[0]);
1147 gss_auth->gss_pipe[0] = NULL;
1148 gss_pipe_free(gss_auth->gss_pipe[1]);
1149 gss_auth->gss_pipe[1] = NULL;
1150 rpcauth_destroy_credcache(auth);
1152 gss_put_auth(gss_auth);
1156 * Auths may be shared between rpc clients that were cloned from a
1157 * common client with the same xprt, if they also share the flavor and
1160 * The auth is looked up from the oldest parent sharing the same
1161 * cl_xprt, and the auth itself references only that common parent
1162 * (which is guaranteed to last as long as any of its descendants).
/* Look up an existing hashed gss_auth for (args, clnt); if none matches
 * and new != NULL, insert new into the hash.  Returns the auth found with
 * an elevated au_count, or the outcome for new (caller checks). */
1164 static struct gss_auth *
1165 gss_auth_find_or_add_hashed(const struct rpc_auth_create_args *args,
1166 struct rpc_clnt *clnt,
1167 struct gss_auth *new)
1169 struct gss_auth *gss_auth;
1170 unsigned long hashval = (unsigned long)clnt;
1172 spin_lock(&gss_auth_hash_lock);
1173 hash_for_each_possible(gss_auth_hash_table,
1177 if (gss_auth->client != clnt)
1179 if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
/* target_name match: both NULL, or both non-NULL and equal strings. */
1181 if (gss_auth->target_name != args->target_name) {
1182 if (gss_auth->target_name == NULL)
1184 if (args->target_name == NULL)
1186 if (strcmp(gss_auth->target_name, args->target_name))
1189 if (!refcount_inc_not_zero(&gss_auth->rpc_auth.au_count))
1194 hash_add(gss_auth_hash_table, &new->hash, hashval);
1197 spin_unlock(&gss_auth_hash_lock);
/* Create-or-share wrapper: try the hash first, otherwise build a new auth
 * and race to insert it; the loser of the race is destroyed. */
1201 static struct gss_auth *
1202 gss_create_hashed(const struct rpc_auth_create_args *args,
1203 struct rpc_clnt *clnt)
1205 struct gss_auth *gss_auth;
1206 struct gss_auth *new;
1208 gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL);
1209 if (gss_auth != NULL)
1211 new = gss_create_new(args, clnt);
1214 gss_auth = gss_auth_find_or_add_hashed(args, clnt, new);
1215 if (gss_auth != new)
1216 gss_destroy(&new->rpc_auth);
/* rpc_authops .create: walk up to the oldest parent client sharing the
 * same transport switch, then create or reuse a hashed gss_auth for it. */
1221 static struct rpc_auth *
1222 gss_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
1224 struct gss_auth *gss_auth;
1225 struct rpc_xprt_switch *xps = rcu_access_pointer(clnt->cl_xpi.xpi_xpswitch);
1227 while (clnt != clnt->cl_parent) {
1228 struct rpc_clnt *parent = clnt->cl_parent;
1229 /* Find the original parent for this transport */
1230 if (rcu_access_pointer(parent->cl_xpi.xpi_xpswitch) != xps)
1235 gss_auth = gss_create_hashed(args, clnt);
1236 if (IS_ERR(gss_auth))
1237 return ERR_CAST(gss_auth);
1238 return &gss_auth->rpc_auth;
/* Duplicate a gss_cred (sharing its context pointer) so the DESTROY-context
 * NULL call can own an independent refcounted cred. */
1241 static struct gss_cred *
1242 gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
1244 struct gss_cred *new;
1246 /* Make a copy of the cred so that we can reference count it */
1247 new = kzalloc(sizeof(*gss_cred), GFP_NOFS);
1249 struct auth_cred acred = {
1250 .cred = gss_cred->gc_base.cr_cred,
1252 struct gss_cl_ctx *ctx =
1253 rcu_dereference_protected(gss_cred->gc_ctx, 1);
1255 rpcauth_init_cred(&new->gc_base, &acred,
1256 &gss_auth->rpc_auth,
1258 new->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
1259 new->gc_service = gss_cred->gc_service;
1260 new->gc_principal = gss_cred->gc_principal;
1261 kref_get(&gss_auth->kref);
1262 rcu_assign_pointer(new->gc_ctx, ctx);
1269 * gss_send_destroy_context will cause the RPCSEC_GSS to send a NULL RPC call
1270 * to the server with the GSS control procedure field set to
1271 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
1272 * all RPCSEC_GSS state associated with that context.
1275 gss_send_destroy_context(struct rpc_cred *cred)
1277 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1278 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1279 struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
1280 struct gss_cred *new;
1281 struct rpc_task *task;
1283 new = gss_dup_cred(gss_auth, gss_cred);
1285 ctx->gc_proc = RPC_GSS_PROC_DESTROY;
/* Fire-and-forget: async soft NULL call carrying the duplicated cred. */
1287 task = rpc_call_null(gss_auth->client, &new->gc_base,
1288 RPC_TASK_ASYNC|RPC_TASK_SOFT);
1292 put_rpccred(&new->gc_base);
1296 /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
1297 * to create a new cred or context, so they check that things have been
1298 * allocated before freeing them. */
1300 gss_do_free_ctx(struct gss_cl_ctx *ctx)
1302 gss_delete_sec_context(&ctx->gc_gss_ctx);
1303 kfree(ctx->gc_wire_ctx.data);
1304 kfree(ctx->gc_acceptor.data);
/* RCU callback: defers the actual free past the grace period. */
1309 gss_free_ctx_callback(struct rcu_head *head)
1311 struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
1312 gss_do_free_ctx(ctx);
1316 gss_free_ctx(struct gss_cl_ctx *ctx)
1318 call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
1322 gss_free_cred(struct gss_cred *gss_cred)
/* RCU callback used to free the cred after readers are done with it. */
1328 gss_free_cred_callback(struct rcu_head *head)
1330 struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
1331 gss_free_cred(gss_cred);
/* Free a cred without sending DESTROY: detach the context, free the cred
 * via RCU, and drop the references it held. */
1335 gss_destroy_nullcred(struct rpc_cred *cred)
1337 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1338 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1339 struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
1341 RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
1342 put_cred(cred->cr_cred);
1343 call_rcu(&cred->cr_rcu, gss_free_cred_callback);
1346 gss_put_auth(gss_auth);
/* Full cred teardown: notify the server (DESTROY) only if the cred still
 * had an up-to-date context. */
1350 gss_destroy_cred(struct rpc_cred *cred)
1353 if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
1354 gss_send_destroy_context(cred);
1355 gss_destroy_nullcred(cred);
1359 gss_hash_cred(struct auth_cred *acred, unsigned int hashbits)
1361 return hash_64(from_kuid(&init_user_ns, acred->cred->fsuid), hashbits);
1365 * Lookup RPCSEC_GSS cred for the current process
1367 static struct rpc_cred *
1368 gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
1370 return rpcauth_lookup_credcache(auth, acred, flags, GFP_NOFS);
1373 static struct rpc_cred *
1374 gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
1376 struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1377 struct gss_cred *cred = NULL;
1380 if (!(cred = kzalloc(sizeof(*cred), gfp)))
1383 rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
1385 * Note: in order to force a call to call_refresh(), we deliberately
1386 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
1388 cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
1389 cred->gc_service = gss_auth->service;
1390 cred->gc_principal = acred->principal;
1391 kref_get(&gss_auth->kref);
1392 return &cred->gc_base;
1395 return ERR_PTR(err);
/*
 * First-use initializer: drive the gssd upcall to obtain a GSS context
 * for this cred, retrying as long as the upcall reports -EAGAIN.
 */
1399 gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
1401 struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1402 struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
1406 err = gss_create_upcall(gss_auth, gss_cred);
1407 } while (err == -EAGAIN);
/*
 * Return a kmalloc'd, NUL-terminated copy of the context's acceptor
 * name.  The context is only reachable under RCU, so the length is
 * sampled first, the buffer allocated outside the read-side section,
 * and the context re-validated before copying.
 */
1412 gss_stringify_acceptor(struct rpc_cred *cred)
1414 char *string = NULL;
1415 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1416 struct gss_cl_ctx *ctx;
1418 struct xdr_netobj *acceptor;
1421 ctx = rcu_dereference(gss_cred->gc_ctx);
1425 len = ctx->gc_acceptor.len;
1428 /* no point if there's no string */
/* Allocation happens outside the RCU read section (GFP_KERNEL may sleep). */
1432 string = kmalloc(len + 1, GFP_KERNEL);
/* Re-dereference: the context may have changed while we allocated. */
1437 ctx = rcu_dereference(gss_cred->gc_ctx);
1439 /* did the ctx disappear or was it replaced by one with no acceptor? */
1440 if (!ctx || !ctx->gc_acceptor.len) {
1446 acceptor = &ctx->gc_acceptor;
1449 * Did we find a new acceptor that's longer than the original? Allocate
1450 * a longer buffer and try again.
1452 if (len < acceptor->len) {
1453 len = acceptor->len;
1459 memcpy(string, acceptor->data, acceptor->len);
1460 string[acceptor->len] = '\0';
1467 * Returns -EACCES if GSS context is NULL or will expire within the
1468 * timeout (milliseconds)
 *
 * NOTE(review): the window below is jiffies + gss_key_expire_timeo * HZ,
 * and the module parameter is documented in seconds -- the unit stated
 * above looks wrong; confirm before relying on it.
1471 gss_key_timeout(struct rpc_cred *rc)
1473 struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1474 struct gss_cl_ctx *ctx;
1475 unsigned long timeout = jiffies + (gss_key_expire_timeo * HZ);
1479 ctx = rcu_dereference(gss_cred->gc_ctx);
/* Fail if no context, or it expires before the timeout window ends. */
1480 if (!ctx || time_after(timeout, ctx->gc_expiry))
/*
 * Credcache match routine: decide whether cached cred @rc can serve
 * request @acred.  Brand-new creds always match (the upcall fills them
 * in later); expired or stale creds never do; otherwise the principal
 * strings (if any) and the fsuid must agree.
 */
1488 gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
1490 struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1491 struct gss_cl_ctx *ctx;
1494 if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
1496 /* Don't match with creds that have expired. */
1498 ctx = rcu_dereference(gss_cred->gc_ctx);
1499 if (!ctx || time_after(jiffies, ctx->gc_expiry)) {
1504 if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
/* If a principal was requested, both sides must carry the same one. */
1507 if (acred->principal != NULL) {
1508 if (gss_cred->gc_principal == NULL)
1510 ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
1512 if (gss_cred->gc_principal != NULL)
1514 ret = uid_eq(rc->cr_cred->fsuid, acred->cred->fsuid);
1520 * Marshal credentials.
1522 * The expensive part is computing the verifier. We can't cache a
1523 * pre-computed version of the verifier because the seqno, which
1524 * is different every time, is included in the MIC.
1526 static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
1528 struct rpc_rqst *req = task->tk_rqstp;
1529 struct rpc_cred *cred = req->rq_cred;
1530 struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
1532 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1533 __be32 *p, *cred_len;
1535 struct xdr_netobj mic;
1537 struct xdr_buf verf_buf;
/* Reserve room for flavor, length word, cred body and wire context. */
1542 p = xdr_reserve_space(xdr, 7 * sizeof(*p) +
1543 ctx->gc_wire_ctx.len);
1545 goto marshal_failed;
1546 *p++ = rpc_auth_gss;
/* Allocate the next sequence number under gc_seq_lock; hitting MAXSEQ
 * means the window is exhausted and the context must be renewed. */
1549 spin_lock(&ctx->gc_seq_lock);
1550 req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
1551 spin_unlock(&ctx->gc_seq_lock);
1552 if (req->rq_seqno == MAXSEQ)
1554 trace_rpcgss_seqno(task);
1556 *p++ = cpu_to_be32(RPC_GSS_VERSION);
1557 *p++ = cpu_to_be32(ctx->gc_proc);
1558 *p++ = cpu_to_be32(req->rq_seqno);
1559 *p++ = cpu_to_be32(gss_cred->gc_service);
1560 p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
/* Backfill the credential length now that the body is fully encoded. */
1561 *cred_len = cpu_to_be32((p - (cred_len + 1)) << 2);
1565 /* We compute the checksum for the verifier over the xdr-encoded bytes
1566 * starting with the xid and ending at the end of the credential: */
1567 iov.iov_base = req->rq_snd_buf.head[0].iov_base;
1568 iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
1569 xdr_buf_from_iov(&iov, &verf_buf);
1571 p = xdr_reserve_space(xdr, sizeof(*p));
1573 goto marshal_failed;
1574 *p++ = rpc_auth_gss;
1575 mic.data = (u8 *)(p + 1);
1576 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
/* Expired context: mark the cred for refresh rather than hard-failing. */
1577 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1579 else if (maj_stat != 0)
1581 if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
1582 goto marshal_failed;
1588 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1589 status = -EKEYEXPIRED;
1595 trace_rpcgss_get_mic(task, maj_stat);
/*
 * Replace an expired cred on the request with a freshly looked-up one
 * for the same user/principal, dropping the reference to the old cred.
 */
1600 static int gss_renew_cred(struct rpc_task *task)
1602 struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
1603 struct gss_cred *gss_cred = container_of(oldcred,
1606 struct rpc_auth *auth = oldcred->cr_auth;
1607 struct auth_cred acred = {
1608 .cred = oldcred->cr_cred,
1609 .principal = gss_cred->gc_principal,
1611 struct rpc_cred *new;
/* RPCAUTH_LOOKUP_NEW: presumably avoids the stale cache entry -- confirm. */
1613 new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
1615 return PTR_ERR(new);
1616 task->tk_rqstp->rq_cred = new;
1617 put_rpccred(oldcred);
/*
 * Negative-entry check: a cred marked RPCAUTH_CRED_NEGATIVE is treated
 * as still failed while we are inside the retry-delay window starting
 * at the recorded upcall timestamp.
 */
1621 static int gss_cred_is_negative_entry(struct rpc_cred *cred)
1623 if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
1624 unsigned long now = jiffies;
1625 unsigned long begin, expire;
1626 struct gss_cred *gss_cred;
1628 gss_cred = container_of(cred, struct gss_cred, gc_base);
1629 begin = gss_cred->gc_upcall_timestamp;
/* Window length comes from the expired_cred_retry_delay module param. */
1630 expire = begin + gss_expired_cred_retry_delay * HZ;
1632 if (time_in_range_open(now, begin, expire))
1639 * Refresh credentials. XXX - finish
1642 gss_refresh(struct rpc_task *task)
1644 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
/* Inside the negative-cache window: fail fast without a new upcall. */
1647 if (gss_cred_is_negative_entry(cred))
1648 return -EKEYEXPIRED;
/* Neither new nor up-to-date => context has expired; swap in a fresh cred. */
1650 if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
1651 !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
1652 ret = gss_renew_cred(task);
/* gss_renew_cred() may have replaced rq_cred; reload it. */
1655 cred = task->tk_rqstp->rq_cred;
1658 if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
1659 ret = gss_refresh_upcall(task);
1664 /* Dummy refresh routine: used only when destroying the context */
/* NOTE(review): body not visible in this extraction -- it should simply
 * report success so the destroy RPC proceeds; confirm against upstream. */
1666 gss_refresh_null(struct rpc_task *task)
/*
 * Validate the server's RPCSEC_GSS verifier: check the flavor and the
 * bounded length, then verify the MIC, which covers the on-the-wire
 * (big-endian) sequence number of this request.
 */
1672 gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
1674 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1675 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1676 __be32 *p, *seq = NULL;
1678 struct xdr_buf verf_buf;
1679 struct xdr_netobj mic;
1683 p = xdr_inline_decode(xdr, 2 * sizeof(*p));
1685 goto validate_failed;
1686 if (*p++ != rpc_auth_gss)
1687 goto validate_failed;
1688 len = be32_to_cpup(p);
/* Cap the verifier length before decoding it from the stream. */
1689 if (len > RPC_MAX_AUTH_SIZE)
1690 goto validate_failed;
1691 p = xdr_inline_decode(xdr, len);
1693 goto validate_failed;
/* The MIC input is the 4-byte XDR encoding of rq_seqno. */
1695 seq = kmalloc(4, GFP_NOFS);
1697 goto validate_failed;
1698 *seq = cpu_to_be32(task->tk_rqstp->rq_seqno);
1701 xdr_buf_from_iov(&iov, &verf_buf);
1704 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1705 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1706 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1710 /* We leave it to unwrap to calculate au_rslack. For now we just
1711 * calculate the length of the verifier: */
1712 cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
1723 trace_rpcgss_verify_mic(task, maj_stat);
/*
 * Wrap an outgoing request at the integrity service level: emit a
 * databody-length placeholder and the sequence number, encode the upper
 * layer arguments, then append a MIC over seqno+args (RFC 2203 5.3.2.2).
 */
1728 static noinline_for_stack int
1729 gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1730 struct rpc_task *task, struct xdr_stream *xdr)
1732 struct rpc_rqst *rqstp = task->tk_rqstp;
1733 struct xdr_buf integ_buf, *snd_buf = &rqstp->rq_snd_buf;
1734 struct xdr_netobj mic;
1735 __be32 *p, *integ_len;
1736 u32 offset, maj_stat;
/* integ_len is backfilled once the argument length is known. */
1738 p = xdr_reserve_space(xdr, 2 * sizeof(*p));
1742 *p = cpu_to_be32(rqstp->rq_seqno);
1744 if (rpcauth_wrap_req_encode(task, xdr))
/* The MIC covers everything from the seqno to the end of the args. */
1747 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1748 if (xdr_buf_subsegment(snd_buf, &integ_buf,
1749 offset, snd_buf->len - offset))
1751 *integ_len = cpu_to_be32(integ_buf.len);
/* xdr_reserve_space(xdr, 0) yields the current write position. */
1753 p = xdr_reserve_space(xdr, 0);
1756 mic.data = (u8 *)(p + 1);
1757 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
1758 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1759 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1762 /* Check that the trailing MIC fit in the buffer, after the fact */
1763 if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
1769 trace_rpcgss_get_mic(task, maj_stat);
/* Free the pages allocated by alloc_enc_pages() for privacy wrapping. */
1774 priv_release_snd_buf(struct rpc_rqst *rqstp)
1778 for (i=0; i < rqstp->rq_enc_pages_num; i++)
1779 __free_page(rqstp->rq_enc_pages[i]);
1780 kfree(rqstp->rq_enc_pages);
/* Clear the hook so a later re-encode starts from a clean state. */
1781 rqstp->rq_release_snd_buf = NULL;
/*
 * Allocate a page array big enough to hold the encrypted form of the
 * request's page data, plus one spare page for the relocated tail.
 * On failure, any pages that were allocated are released again.
 */
1785 alloc_enc_pages(struct rpc_rqst *rqstp)
1787 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
/* Release pages left over from a previous (re-)encoding, if any. */
1790 if (rqstp->rq_release_snd_buf)
1791 rqstp->rq_release_snd_buf(rqstp);
1793 if (snd_buf->page_len == 0) {
1794 rqstp->rq_enc_pages_num = 0;
1798 first = snd_buf->page_base >> PAGE_SHIFT;
1799 last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
/* The extra "+ 1" page is where gss_wrap_req_priv() moves the tail. */
1800 rqstp->rq_enc_pages_num = last - first + 1 + 1;
1802 = kmalloc_array(rqstp->rq_enc_pages_num,
1803 sizeof(struct page *),
1805 if (!rqstp->rq_enc_pages)
1807 for (i=0; i < rqstp->rq_enc_pages_num; i++) {
1808 rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
1809 if (rqstp->rq_enc_pages[i] == NULL)
1812 rqstp->rq_release_snd_buf = priv_release_snd_buf;
/* Unwind path: free only the i pages that were actually allocated. */
1815 rqstp->rq_enc_pages_num = i;
1816 priv_release_snd_buf(rqstp);
/*
 * Wrap an outgoing request at the privacy service level: encode seqno
 * and arguments, swap in freshly allocated pages to receive the
 * ciphertext, gss_wrap() the buffer in place, then fix up the opaque
 * length and trailing XDR padding.
 */
1821 static noinline_for_stack int
1822 gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1823 struct rpc_task *task, struct xdr_stream *xdr)
1825 struct rpc_rqst *rqstp = task->tk_rqstp;
1826 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
1827 u32 pad, offset, maj_stat;
1829 __be32 *p, *opaque_len;
1830 struct page **inpages;
/* opaque_len is backfilled after gss_wrap() sets the final length. */
1835 p = xdr_reserve_space(xdr, 2 * sizeof(*p));
1839 *p = cpu_to_be32(rqstp->rq_seqno);
1841 if (rpcauth_wrap_req_encode(task, xdr))
1844 status = alloc_enc_pages(rqstp);
1845 if (unlikely(status))
1847 first = snd_buf->page_base >> PAGE_SHIFT;
/* Plaintext pages stay as gss_wrap() input; ciphertext lands in the
 * freshly allocated rq_enc_pages that now back snd_buf. */
1848 inpages = snd_buf->pages + first;
1849 snd_buf->pages = rqstp->rq_enc_pages;
1850 snd_buf->page_base -= first << PAGE_SHIFT;
1852 * Move the tail into its own page, in case gss_wrap needs
1853 * more space in the head when wrapping.
1855 * Still... Why can't gss_wrap just slide the tail down?
1857 if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
1860 tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
1861 memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
1862 snd_buf->tail[0].iov_base = tmp;
1864 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1865 maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
1866 /* slack space should prevent this ever happening: */
1867 if (unlikely(snd_buf->len > snd_buf->buflen))
1869 /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
1870 * done anyway, so it's safe to put the request on the wire: */
1871 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1872 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1876 *opaque_len = cpu_to_be32(snd_buf->len - offset);
1877 /* guess whether the pad goes into the head or the tail: */
1878 if (snd_buf->page_len || snd_buf->tail[0].iov_len)
1879 iov = snd_buf->tail;
1881 iov = snd_buf->head;
1882 p = iov->iov_base + iov->iov_len;
1883 pad = xdr_pad_size(snd_buf->len - offset);
1885 iov->iov_len += pad;
1886 snd_buf->len += pad;
1892 trace_rpcgss_wrap(task, maj_stat);
/*
 * Dispatch request wrapping according to the negotiated GSS service
 * level (none/integrity/privacy).  Control messages (ctx setup or
 * destruction, i.e. gc_proc != RPC_GSS_PROC_DATA) are never wrapped.
 */
1896 static int gss_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
1898 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1899 struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
1901 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1905 if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
1906 /* The spec seems a little ambiguous here, but I think that not
1907 * wrapping context destruction requests makes the most sense.
1909 status = rpcauth_wrap_req_encode(task, xdr);
1912 switch (gss_cred->gc_service) {
1913 case RPC_GSS_SVC_NONE:
1914 status = rpcauth_wrap_req_encode(task, xdr);
1916 case RPC_GSS_SVC_INTEGRITY:
1917 status = gss_wrap_req_integ(cred, ctx, task, xdr);
1919 case RPC_GSS_SVC_PRIVACY:
1920 status = gss_wrap_req_priv(cred, ctx, task, xdr);
/*
 * Unwrap at RPC_GSS_SVC_NONE: nothing beyond the verifier to strip, so
 * reply slack and alignment are simply the verifier size.
 */
1931 gss_unwrap_resp_auth(struct rpc_cred *cred)
1933 struct rpc_auth *auth = cred->cr_auth;
1935 auth->au_rslack = auth->au_verfsize;
1936 auth->au_ralign = auth->au_verfsize;
1941 * RFC 2203, Section 5.3.2.2
1943 * struct rpc_gss_integ_data {
1944 * opaque databody_integ<>;
1945 * opaque checksum<>;
1948 * struct rpc_gss_data_t {
1949 * unsigned int seq_num;
1950 * proc_req_arg_t arg;
1953 static noinline_for_stack int
1954 gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
1955 struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
1956 struct xdr_stream *xdr)
1958 struct xdr_buf gss_data, *rcv_buf = &rqstp->rq_rcv_buf;
1959 struct rpc_auth *auth = cred->cr_auth;
1960 u32 len, offset, seqno, maj_stat;
1961 struct xdr_netobj mic;
1967 /* opaque databody_integ<>; */
1968 if (xdr_stream_decode_u32(xdr, &len))
/* Offset of rpc_gss_data_t (seq_num + arg) within rcv_buf. */
1972 offset = rcv_buf->len - xdr_stream_remaining(xdr);
1973 if (xdr_stream_decode_u32(xdr, &seqno))
1975 if (seqno != rqstp->rq_seqno)
1977 if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len))
1981 * The xdr_stream now points to the beginning of the
1982 * upper layer payload, to be passed below to
1983 * rpcauth_unwrap_resp_decode(). The checksum, which
1984 * follows the upper layer payload in @rcv_buf, is
1985 * located and parsed without updating the xdr_stream.
1988 /* opaque checksum<>; */
1990 if (xdr_decode_word(rcv_buf, offset, &len))
1992 offset += sizeof(__be32);
/* Reject a checksum length that runs past the received data. */
1993 if (offset + len > rcv_buf->len)
1996 mic.data = kmalloc(len, GFP_NOFS);
1999 if (read_bytes_from_xdr_buf(rcv_buf, offset, mic.data, mic.len))
2002 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &gss_data, &mic);
2003 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
2004 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
2005 if (maj_stat != GSS_S_COMPLETE)
/* 2 = databody length word + seq_num; extra 1 = checksum length word. */
2008 auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len);
2009 auth->au_ralign = auth->au_verfsize + 2;
2017 trace_rpcgss_unwrap_failed(task);
2020 trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno);
2023 trace_rpcgss_verify_mic(task, maj_stat);
/*
 * Unwrap a privacy-protected reply: bound the opaque blob, decrypt it
 * in place with gss_unwrap(), verify the decrypted sequence number,
 * then re-initialize the xdr_stream over the now-shorter buffer.
 */
2027 static noinline_for_stack int
2028 gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
2029 struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
2030 struct xdr_stream *xdr)
2032 struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
2033 struct kvec *head = rqstp->rq_rcv_buf.head;
2034 struct rpc_auth *auth = cred->cr_auth;
/* Remember the pre-decrypt length to size the slack afterwards. */
2035 unsigned int savedlen = rcv_buf->len;
2036 u32 offset, opaque_len, maj_stat;
2039 p = xdr_inline_decode(xdr, 2 * sizeof(*p));
2042 opaque_len = be32_to_cpup(p++);
2043 offset = (u8 *)(p) - (u8 *)head->iov_base;
/* Reject a blob that claims to extend past the received data. */
2044 if (offset + opaque_len > rcv_buf->len)
2046 rcv_buf->len = offset + opaque_len;
2048 maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
2049 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
2050 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
2051 if (maj_stat != GSS_S_COMPLETE)
2053 /* gss_unwrap decrypted the sequence number */
2054 if (be32_to_cpup(p++) != rqstp->rq_seqno)
2057 /* gss_unwrap redacts the opaque blob from the head iovec.
2058 * rcv_buf has changed, thus the stream needs to be reset.
2060 xdr_init_decode(xdr, rcv_buf, p, rqstp);
/* Slack covers the length word, seqno, and the stripped GSS overhead. */
2062 auth->au_rslack = auth->au_verfsize + 2 +
2063 XDR_QUADLEN(savedlen - rcv_buf->len);
2064 auth->au_ralign = auth->au_verfsize + 2 +
2065 XDR_QUADLEN(savedlen - rcv_buf->len);
2068 trace_rpcgss_unwrap_failed(task);
2071 trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p));
2074 trace_rpcgss_unwrap(task, maj_stat);
/* Sequence-number comparison that stays correct across u32 wraparound. */
2079 gss_seq_is_newer(u32 new, u32 old)
2081 return (s32)(new - old) > 0;
/*
 * Decide whether a request queued for (re-)transmission must be
 * re-encoded because its GSS sequence number has aged out of the
 * sequence window.  Also advances gc_seq_xmit to the highest seqno
 * handed to the transport, using cmpxchg to tolerate concurrent senders.
 */
2085 gss_xmit_need_reencode(struct rpc_task *task)
2087 struct rpc_rqst *req = task->tk_rqstp;
2088 struct rpc_cred *cred = req->rq_cred;
2089 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
2090 u32 win, seq_xmit = 0;
2096 if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
2099 seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
2100 while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
2103 seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
2104 if (seq_xmit == tmp) {
/* Re-encode when rq_seqno has fallen below the window's lower edge. */
2112 ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win);
2117 trace_rpcgss_need_reencode(task, seq_xmit, ret);
/*
 * Dispatch reply unwrapping by service level, then pass the payload to
 * the upper layer's decoder.  Control replies (gc_proc != DATA) skip
 * the per-service unwrapping entirely.
 */
2122 gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
2124 struct rpc_rqst *rqstp = task->tk_rqstp;
2125 struct rpc_cred *cred = rqstp->rq_cred;
2126 struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
2128 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
2131 if (ctx->gc_proc != RPC_GSS_PROC_DATA)
2133 switch (gss_cred->gc_service) {
2134 case RPC_GSS_SVC_NONE:
2135 status = gss_unwrap_resp_auth(cred);
2137 case RPC_GSS_SVC_INTEGRITY:
2138 status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr);
2140 case RPC_GSS_SVC_PRIVACY:
2141 status = gss_unwrap_resp_priv(task, cred, ctx, rqstp, xdr);
2148 status = rpcauth_unwrap_resp_decode(task, xdr);
/* Flavor-level operations registered with the generic RPC auth layer. */
2154 static const struct rpc_authops authgss_ops = {
2155 .owner = THIS_MODULE,
2156 .au_flavor = RPC_AUTH_GSS,
2157 .au_name = "RPCSEC_GSS",
2158 .create = gss_create,
2159 .destroy = gss_destroy,
2160 .hash_cred = gss_hash_cred,
2161 .lookup_cred = gss_lookup_cred,
2162 .crcreate = gss_create_cred,
2163 .info2flavor = gss_mech_info2flavor,
2164 .flavor2info = gss_mech_flavor2info,
/* Per-credential operations for normal (data-carrying) GSS creds. */
2167 static const struct rpc_credops gss_credops = {
2168 .cr_name = "AUTH_GSS",
2169 .crdestroy = gss_destroy_cred,
2170 .cr_init = gss_cred_init,
2171 .crmatch = gss_match,
2172 .crmarshal = gss_marshal,
2173 .crrefresh = gss_refresh,
2174 .crvalidate = gss_validate,
2175 .crwrap_req = gss_wrap_req,
2176 .crunwrap_resp = gss_unwrap_resp,
2177 .crkey_timeout = gss_key_timeout,
2178 .crstringify_acceptor = gss_stringify_acceptor,
2179 .crneed_reencode = gss_xmit_need_reencode,
/* Cred ops used while a context is being torn down: refresh is a no-op
 * (gss_refresh_null) and destroy skips the server round trip. */
2182 static const struct rpc_credops gss_nullops = {
2183 .cr_name = "AUTH_GSS",
2184 .crdestroy = gss_destroy_nullcred,
2185 .crmatch = gss_match,
2186 .crmarshal = gss_marshal,
2187 .crrefresh = gss_refresh_null,
2188 .crvalidate = gss_validate,
2189 .crwrap_req = gss_wrap_req,
2190 .crunwrap_resp = gss_unwrap_resp,
2191 .crstringify_acceptor = gss_stringify_acceptor,
/* Legacy (v0) upcall pipe, named after the mechanism (e.g. "krb5"). */
2194 static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
2195 .upcall = gss_v0_upcall,
2196 .downcall = gss_pipe_downcall,
2197 .destroy_msg = gss_pipe_destroy_msg,
2198 .open_pipe = gss_pipe_open_v0,
2199 .release_pipe = gss_pipe_release,
/* Text-based (v1) "gssd" upcall pipe. */
2202 static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
2203 .upcall = gss_v1_upcall,
2204 .downcall = gss_pipe_downcall,
2205 .destroy_msg = gss_pipe_destroy_msg,
2206 .open_pipe = gss_pipe_open_v1,
2207 .release_pipe = gss_pipe_release,
/* Per-network-namespace init: set up the server-side GSS state. */
2210 static __net_init int rpcsec_gss_init_net(struct net *net)
2212 return gss_svc_init_net(net);
/* Per-network-namespace teardown, mirroring rpcsec_gss_init_net(). */
2215 static __net_exit void rpcsec_gss_exit_net(struct net *net)
2217 gss_svc_shutdown_net(net);
/* Registration table for the per-netns init/exit callbacks. */
2220 static struct pernet_operations rpcsec_gss_net_ops = {
2221 .init = rpcsec_gss_init_net,
2222 .exit = rpcsec_gss_exit_net,
2226 * Initialize RPCSEC_GSS module
2228 static int __init init_rpcsec_gss(void)
2232 err = rpcauth_register(&authgss_ops);
2235 err = gss_svc_init();
2237 goto out_unregister;
2238 err = register_pernet_subsys(&rpcsec_gss_net_ops);
2241 rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
/* Error unwinding undoes the registrations in reverse order. */
2246 rpcauth_unregister(&authgss_ops);
/* Module unload: unregister everything and drain pending RCU frees. */
2251 static void __exit exit_rpcsec_gss(void)
2253 unregister_pernet_subsys(&rpcsec_gss_net_ops);
2255 rpcauth_unregister(&authgss_ops);
2256 rcu_barrier(); /* Wait for completion of call_rcu()'s */
/* "rpc-auth-6" presumably matches the RPC_AUTH_GSS flavor number used
 * for module auto-loading -- confirm against the rpcauth loader. */
2259 MODULE_ALIAS("rpc-auth-6");
2260 MODULE_LICENSE("GPL");
/* Tunables backing gss_cred_is_negative_entry() and gss_key_timeout(). */
2261 module_param_named(expired_cred_retry_delay,
2262 gss_expired_cred_retry_delay,
2264 MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
2265 "the RPC engine retries an expired credential");
2267 module_param_named(key_expire_timeo,
2268 gss_key_expire_timeo,
2270 MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
2271 "credential keys lifetime where the NFS layer cleans up "
2272 "prior to key expiration");
2274 module_init(init_rpcsec_gss)
2275 module_exit(exit_rpcsec_gss)