File: nfs/nfs_syscalls.c
Warning: line 489, column 8: Value stored to 'm' is never read
1 | /* $OpenBSD: nfs_syscalls.c,v 1.117 2021/03/11 13:31:35 jsg Exp $ */ |
2 | /* $NetBSD: nfs_syscalls.c,v 1.19 1996/02/18 11:53:52 fvdl Exp $ */ |
3 | |
4 | /* |
5 | * Copyright (c) 1989, 1993 |
6 | * The Regents of the University of California. All rights reserved. |
7 | * |
8 | * This code is derived from software contributed to Berkeley by |
9 | * Rick Macklem at The University of Guelph. |
10 | * |
11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions |
13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright |
15 | * notice, this list of conditions and the following disclaimer. |
16 | * 2. Redistributions in binary form must reproduce the above copyright |
17 | * notice, this list of conditions and the following disclaimer in the |
18 | * documentation and/or other materials provided with the distribution. |
19 | * 3. Neither the name of the University nor the names of its contributors |
20 | * may be used to endorse or promote products derived from this software |
21 | * without specific prior written permission. |
22 | * |
23 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
24 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
25 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
26 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
27 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
28 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
29 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
30 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
31 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
32 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
33 | * SUCH DAMAGE. |
34 | * |
35 | * @(#)nfs_syscalls.c 8.5 (Berkeley) 3/30/95 |
36 | */ |
37 | |
38 | #include <sys/param.h> |
39 | #include <sys/systm.h> |
40 | #include <sys/kernel.h> |
41 | #include <sys/file.h> |
42 | #include <sys/stat.h> |
43 | #include <sys/vnode.h> |
44 | #include <sys/mount.h> |
45 | #include <sys/pool.h> |
46 | #include <sys/proc.h> |
47 | #include <sys/uio.h> |
48 | #include <sys/malloc.h> |
49 | #include <sys/buf.h> |
50 | #include <sys/mbuf.h> |
51 | #include <sys/socket.h> |
52 | #include <sys/socketvar.h> |
53 | #include <sys/domain.h> |
54 | #include <sys/protosw.h> |
55 | #include <sys/namei.h> |
56 | #include <sys/syslog.h> |
57 | #include <sys/filedesc.h> |
58 | #include <sys/signalvar.h> |
59 | #include <sys/kthread.h> |
60 | #include <sys/queue.h> |
61 | |
62 | #include <sys/syscallargs.h> |
63 | |
64 | #include <netinet/in.h> |
65 | #include <netinet/tcp.h> |
66 | #include <nfs/xdr_subs.h> |
67 | #include <nfs/rpcv2.h> |
68 | #include <nfs/nfsproto.h> |
69 | #include <nfs/nfs.h> |
70 | #include <nfs/nfsrvcache.h> |
71 | #include <nfs/nfsmount.h> |
72 | #include <nfs/nfsnode.h> |
73 | #include <nfs/nfs_var.h> |
74 | |
75 | /* Global defs. */ |
76 | extern int nfs_numasync; |
77 | extern struct nfsstats nfsstats; |
78 | struct nfssvc_sock *nfs_udpsock; |
79 | int nfsd_waiting = 0; |
80 | |
81 | #ifdef NFSSERVER |
82 | struct pool nfsrv_descript_pl; |
83 | |
84 | int nfsrv_getslp(struct nfsd *nfsd); |
85 | |
86 | static int nfs_numnfsd = 0; |
87 | int (*nfsrv3_procs[NFS_NPROCS])(struct nfsrv_descript *, |
88 | struct nfssvc_sock *, struct proc *, struct mbuf **) = { |
89 | nfsrv_null, |
90 | nfsrv_getattr, |
91 | nfsrv_setattr, |
92 | nfsrv_lookup, |
93 | nfsrv3_access, |
94 | nfsrv_readlink, |
95 | nfsrv_read, |
96 | nfsrv_write, |
97 | nfsrv_create, |
98 | nfsrv_mkdir, |
99 | nfsrv_symlink, |
100 | nfsrv_mknod, |
101 | nfsrv_remove, |
102 | nfsrv_rmdir, |
103 | nfsrv_rename, |
104 | nfsrv_link, |
105 | nfsrv_readdir, |
106 | nfsrv_readdirplus, |
107 | nfsrv_statfs, |
108 | nfsrv_fsinfo, |
109 | nfsrv_pathconf, |
110 | nfsrv_commit, |
111 | nfsrv_noop |
112 | }; |
113 | #endif |
114 | |
115 | TAILQ_HEAD(, nfssvc_sock) nfssvc_sockhead; |
116 | struct nfsdhead nfsd_head; |
117 | |
118 | int nfssvc_sockhead_flag; |
119 | #define SLP_INIT 0x01 /* NFS data undergoing initialization */ |
120 | #define SLP_WANTINIT 0x02 /* thread waiting on NFS initialization */ |
121 | int nfsd_head_flag; |
122 | |
123 | #ifdef NFSCLIENT |
124 | struct proc *nfs_asyncdaemon[NFS_MAXASYNCDAEMON]; |
125 | int nfs_niothreads = -1; |
126 | #endif |
127 | |
128 | int nfssvc_addsock(struct file *, struct mbuf *); |
129 | int nfssvc_nfsd(struct nfsd *); |
130 | void nfsrv_slpderef(struct nfssvc_sock *); |
131 | void nfsrv_zapsock(struct nfssvc_sock *); |
132 | void nfssvc_iod(void *); |
133 | |
134 | /* |
135 | * NFS server pseudo system call for the nfsd's |
136 | * Based on the flag value it either: |
137 | * - adds a socket to the selection list |
138 | * - remains in the kernel as an nfsd |
139 | */ |
140 | int |
141 | sys_nfssvc(struct proc *p, void *v, register_t *retval) |
142 | { |
143 | int error = 0; |
144 | #ifdef NFSSERVER |
145 | struct sys_nfssvc_args /* { |
146 | syscallarg(int) flag; |
147 | syscallarg(caddr_t) argp; |
148 | } */ *uap = v; |
149 | int flags = SCARG(uap, flag); |
150 | struct file *fp; |
151 | struct mbuf *nam; |
152 | struct nfsd_args nfsdarg; |
153 | struct nfsd_srvargs nfsd_srvargs, *nsd = &nfsd_srvargs; |
154 | struct nfsd *nfsd; |
155 | #endif |
156 | |
157 | /* Must be super user */ |
158 | error = suser(p); |
159 | if (error) |
160 | return (error); |
161 | |
162 | #ifndef NFSSERVER |
163 | error = ENOSYS; |
164 | #else |
165 | |
166 | while (nfssvc_sockhead_flag & SLP_INIT) { |
167 | nfssvc_sockhead_flag |= SLP_WANTINIT; |
168 | tsleep_nsec(&nfssvc_sockhead, PSOCK, "nfsd init", INFSLP); |
169 | } |
170 | |
171 | switch (flags) { |
172 | case NFSSVC_ADDSOCK: |
173 | error = copyin(SCARG(uap, argp), &nfsdarg, sizeof(nfsdarg)); |
174 | if (error) |
175 | return (error); |
176 | |
177 | error = getsock(p, nfsdarg.sock, &fp); |
178 | if (error) |
179 | return (error); |
180 | |
181 | /* |
182 | * Get the client address for connected sockets. |
183 | */ |
184 | if (nfsdarg.name == NULL || nfsdarg.namelen == 0) |
185 | nam = NULL; |
186 | else { |
187 | error = sockargs(&nam, nfsdarg.name, nfsdarg.namelen, |
188 | MT_SONAME); |
189 | if (error) { |
190 | FRELE(fp, p); |
191 | return (error); |
192 | } |
193 | } |
194 | error = nfssvc_addsock(fp, nam); |
195 | FRELE(fp, p); |
196 | break; |
197 | case NFSSVC_NFSD: |
198 | error = copyin(SCARG(uap, argp), nsd, sizeof(*nsd)); |
199 | if (error) |
200 | return (error); |
201 | |
202 | nfsd = malloc(sizeof(*nfsd), M_NFSD, M_WAITOK|M_ZERO); |
203 | nfsd->nfsd_procp = p; |
204 | nfsd->nfsd_slp = NULL; |
205 | |
206 | error = nfssvc_nfsd(nfsd); |
207 | break; |
208 | default: |
209 | error = EINVAL; |
210 | break; |
211 | } |
212 | |
213 | if (error == EINTR || error == ERESTART) |
214 | error = 0; |
215 | #endif /* !NFSSERVER */ |
216 | |
217 | return (error); |
218 | } |
219 | |
220 | #ifdef NFSSERVER |
221 | /* |
222 | * Adds a socket to the list for servicing by nfsds. |
223 | */ |
224 | int |
225 | nfssvc_addsock(struct file *fp, struct mbuf *mynam) |
226 | { |
227 | struct mbuf *m; |
228 | int siz; |
229 | struct nfssvc_sock *slp; |
230 | struct socket *so; |
231 | struct nfssvc_sock *tslp; |
232 | int s, error; |
233 | |
234 | so = (struct socket *)fp->f_data; |
235 | tslp = NULL; |
236 | /* |
237 | * Add it to the list, as required. |
238 | */ |
239 | if (so->so_proto->pr_protocol == IPPROTO_UDP) { |
240 | tslp = nfs_udpsock; |
241 | if (tslp->ns_flag & SLP_VALID) { |
242 | m_freem(mynam); |
243 | return (EPERM); |
244 | } |
245 | } |
246 | if (so->so_type == SOCK_STREAM) |
247 | siz = NFS_MAXPACKET + sizeof (u_long); |
248 | else |
249 | siz = NFS_MAXPACKET; |
250 | s = solock(so); |
251 | error = soreserve(so, siz, siz); |
252 | if (error) { |
253 | sounlock(so, s); |
254 | m_freem(mynam); |
255 | return (error); |
256 | } |
257 | |
258 | /* |
259 | * Set protocol specific options { for now TCP only } and |
260 | * reserve some space. For datagram sockets, this can get called |
261 | * repeatedly for the same socket, but that isn't harmful. |
262 | */ |
263 | if (so->so_type == SOCK_STREAM) { |
264 | MGET(m, M_WAIT, MT_SOOPTS); |
265 | *mtod(m, int32_t *) = 1; |
266 | m->m_len = sizeof(int32_t); |
267 | sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m); |
268 | m_freem(m); |
269 | } |
270 | if (so->so_proto->pr_domain->dom_family == AF_INET && |
271 | so->so_proto->pr_protocol == IPPROTO_TCP) { |
272 | MGET(m, M_WAIT, MT_SOOPTS); |
273 | *mtod(m, int32_t *) = 1; |
274 | m->m_len = sizeof(int32_t); |
275 | sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m); |
276 | m_freem(m); |
277 | } |
278 | so->so_rcv.sb_flags &= ~SB_NOINTR; |
279 | so->so_rcv.sb_timeo_nsecs = INFSLP; |
280 | so->so_snd.sb_flags &= ~SB_NOINTR; |
281 | so->so_snd.sb_timeo_nsecs = INFSLP; |
282 | sounlock(so, s); |
283 | if (tslp) |
284 | slp = tslp; |
285 | else { |
286 | slp = malloc(sizeof(*slp), M_NFSSVC, M_WAITOK|M_ZERO); |
287 | TAILQ_INSERT_TAIL(&nfssvc_sockhead, slp, ns_chain); |
288 | } |
289 | slp->ns_so = so; |
290 | slp->ns_nam = mynam; |
291 | FREF(fp); |
292 | slp->ns_fp = fp; |
293 | so->so_upcallarg = (caddr_t)slp; |
294 | so->so_upcall = nfsrv_rcv; |
295 | slp->ns_flag = (SLP_VALID | SLP_NEEDQ); |
296 | nfsrv_wakenfsd(slp); |
297 | return (0); |
298 | } |
299 | |
300 | /* |
301 | * Called by nfssvc() for nfsds. Just loops around servicing rpc requests |
302 | * until it is killed by a signal. |
303 | */ |
304 | int |
305 | nfssvc_nfsd(struct nfsd *nfsd) |
306 | { |
307 | struct mbuf *m; |
308 | int siz; |
309 | struct nfssvc_sock *slp; |
310 | struct socket *so; |
311 | int *solockp; |
312 | struct nfsrv_descript *nd = NULL; |
313 | struct mbuf *mreq; |
314 | int error = 0, cacherep, sotype; |
315 | |
316 | cacherep = RC_DOIT; |
317 | |
318 | TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain); |
319 | nfs_numnfsd++; |
320 | |
321 | /* Loop getting rpc requests until SIGKILL. */ |
322 | loop: |
323 | if (!ISSET(nfsd->nfsd_flag, NFSD_REQINPROG)) { |
324 | |
325 | /* attach an nfssvc_sock to nfsd */ |
326 | error = nfsrv_getslp(nfsd); |
327 | if (error) |
328 | goto done; |
329 | |
330 | slp = nfsd->nfsd_slp; |
331 | |
332 | if (ISSET(slp->ns_flag, SLP_VALID)) { |
333 | if (ISSET(slp->ns_flag, SLP_DISCONN)) { |
334 | nfsrv_zapsock(slp); |
335 | } else if (ISSET(slp->ns_flag, SLP_NEEDQ)) { |
336 | CLR(slp->ns_flag, SLP_NEEDQ); |
337 | nfs_sndlock(&slp->ns_solock, NULL); |
338 | nfsrv_rcv(slp->ns_so, (caddr_t)slp, M_WAIT); |
339 | nfs_sndunlock(&slp->ns_solock); |
340 | } |
341 | |
342 | error = nfsrv_dorec(slp, nfsd, &nd); |
343 | SET(nfsd->nfsd_flag, NFSD_REQINPROG); |
344 | } |
345 | } else { |
346 | error = 0; |
347 | slp = nfsd->nfsd_slp; |
348 | } |
349 | |
350 | if (error || !ISSET(slp->ns_flag, SLP_VALID)) { |
351 | if (nd != NULL) { |
352 | pool_put(&nfsrv_descript_pl, nd); |
353 | nd = NULL; |
354 | } |
355 | nfsd->nfsd_slp = NULL; |
356 | CLR(nfsd->nfsd_flag, NFSD_REQINPROG); |
357 | nfsrv_slpderef(slp); |
358 | goto loop; |
359 | } |
360 | |
361 | so = slp->ns_so; |
362 | sotype = so->so_type; |
363 | if (ISSET(so->so_proto->pr_flags, PR_CONNREQUIRED)) |
364 | solockp = &slp->ns_solock; |
365 | else |
366 | solockp = NULL; |
367 | |
368 | if (nd) { |
369 | if (nd->nd_nam2) |
370 | nd->nd_nam = nd->nd_nam2; |
371 | else |
372 | nd->nd_nam = slp->ns_nam; |
373 | } |
374 | |
375 | cacherep = nfsrv_getcache(nd, slp, &mreq); |
376 | switch (cacherep) { |
377 | case RC_DOIT: |
378 | error = (*(nfsrv3_procs[nd->nd_procnum]))(nd, slp, nfsd->nfsd_procp, &mreq); |
379 | if (mreq == NULL) { |
380 | if (nd != NULL) { |
381 | m_freem(nd->nd_nam2); |
382 | m_freem(nd->nd_mrep); |
383 | } |
384 | break; |
385 | } |
386 | if (error) { |
387 | nfsstats.srv_errs++; |
388 | nfsrv_updatecache(nd, 0, mreq); |
389 | m_freem(nd->nd_nam2); |
390 | break; |
391 | } |
392 | nfsstats.srvrpccnt[nd->nd_procnum]++; |
393 | nfsrv_updatecache(nd, 1, mreq); |
394 | nd->nd_mrep = NULL; |
395 | |
396 | /* FALLTHROUGH */ |
397 | case RC_REPLY: |
398 | m = mreq; |
399 | siz = 0; |
400 | while (m) { |
401 | siz += m->m_len; |
402 | m = m->m_next; |
403 | } |
404 | |
405 | if (siz <= 0 || siz > NFS_MAXPACKET) |
406 | panic("bad nfs svc reply, siz = %i", siz); |
407 | |
408 | m = mreq; |
409 | m->m_pkthdr.len = siz; |
410 | m->m_pkthdr.ph_ifidx = 0; |
411 | |
412 | /* For stream protocols, prepend a Sun RPC Record Mark. */ |
413 | if (sotype == SOCK_STREAM) { |
414 | M_PREPEND(m, NFSX_UNSIGNED, M_WAIT); |
415 | *mtod(m, u_int32_t *) = htonl(0x80000000 | siz); |
416 | } |
417 | |
418 | if (solockp) |
419 | nfs_sndlock(solockp, NULL); |
420 | |
421 | if (ISSET(slp->ns_flag, SLP_VALID)) |
422 | error = nfs_send(so, nd->nd_nam2, m, NULL); |
423 | else { |
424 | error = EPIPE; |
425 | m_freem(m); |
426 | } |
427 | m_freem(nd->nd_nam2); |
428 | m_freem(nd->nd_mrep); |
429 | if (error == EPIPE) |
430 | nfsrv_zapsock(slp); |
431 | if (solockp) |
432 | nfs_sndunlock(solockp); |
433 | if (error == EINTR || error == ERESTART) { |
434 | pool_put(&nfsrv_descript_pl, nd); |
435 | nfsrv_slpderef(slp); |
436 | goto done; |
437 | } |
438 | break; |
439 | case RC_DROPIT: |
440 | m_freem(nd->nd_mrep); |
441 | m_freem(nd->nd_nam2); |
442 | break; |
443 | }; |
444 | |
445 | if (nd) { |
446 | pool_put(&nfsrv_descript_pl, nd); |
447 | nd = NULL; |
448 | } |
449 | |
450 | if (nfsrv_dorec(slp, nfsd, &nd)) { |
451 | nfsd->nfsd_flag &= ~NFSD_REQINPROG; |
452 | nfsd->nfsd_slp = NULL; |
453 | nfsrv_slpderef(slp); |
454 | } |
455 | goto loop; |
456 | |
457 | done: |
458 | TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain); |
459 | free(nfsd, M_NFSD, sizeof(*nfsd)); |
460 | if (--nfs_numnfsd == 0) |
461 | nfsrv_init(1); /* Reinitialize everything */ |
462 | return (error); |
463 | } |
464 | |
465 | /* |
466 | * Shut down a socket associated with an nfssvc_sock structure. |
467 | * Should be called with the send lock set, if required. |
468 | * The trick here is to increment the sref at the start, so that the nfsds |
469 | * will stop using it and clear ns_flag at the end so that it will not be |
470 | * reassigned during cleanup. |
471 | */ |
472 | void |
473 | nfsrv_zapsock(struct nfssvc_sock *slp) |
474 | { |
475 | struct socket *so; |
476 | struct file *fp; |
477 | struct mbuf *m, *n; |
478 | |
479 | slp->ns_flag &= ~SLP_ALLFLAGS; |
480 | fp = slp->ns_fp; |
481 | if (fp) { |
482 | FREF(fp); |
483 | slp->ns_fp = NULL; |
484 | so = slp->ns_so; |
485 | so->so_upcall = NULL; |
486 | soshutdown(so, SHUT_RDWR); |
487 | closef(fp, NULL); |
488 | if (slp->ns_nam) |
489 | m = m_free(slp->ns_nam); |
| Value stored to 'm' is never read |
490 | m_freem(slp->ns_raw); |
491 | m = slp->ns_rec; |
492 | while (m) { |
493 | n = m->m_nextpkt; |
494 | m_freem(m); |
495 | m = n; |
496 | } |
497 | } |
498 | } |
499 | |
500 | /* |
501 | * Dereference a server socket structure. If it has no more references and |
502 | * is no longer valid, you can throw it away. |
503 | */ |
504 | void |
505 | nfsrv_slpderef(struct nfssvc_sock *slp) |
506 | { |
507 | if (--(slp->ns_sref) == 0 && (slp->ns_flag & SLP_VALID) == 0) { |
508 | TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain); |
509 | free(slp, M_NFSSVC, sizeof(*slp)); |
510 | } |
511 | } |
512 | |
513 | /* |
514 | * Initialize the data structures for the server. |
515 | * Handshake with any new nfsds starting up to avoid any chance of |
516 | * corruption. |
517 | */ |
518 | void |
519 | nfsrv_init(int terminating) |
520 | { |
521 | struct nfssvc_sock *slp, *nslp; |
522 | |
523 | if (nfssvc_sockhead_flag & SLP_INIT) |
524 | panic("nfsd init"); |
525 | nfssvc_sockhead_flag |= SLP_INIT; |
526 | if (terminating) { |
527 | for (slp = TAILQ_FIRST(&nfssvc_sockhead); slp != NULL; |
528 | slp = nslp) { |
529 | nslp = TAILQ_NEXT(slp, ns_chain); |
530 | if (slp->ns_flag & SLP_VALID) |
531 | nfsrv_zapsock(slp); |
532 | TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain); |
533 | free(slp, M_NFSSVC, sizeof(*slp)); |
534 | } |
535 | nfsrv_cleancache(); /* And clear out server cache */ |
536 | } |
537 | |
538 | TAILQ_INIT(&nfssvc_sockhead); |
539 | nfssvc_sockhead_flag &= ~SLP_INIT; |
540 | if (nfssvc_sockhead_flag & SLP_WANTINIT) { |
541 | nfssvc_sockhead_flag &= ~SLP_WANTINIT; |
542 | wakeup((caddr_t)&nfssvc_sockhead); |
543 | } |
544 | |
545 | TAILQ_INIT(&nfsd_head); |
546 | nfsd_head_flag &= ~NFSD_CHECKSLP; |
547 | |
548 | nfs_udpsock = malloc(sizeof(*nfs_udpsock), M_NFSSVC, |
549 | M_WAITOK|M_ZERO); |
550 | TAILQ_INSERT_HEAD(&nfssvc_sockhead, nfs_udpsock, ns_chain); |
551 | |
552 | if (!terminating) { |
553 | pool_init(&nfsrv_descript_pl, sizeof(struct nfsrv_descript), |
554 | 0, IPL_NONE, PR_WAITOK, "ndscpl", NULL); |
555 | } |
556 | } |
557 | #endif /* NFSSERVER */ |
558 | |
559 | #ifdef NFSCLIENT |
560 | /* |
561 | * Asynchronous I/O threads for client nfs. |
562 | * They do read-ahead and write-behind operations on the block I/O cache. |
563 | * Never returns unless it fails or gets killed. |
564 | */ |
565 | void |
566 | nfssvc_iod(void *arg) |
567 | { |
568 | struct proc *p = curproc; |
569 | struct buf *bp, *nbp; |
570 | int i, myiod; |
571 | struct vnode *vp; |
572 | int error = 0, s, bufcount; |
573 | |
574 | bufcount = MIN(256, bcstats.kvaslots / 8); |
575 | bufcount = MIN(bufcount, bcstats.numbufs / 8); |
576 | |
577 | /* Assign my position or return error if too many already running. */ |
578 | myiod = -1; |
579 | for (i = 0; i < NFS_MAXASYNCDAEMON; i++) { |
580 | if (nfs_asyncdaemon[i] == NULL) { |
581 | myiod = i; |
582 | break; |
583 | } |
584 | } |
585 | if (myiod == -1) |
586 | kthread_exit(EBUSY); |
587 | |
588 | nfs_asyncdaemon[myiod] = p; |
589 | nfs_numasync++; |
590 | |
591 | /* Upper limit on how many bufs we'll queue up for this iod. */ |
592 | if (nfs_bufqmax > bcstats.kvaslots / 4) { |
593 | nfs_bufqmax = bcstats.kvaslots / 4; |
594 | bufcount = 0; |
595 | } |
596 | if (nfs_bufqmax > bcstats.numbufs / 4) { |
597 | nfs_bufqmax = bcstats.numbufs / 4; |
598 | bufcount = 0; |
599 | } |
600 | |
601 | nfs_bufqmax += bufcount; |
602 | wakeup(&nfs_bufqlen); /* wake up anyone waiting for room to enqueue IO */ |
603 | |
604 | /* Just loop around doin our stuff until SIGKILL. */ |
605 | for (;;) { |
606 | while (TAILQ_FIRST(&nfs_bufq) == NULL && error == 0) { |
607 | error = tsleep_nsec(&nfs_bufq, |
608 | PWAIT | PCATCH, "nfsidl", INFSLP); |
609 | } |
610 | while ((bp = TAILQ_FIRST(&nfs_bufq)) != NULL) { |
611 | /* Take one off the front of the list */ |
612 | TAILQ_REMOVE(&nfs_bufq, bp, b_freelist); |
613 | nfs_bufqlen--; |
614 | wakeup_one(&nfs_bufqlen); |
615 | if (bp->b_flags & B_READ) |
616 | (void) nfs_doio(bp, NULL); |
617 | else do { |
618 | /* |
619 | * Look for a delayed write for the same vnode, so I can do |
620 | * it now. We must grab it before calling nfs_doio() to |
621 | * avoid any risk of the vnode getting vclean()'d while |
622 | * we are doing the write rpc. |
623 | */ |
624 | vp = bp->b_vp; |
625 | s = splbio(); |
626 | LIST_FOREACH(nbp, &vp->v_dirtyblkhd, b_vnbufs) { |
627 | if ((nbp->b_flags & |
628 | (B_BUSY|B_DELWRI|B_NEEDCOMMIT|B_NOCACHE))!=B_DELWRI) |
629 | continue; |
630 | nbp->b_flags |= B_ASYNC; |
631 | bremfree(nbp); |
632 | buf_acquire(nbp); |
633 | break; |
634 | } |
635 | /* |
636 | * For the delayed write, do the first part of nfs_bwrite() |
637 | * up to, but not including nfs_strategy(). |
638 | */ |
639 | if (nbp) { |
640 | nbp->b_flags &= ~(B_READ|B_DONE|B_ERROR); |
641 | buf_undirty(nbp); |
642 | nbp->b_vp->v_numoutput++; |
643 | } |
644 | splx(s); |
645 | |
646 | (void) nfs_doio(bp, NULL); |
647 | } while ((bp = nbp) != NULL); |
648 | } |
649 | if (error) { |
650 | nfs_asyncdaemon[myiod] = NULL; |
651 | nfs_numasync--; |
652 | nfs_bufqmax -= bufcount; |
653 | kthread_exit(error); |
654 | } |
655 | } |
656 | } |
657 | |
658 | void |
659 | nfs_getset_niothreads(int set) |
660 | { |
661 | int i, have, start; |
662 | |
663 | for (have = 0, i = 0; i < NFS_MAXASYNCDAEMON; i++) |
664 | if (nfs_asyncdaemon[i] != NULL) |
665 | have++; |
666 | |
667 | if (set) { |
668 | /* clamp to sane range */ |
669 | nfs_niothreads = max(0, min(nfs_niothreads, NFS_MAXASYNCDAEMON)); |
670 | |
671 | start = nfs_niothreads - have; |
672 | |
673 | while (start > 0) { |
674 | kthread_create(nfssvc_iod, NULL, NULL, "nfsio"); |
675 | start--; |
676 | } |
677 | |
678 | for (i = 0; (start < 0) && (i < NFS_MAXASYNCDAEMON); i++) |
679 | if (nfs_asyncdaemon[i] != NULL) { |
680 | psignal(nfs_asyncdaemon[i], SIGKILL); |
681 | start++; |
682 | } |
683 | } else { |
684 | if (nfs_niothreads >= 0) |
685 | nfs_niothreads = have; |
686 | } |
687 | } |
688 | #endif /* NFSCLIENT */ |
689 | |
690 | #ifdef NFSSERVER |
691 | /* |
692 | * Find an nfssrv_sock for nfsd, sleeping if needed. |
693 | */ |
694 | int |
695 | nfsrv_getslp(struct nfsd *nfsd) |
696 | { |
697 | struct nfssvc_sock *slp; |
698 | int error; |
699 | |
700 | again: |
701 | while (nfsd->nfsd_slp == NULL && |
702 | (nfsd_head_flag & NFSD_CHECKSLP) == 0) { |
703 | nfsd->nfsd_flag |= NFSD_WAITING; |
704 | nfsd_waiting++; |
705 | error = tsleep_nsec(nfsd, PSOCK | PCATCH, "nfsd", INFSLP); |
706 | nfsd_waiting--; |
707 | if (error) |
708 | return (error); |
709 | } |
710 | |
711 | if (nfsd->nfsd_slp == NULL && |
712 | (nfsd_head_flag & NFSD_CHECKSLP) != 0) { |
713 | TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) { |
714 | if ((slp->ns_flag & (SLP_VALID | SLP_DOREC)) == |
715 | (SLP_VALID | SLP_DOREC)) { |
716 | slp->ns_flag &= ~SLP_DOREC; |
717 | slp->ns_sref++; |
718 | nfsd->nfsd_slp = slp; |
719 | break; |
720 | } |
721 | } |
722 | if (slp == NULL) |
723 | nfsd_head_flag &= ~NFSD_CHECKSLP; |
724 | } |
725 | |
726 | if (nfsd->nfsd_slp == NULL) |
727 | goto again; |
728 | |
729 | return (0); |
730 | } |
731 | #endif /* NFSSERVER */ |