1*33de042dSApple OSS Distributions /*
2*33de042dSApple OSS Distributions * Copyright (c) 2019-2022 Apple Inc. All rights reserved.
3*33de042dSApple OSS Distributions *
4*33de042dSApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*33de042dSApple OSS Distributions *
6*33de042dSApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*33de042dSApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*33de042dSApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*33de042dSApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*33de042dSApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*33de042dSApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*33de042dSApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*33de042dSApple OSS Distributions * terms of an Apple operating system software license agreement.
14*33de042dSApple OSS Distributions *
15*33de042dSApple OSS Distributions * Please obtain a copy of the License at
16*33de042dSApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*33de042dSApple OSS Distributions *
18*33de042dSApple OSS Distributions * The Original Code and all software distributed under the License are
19*33de042dSApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*33de042dSApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*33de042dSApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*33de042dSApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*33de042dSApple OSS Distributions * Please see the License for the specific language governing rights and
24*33de042dSApple OSS Distributions * limitations under the License.
25*33de042dSApple OSS Distributions *
26*33de042dSApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*33de042dSApple OSS Distributions */
28*33de042dSApple OSS Distributions /*
29*33de042dSApple OSS Distributions * Copyright (c) 1982, 1986, 1989, 1993
30*33de042dSApple OSS Distributions * The Regents of the University of California. All rights reserved.
31*33de042dSApple OSS Distributions *
32*33de042dSApple OSS Distributions * This code is derived from software contributed to Berkeley by
33*33de042dSApple OSS Distributions * Scooter Morris at Genentech Inc.
34*33de042dSApple OSS Distributions *
35*33de042dSApple OSS Distributions * Redistribution and use in source and binary forms, with or without
36*33de042dSApple OSS Distributions * modification, are permitted provided that the following conditions
37*33de042dSApple OSS Distributions * are met:
38*33de042dSApple OSS Distributions * 1. Redistributions of source code must retain the above copyright
39*33de042dSApple OSS Distributions * notice, this list of conditions and the following disclaimer.
40*33de042dSApple OSS Distributions * 2. Redistributions in binary form must reproduce the above copyright
41*33de042dSApple OSS Distributions * notice, this list of conditions and the following disclaimer in the
42*33de042dSApple OSS Distributions * documentation and/or other materials provided with the distribution.
43*33de042dSApple OSS Distributions * 4. Neither the name of the University nor the names of its contributors
44*33de042dSApple OSS Distributions * may be used to endorse or promote products derived from this software
45*33de042dSApple OSS Distributions * without specific prior written permission.
46*33de042dSApple OSS Distributions *
47*33de042dSApple OSS Distributions * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48*33de042dSApple OSS Distributions * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49*33de042dSApple OSS Distributions * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50*33de042dSApple OSS Distributions * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51*33de042dSApple OSS Distributions * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52*33de042dSApple OSS Distributions * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53*33de042dSApple OSS Distributions * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54*33de042dSApple OSS Distributions * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55*33de042dSApple OSS Distributions * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56*33de042dSApple OSS Distributions * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57*33de042dSApple OSS Distributions * SUCH DAMAGE.
58*33de042dSApple OSS Distributions *
59*33de042dSApple OSS Distributions * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
60*33de042dSApple OSS Distributions */
61*33de042dSApple OSS Distributions
62*33de042dSApple OSS Distributions #include <sys/cdefs.h>
63*33de042dSApple OSS Distributions #include <sys/param.h>
64*33de042dSApple OSS Distributions #include <sys/systm.h>
65*33de042dSApple OSS Distributions #include <sys/kernel.h>
66*33de042dSApple OSS Distributions #include <sys/lock.h>
67*33de042dSApple OSS Distributions #include <sys/mount.h>
68*33de042dSApple OSS Distributions #include <sys/proc.h>
69*33de042dSApple OSS Distributions #include <sys/signalvar.h>
70*33de042dSApple OSS Distributions #include <sys/unistd.h>
71*33de042dSApple OSS Distributions #include <sys/user.h>
72*33de042dSApple OSS Distributions #include <sys/vnode.h>
73*33de042dSApple OSS Distributions #include <sys/vnode_internal.h>
74*33de042dSApple OSS Distributions #include <sys/vnode_if.h>
75*33de042dSApple OSS Distributions #include <sys/malloc.h>
76*33de042dSApple OSS Distributions #include <sys/fcntl.h>
77*33de042dSApple OSS Distributions #include <sys/lockf.h>
78*33de042dSApple OSS Distributions #include <sys/sdt.h>
79*33de042dSApple OSS Distributions #include <kern/policy_internal.h>
80*33de042dSApple OSS Distributions
81*33de042dSApple OSS Distributions #include <sys/file_internal.h>
82*33de042dSApple OSS Distributions
83*33de042dSApple OSS Distributions #if (DEVELOPMENT || DEBUG)
84*33de042dSApple OSS Distributions #define LOCKF_DEBUGGING 1
85*33de042dSApple OSS Distributions #endif
86*33de042dSApple OSS Distributions
87*33de042dSApple OSS Distributions #ifdef LOCKF_DEBUGGING
88*33de042dSApple OSS Distributions #include <sys/sysctl.h>
89*33de042dSApple OSS Distributions void lf_print(const char *tag, struct lockf *lock);
90*33de042dSApple OSS Distributions void lf_printlist(const char *tag, struct lockf *lock);
91*33de042dSApple OSS Distributions
92*33de042dSApple OSS Distributions #define LF_DBG_LOCKOP (1 << 0) /* setlk, getlk, clearlk */
93*33de042dSApple OSS Distributions #define LF_DBG_LIST (1 << 1) /* split, coalesce */
94*33de042dSApple OSS Distributions #define LF_DBG_IMPINH (1 << 2) /* importance inheritance */
95*33de042dSApple OSS Distributions #define LF_DBG_TRACE (1 << 3) /* errors, exit */
96*33de042dSApple OSS Distributions #define LF_DBG_DEADLOCK (1 << 4) /* deadlock detection */
97*33de042dSApple OSS Distributions
98*33de042dSApple OSS Distributions static int lockf_debug = 0; /* was 2, could be 3 ;-) */
99*33de042dSApple OSS Distributions SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &lockf_debug, 0, "");
100*33de042dSApple OSS Distributions
101*33de042dSApple OSS Distributions /*
102*33de042dSApple OSS Distributions * If the selector is set, then output the debugging diagnostic.
103*33de042dSApple OSS Distributions */
104*33de042dSApple OSS Distributions #define LOCKF_DEBUG(mask, ...) \
105*33de042dSApple OSS Distributions do { \
106*33de042dSApple OSS Distributions if ((mask) & lockf_debug) { \
107*33de042dSApple OSS Distributions printf("%s>", __FUNCTION__); \
108*33de042dSApple OSS Distributions printf(__VA_ARGS__); \
109*33de042dSApple OSS Distributions } \
110*33de042dSApple OSS Distributions } while(0)
111*33de042dSApple OSS Distributions
112*33de042dSApple OSS Distributions #define LOCKF_DEBUGP(mask) \
113*33de042dSApple OSS Distributions ({ \
114*33de042dSApple OSS Distributions ((mask) & lockf_debug); \
115*33de042dSApple OSS Distributions })
116*33de042dSApple OSS Distributions #else /* !LOCKF_DEBUGGING */
117*33de042dSApple OSS Distributions #define LOCKF_DEBUG(mask, ...) /* mask */
118*33de042dSApple OSS Distributions #endif /* !LOCKF_DEBUGGING */
119*33de042dSApple OSS Distributions
120*33de042dSApple OSS Distributions KALLOC_TYPE_DEFINE(KT_LOCKF, struct lockf, KT_PRIV_ACCT);
121*33de042dSApple OSS Distributions
122*33de042dSApple OSS Distributions #define NOLOCKF (struct lockf *)0
123*33de042dSApple OSS Distributions #define SELF 0x1
124*33de042dSApple OSS Distributions #define OTHERS 0x2
125*33de042dSApple OSS Distributions #define OFF_MAX 0x7fffffffffffffffULL /* max off_t */
126*33de042dSApple OSS Distributions
127*33de042dSApple OSS Distributions /* return the effective end of a 'struct lockf': lf_end == -1 is OFF_MAX */
128*33de042dSApple OSS Distributions #define LF_END(l) ((l)->lf_end == -1 ? OFF_MAX : (l)->lf_end)
129*33de042dSApple OSS Distributions
130*33de042dSApple OSS Distributions /*
131*33de042dSApple OSS Distributions * Overlapping lock states
132*33de042dSApple OSS Distributions *
133*33de042dSApple OSS Distributions * For lk_find_overlap(..., SELF, ...), the possible sequences are a single:
134*33de042dSApple OSS Distributions * - OVERLAP_NONE,
135*33de042dSApple OSS Distributions * - OVERLAP_EQUALS_LOCK, or
136*33de042dSApple OSS Distributions * - OVERLAP_CONTAINS_LOCK
137*33de042dSApple OSS Distributions *
138*33de042dSApple OSS Distributions * or the following sequence:
139*33de042dSApple OSS Distributions * - optional OVERLAP_STARTS_BEFORE_LOCK
140*33de042dSApple OSS Distributions * - zero or more OVERLAP_CONTAINED_BY_LOCK
141*33de042dSApple OSS Distributions * - optional OVERLAP_ENDS_AFTER_LOCK
142*33de042dSApple OSS Distributions * - OVERLAP_NONE
143*33de042dSApple OSS Distributions *
144*33de042dSApple OSS Distributions * In the annotations:
145*33de042dSApple OSS Distributions * - the search lock is [SS, SE] and
146*33de042dSApple OSS Distributions * - the returned overlap lock is [OS,OE].
147*33de042dSApple OSS Distributions */
148*33de042dSApple OSS Distributions typedef enum {
149*33de042dSApple OSS Distributions OVERLAP_NONE = 0,
150*33de042dSApple OSS Distributions OVERLAP_EQUALS_LOCK, /* OS == SS && OE == SE */
151*33de042dSApple OSS Distributions OVERLAP_CONTAINS_LOCK, /* OS <= SS && OE >= SE */
152*33de042dSApple OSS Distributions OVERLAP_CONTAINED_BY_LOCK, /* OS >= SS && OE <= SE */
153*33de042dSApple OSS Distributions OVERLAP_STARTS_BEFORE_LOCK, /* OS < SS && OE >= SS */
154*33de042dSApple OSS Distributions OVERLAP_ENDS_AFTER_LOCK /* OS > SS && OE > SE */
155*33de042dSApple OSS Distributions } overlap_t;
156*33de042dSApple OSS Distributions
157*33de042dSApple OSS Distributions static int lf_clearlock(struct lockf *);
158*33de042dSApple OSS Distributions static int lf_transferlock(struct lockf *);
159*33de042dSApple OSS Distributions static overlap_t lf_findoverlap(struct lockf *,
160*33de042dSApple OSS Distributions struct lockf *, int, struct lockf ***, struct lockf **);
161*33de042dSApple OSS Distributions static struct lockf *lf_getblock(struct lockf *, pid_t);
162*33de042dSApple OSS Distributions static int lf_getlock(struct lockf *, struct flock *, pid_t);
163*33de042dSApple OSS Distributions static int lf_setlock(struct lockf *, struct timespec *);
164*33de042dSApple OSS Distributions static int lf_split(struct lockf *, struct lockf *);
165*33de042dSApple OSS Distributions static void lf_wakelock(struct lockf *, boolean_t);
166*33de042dSApple OSS Distributions #if IMPORTANCE_INHERITANCE
167*33de042dSApple OSS Distributions static void lf_hold_assertion(task_t, struct lockf *);
168*33de042dSApple OSS Distributions static void lf_jump_to_queue_head(struct lockf *, struct lockf *);
169*33de042dSApple OSS Distributions static void lf_drop_assertion(struct lockf *);
170*33de042dSApple OSS Distributions static void lf_boost_blocking_proc(struct lockf *, struct lockf *);
171*33de042dSApple OSS Distributions static void lf_adjust_assertion(struct lockf *block);
172*33de042dSApple OSS Distributions #endif /* IMPORTANCE_INHERITANCE */
173*33de042dSApple OSS Distributions
174*33de042dSApple OSS Distributions static LCK_GRP_DECLARE(lf_dead_lock_grp, "lf_dead_lock");
175*33de042dSApple OSS Distributions static LCK_MTX_DECLARE(lf_dead_lock, &lf_dead_lock_grp);
176*33de042dSApple OSS Distributions
177*33de042dSApple OSS Distributions /*
178*33de042dSApple OSS Distributions * lf_advlock
179*33de042dSApple OSS Distributions *
180*33de042dSApple OSS Distributions * Description: Advisory record locking support
181*33de042dSApple OSS Distributions *
182*33de042dSApple OSS Distributions * Parameters: ap Argument pointer to a vnop_advlock_args
183*33de042dSApple OSS Distributions * argument descriptor structure for the
184*33de042dSApple OSS Distributions * lock operation to be attempted.
185*33de042dSApple OSS Distributions *
186*33de042dSApple OSS Distributions * Returns: 0 Success
187*33de042dSApple OSS Distributions * EOVERFLOW
188*33de042dSApple OSS Distributions * EINVAL
189*33de042dSApple OSS Distributions * ENOLCK Number of locked regions exceeds limit
190*33de042dSApple OSS Distributions * lf_setlock:EAGAIN
191*33de042dSApple OSS Distributions * lf_setlock:EDEADLK
192*33de042dSApple OSS Distributions * lf_setlock:EINTR
193*33de042dSApple OSS Distributions * lf_setlock:ENOLCK
194*33de042dSApple OSS Distributions * lf_setlock:ETIMEDOUT
195*33de042dSApple OSS Distributions * lf_clearlock:ENOLCK
196*33de042dSApple OSS Distributions * vnode_size:???
197*33de042dSApple OSS Distributions *
198*33de042dSApple OSS Distributions * Notes: We return ENOLCK when we run out of memory to support locks; as
199*33de042dSApple OSS Distributions * such, there is no specific expectation limit other than the
200*33de042dSApple OSS Distributions * amount of available resources.
201*33de042dSApple OSS Distributions */
202*33de042dSApple OSS Distributions int
lf_advlock(struct vnop_advlock_args * ap)203*33de042dSApple OSS Distributions lf_advlock(struct vnop_advlock_args *ap)
204*33de042dSApple OSS Distributions {
205*33de042dSApple OSS Distributions struct vnode *vp = ap->a_vp;
206*33de042dSApple OSS Distributions struct flock *fl = ap->a_fl;
207*33de042dSApple OSS Distributions vfs_context_t context = ap->a_context;
208*33de042dSApple OSS Distributions struct lockf *lock;
209*33de042dSApple OSS Distributions off_t start, end, oadd;
210*33de042dSApple OSS Distributions u_quad_t size;
211*33de042dSApple OSS Distributions int error;
212*33de042dSApple OSS Distributions struct lockf **head = &vp->v_lockf;
213*33de042dSApple OSS Distributions
214*33de042dSApple OSS Distributions /* XXX HFS may need a !vnode_isreg(vp) EISDIR error here */
215*33de042dSApple OSS Distributions
216*33de042dSApple OSS Distributions /*
217*33de042dSApple OSS Distributions * Avoid the common case of unlocking when inode has no locks.
218*33de042dSApple OSS Distributions */
219*33de042dSApple OSS Distributions if (*head == (struct lockf *)0) {
220*33de042dSApple OSS Distributions if (ap->a_op != F_SETLK) {
221*33de042dSApple OSS Distributions fl->l_type = F_UNLCK;
222*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_TRACE,
223*33de042dSApple OSS Distributions "lf_advlock: '%s' unlock without lock\n",
224*33de042dSApple OSS Distributions vfs_context_proc(context)->p_comm);
225*33de042dSApple OSS Distributions return 0;
226*33de042dSApple OSS Distributions }
227*33de042dSApple OSS Distributions }
228*33de042dSApple OSS Distributions
229*33de042dSApple OSS Distributions /*
230*33de042dSApple OSS Distributions * Convert the flock structure into a start and end.
231*33de042dSApple OSS Distributions */
232*33de042dSApple OSS Distributions switch (fl->l_whence) {
233*33de042dSApple OSS Distributions case SEEK_SET:
234*33de042dSApple OSS Distributions case SEEK_CUR:
235*33de042dSApple OSS Distributions /*
236*33de042dSApple OSS Distributions * Caller is responsible for adding any necessary offset
237*33de042dSApple OSS Distributions * when SEEK_CUR is used.
238*33de042dSApple OSS Distributions */
239*33de042dSApple OSS Distributions start = fl->l_start;
240*33de042dSApple OSS Distributions break;
241*33de042dSApple OSS Distributions
242*33de042dSApple OSS Distributions case SEEK_END:
243*33de042dSApple OSS Distributions
244*33de042dSApple OSS Distributions /*
245*33de042dSApple OSS Distributions * It's OK to cast the u_quad_t to and off_t here, since they
246*33de042dSApple OSS Distributions * are the same storage size, and the value of the returned
247*33de042dSApple OSS Distributions * contents will never overflow into the sign bit. We need to
248*33de042dSApple OSS Distributions * do this because we will use size to force range checks.
249*33de042dSApple OSS Distributions */
250*33de042dSApple OSS Distributions if ((error = vnode_size(vp, (off_t *)&size, context))) {
251*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_TRACE,
252*33de042dSApple OSS Distributions "lf_advlock: vnode_getattr failed: %d\n", error);
253*33de042dSApple OSS Distributions return error;
254*33de042dSApple OSS Distributions }
255*33de042dSApple OSS Distributions
256*33de042dSApple OSS Distributions if (size > OFF_MAX ||
257*33de042dSApple OSS Distributions (fl->l_start > 0 &&
258*33de042dSApple OSS Distributions size > (u_quad_t)(OFF_MAX - fl->l_start))) {
259*33de042dSApple OSS Distributions return EOVERFLOW;
260*33de042dSApple OSS Distributions }
261*33de042dSApple OSS Distributions start = size + fl->l_start;
262*33de042dSApple OSS Distributions break;
263*33de042dSApple OSS Distributions
264*33de042dSApple OSS Distributions default:
265*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: unknown whence %d\n",
266*33de042dSApple OSS Distributions fl->l_whence);
267*33de042dSApple OSS Distributions return EINVAL;
268*33de042dSApple OSS Distributions }
269*33de042dSApple OSS Distributions if (start < 0) {
270*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: start < 0 (%qd)\n",
271*33de042dSApple OSS Distributions start);
272*33de042dSApple OSS Distributions return EINVAL;
273*33de042dSApple OSS Distributions }
274*33de042dSApple OSS Distributions if (fl->l_len < 0) {
275*33de042dSApple OSS Distributions if (start == 0) {
276*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_TRACE,
277*33de042dSApple OSS Distributions "lf_advlock: len < 0 & start == 0\n");
278*33de042dSApple OSS Distributions return EINVAL;
279*33de042dSApple OSS Distributions }
280*33de042dSApple OSS Distributions end = start - 1;
281*33de042dSApple OSS Distributions start += fl->l_len;
282*33de042dSApple OSS Distributions if (start < 0) {
283*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_TRACE,
284*33de042dSApple OSS Distributions "lf_advlock: start < 0 (%qd)\n", start);
285*33de042dSApple OSS Distributions return EINVAL;
286*33de042dSApple OSS Distributions }
287*33de042dSApple OSS Distributions } else if (fl->l_len == 0) {
288*33de042dSApple OSS Distributions end = -1;
289*33de042dSApple OSS Distributions } else {
290*33de042dSApple OSS Distributions oadd = fl->l_len - 1;
291*33de042dSApple OSS Distributions if (oadd > (off_t)(OFF_MAX - start)) {
292*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: overflow\n");
293*33de042dSApple OSS Distributions return EOVERFLOW;
294*33de042dSApple OSS Distributions }
295*33de042dSApple OSS Distributions end = start + oadd;
296*33de042dSApple OSS Distributions }
297*33de042dSApple OSS Distributions /*
298*33de042dSApple OSS Distributions * Create the lockf structure
299*33de042dSApple OSS Distributions */
300*33de042dSApple OSS Distributions lock = zalloc_flags(KT_LOCKF, Z_WAITOK | Z_NOFAIL);
301*33de042dSApple OSS Distributions lock->lf_start = start;
302*33de042dSApple OSS Distributions lock->lf_end = end;
303*33de042dSApple OSS Distributions lock->lf_id = ap->a_id;
304*33de042dSApple OSS Distributions lock->lf_vnode = vp;
305*33de042dSApple OSS Distributions lock->lf_type = fl->l_type;
306*33de042dSApple OSS Distributions lock->lf_head = head;
307*33de042dSApple OSS Distributions lock->lf_next = (struct lockf *)0;
308*33de042dSApple OSS Distributions TAILQ_INIT(&lock->lf_blkhd);
309*33de042dSApple OSS Distributions lock->lf_flags = (short)ap->a_flags;
310*33de042dSApple OSS Distributions #if IMPORTANCE_INHERITANCE
311*33de042dSApple OSS Distributions lock->lf_boosted = LF_NOT_BOOSTED;
312*33de042dSApple OSS Distributions #endif
313*33de042dSApple OSS Distributions if (ap->a_flags & F_POSIX) {
314*33de042dSApple OSS Distributions lock->lf_owner = (struct proc *)lock->lf_id;
315*33de042dSApple OSS Distributions } else {
316*33de042dSApple OSS Distributions lock->lf_owner = NULL;
317*33de042dSApple OSS Distributions }
318*33de042dSApple OSS Distributions
319*33de042dSApple OSS Distributions if (ap->a_flags & F_FLOCK) {
320*33de042dSApple OSS Distributions lock->lf_flags |= F_WAKE1_SAFE;
321*33de042dSApple OSS Distributions }
322*33de042dSApple OSS Distributions
323*33de042dSApple OSS Distributions lck_mtx_lock(&vp->v_lock); /* protect the lockf list */
324*33de042dSApple OSS Distributions /*
325*33de042dSApple OSS Distributions * Do the requested operation.
326*33de042dSApple OSS Distributions */
327*33de042dSApple OSS Distributions switch (ap->a_op) {
328*33de042dSApple OSS Distributions case F_SETLK:
329*33de042dSApple OSS Distributions /*
330*33de042dSApple OSS Distributions * For OFD locks, lf_id is derived from the fileglob.
331*33de042dSApple OSS Distributions * Record an "lf_owner" iff this is a confined fd
332*33de042dSApple OSS Distributions * i.e. it cannot escape this process and will be
333*33de042dSApple OSS Distributions * F_UNLCKed before the owner exits. (This is
334*33de042dSApple OSS Distributions * the implicit guarantee needed to ensure lf_owner
335*33de042dSApple OSS Distributions * remains a valid reference.)
336*33de042dSApple OSS Distributions */
337*33de042dSApple OSS Distributions if ((ap->a_flags & F_OFD_LOCK) && (ap->a_flags & F_CONFINED)) {
338*33de042dSApple OSS Distributions lock->lf_owner = current_proc();
339*33de042dSApple OSS Distributions }
340*33de042dSApple OSS Distributions error = lf_setlock(lock, ap->a_timeout);
341*33de042dSApple OSS Distributions break;
342*33de042dSApple OSS Distributions
343*33de042dSApple OSS Distributions case F_UNLCK:
344*33de042dSApple OSS Distributions error = lf_clearlock(lock);
345*33de042dSApple OSS Distributions zfree(KT_LOCKF, lock);
346*33de042dSApple OSS Distributions break;
347*33de042dSApple OSS Distributions
348*33de042dSApple OSS Distributions case F_TRANSFER:
349*33de042dSApple OSS Distributions /*
350*33de042dSApple OSS Distributions * The new owner is passed in the context, set the new owner
351*33de042dSApple OSS Distributions * in the lf_owner field.
352*33de042dSApple OSS Distributions */
353*33de042dSApple OSS Distributions lock->lf_owner = vfs_context_proc(context);
354*33de042dSApple OSS Distributions assert(lock->lf_owner != current_proc());
355*33de042dSApple OSS Distributions error = lf_transferlock(lock);
356*33de042dSApple OSS Distributions zfree(KT_LOCKF, lock);
357*33de042dSApple OSS Distributions break;
358*33de042dSApple OSS Distributions
359*33de042dSApple OSS Distributions case F_GETLK:
360*33de042dSApple OSS Distributions error = lf_getlock(lock, fl, -1);
361*33de042dSApple OSS Distributions zfree(KT_LOCKF, lock);
362*33de042dSApple OSS Distributions break;
363*33de042dSApple OSS Distributions
364*33de042dSApple OSS Distributions case F_GETLKPID:
365*33de042dSApple OSS Distributions error = lf_getlock(lock, fl, fl->l_pid);
366*33de042dSApple OSS Distributions zfree(KT_LOCKF, lock);
367*33de042dSApple OSS Distributions break;
368*33de042dSApple OSS Distributions
369*33de042dSApple OSS Distributions default:
370*33de042dSApple OSS Distributions zfree(KT_LOCKF, lock);
371*33de042dSApple OSS Distributions error = EINVAL;
372*33de042dSApple OSS Distributions break;
373*33de042dSApple OSS Distributions }
374*33de042dSApple OSS Distributions lck_mtx_unlock(&vp->v_lock); /* done manipulating the list */
375*33de042dSApple OSS Distributions
376*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: normal exit: %d\n", error);
377*33de042dSApple OSS Distributions return error;
378*33de042dSApple OSS Distributions }
379*33de042dSApple OSS Distributions
380*33de042dSApple OSS Distributions /*
381*33de042dSApple OSS Distributions * Empty the queue of msleeping requests for a lock on the given vnode.
382*33de042dSApple OSS Distributions * Called with the vnode already locked. Used for forced unmount, where
383*33de042dSApple OSS Distributions * a flock(2) invoker sleeping on a blocked lock holds an iocount reference
384*33de042dSApple OSS Distributions * that prevents the vnode from ever being drained. Force unmounting wins.
385*33de042dSApple OSS Distributions */
386*33de042dSApple OSS Distributions void
lf_abort_advlocks(vnode_t vp)387*33de042dSApple OSS Distributions lf_abort_advlocks(vnode_t vp)
388*33de042dSApple OSS Distributions {
389*33de042dSApple OSS Distributions struct lockf *lock;
390*33de042dSApple OSS Distributions
391*33de042dSApple OSS Distributions if ((lock = vp->v_lockf) == NULL) {
392*33de042dSApple OSS Distributions return;
393*33de042dSApple OSS Distributions }
394*33de042dSApple OSS Distributions
395*33de042dSApple OSS Distributions lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
396*33de042dSApple OSS Distributions
397*33de042dSApple OSS Distributions if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
398*33de042dSApple OSS Distributions struct lockf *tlock;
399*33de042dSApple OSS Distributions
400*33de042dSApple OSS Distributions TAILQ_FOREACH(tlock, &lock->lf_blkhd, lf_block) {
401*33de042dSApple OSS Distributions /*
402*33de042dSApple OSS Distributions * Setting this flag should cause all
403*33de042dSApple OSS Distributions * currently blocked F_SETLK request to
404*33de042dSApple OSS Distributions * return to userland with an errno.
405*33de042dSApple OSS Distributions */
406*33de042dSApple OSS Distributions tlock->lf_flags |= F_ABORT;
407*33de042dSApple OSS Distributions }
408*33de042dSApple OSS Distributions lf_wakelock(lock, TRUE);
409*33de042dSApple OSS Distributions }
410*33de042dSApple OSS Distributions }
411*33de042dSApple OSS Distributions
412*33de042dSApple OSS Distributions /*
413*33de042dSApple OSS Distributions * Take any lock attempts which are currently blocked by a given lock ("from")
414*33de042dSApple OSS Distributions * and mark them as blocked by a different lock ("to"). Used in the case
415*33de042dSApple OSS Distributions * where a byte range currently occupied by "from" is to be occupied by "to."
416*33de042dSApple OSS Distributions */
417*33de042dSApple OSS Distributions static void
lf_move_blocked(struct lockf * to,struct lockf * from)418*33de042dSApple OSS Distributions lf_move_blocked(struct lockf *to, struct lockf *from)
419*33de042dSApple OSS Distributions {
420*33de042dSApple OSS Distributions struct lockf *tlock;
421*33de042dSApple OSS Distributions
422*33de042dSApple OSS Distributions TAILQ_FOREACH(tlock, &from->lf_blkhd, lf_block) {
423*33de042dSApple OSS Distributions tlock->lf_next = to;
424*33de042dSApple OSS Distributions }
425*33de042dSApple OSS Distributions
426*33de042dSApple OSS Distributions TAILQ_CONCAT(&to->lf_blkhd, &from->lf_blkhd, lf_block);
427*33de042dSApple OSS Distributions }
428*33de042dSApple OSS Distributions
429*33de042dSApple OSS Distributions /*
430*33de042dSApple OSS Distributions * lf_coalesce_adjacent
431*33de042dSApple OSS Distributions *
432*33de042dSApple OSS Distributions * Description: Helper function: when setting a lock, coalesce adjacent
433*33de042dSApple OSS Distributions * locks. Needed because adjacent locks are not overlapping,
434*33de042dSApple OSS Distributions * but POSIX requires that they be coalesced.
435*33de042dSApple OSS Distributions *
436*33de042dSApple OSS Distributions * Parameters: lock The new lock which may be adjacent
437*33de042dSApple OSS Distributions * to already locked regions, and which
438*33de042dSApple OSS Distributions * should therefore be coalesced with them
439*33de042dSApple OSS Distributions *
440*33de042dSApple OSS Distributions * Returns: <void>
441*33de042dSApple OSS Distributions */
442*33de042dSApple OSS Distributions static void
lf_coalesce_adjacent(struct lockf * lock)443*33de042dSApple OSS Distributions lf_coalesce_adjacent(struct lockf *lock)
444*33de042dSApple OSS Distributions {
445*33de042dSApple OSS Distributions struct lockf **lf = lock->lf_head;
446*33de042dSApple OSS Distributions
447*33de042dSApple OSS Distributions while (*lf != NOLOCKF) {
448*33de042dSApple OSS Distributions /* reject locks that obviously could not be coalesced */
449*33de042dSApple OSS Distributions if ((*lf == lock) ||
450*33de042dSApple OSS Distributions ((*lf)->lf_id != lock->lf_id) ||
451*33de042dSApple OSS Distributions ((*lf)->lf_type != lock->lf_type)) {
452*33de042dSApple OSS Distributions lf = &(*lf)->lf_next;
453*33de042dSApple OSS Distributions continue;
454*33de042dSApple OSS Distributions }
455*33de042dSApple OSS Distributions
456*33de042dSApple OSS Distributions /*
457*33de042dSApple OSS Distributions * NOTE: Assumes that if two locks are adjacent on the number line
458*33de042dSApple OSS Distributions * and belong to the same owner, then they are adjacent on the list.
459*33de042dSApple OSS Distributions */
460*33de042dSApple OSS Distributions if (LF_END(*lf) < OFF_MAX &&
461*33de042dSApple OSS Distributions (LF_END(*lf) + 1) == lock->lf_start) {
462*33de042dSApple OSS Distributions struct lockf *adjacent = *lf;
463*33de042dSApple OSS Distributions
464*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_LIST, "lf_coalesce_adjacent: coalesce adjacent previous\n");
465*33de042dSApple OSS Distributions lock->lf_start = (*lf)->lf_start;
466*33de042dSApple OSS Distributions *lf = lock;
467*33de042dSApple OSS Distributions lf = &(*lf)->lf_next;
468*33de042dSApple OSS Distributions
469*33de042dSApple OSS Distributions lf_move_blocked(lock, adjacent);
470*33de042dSApple OSS Distributions
471*33de042dSApple OSS Distributions zfree(KT_LOCKF, adjacent);
472*33de042dSApple OSS Distributions continue;
473*33de042dSApple OSS Distributions }
474*33de042dSApple OSS Distributions /* If the lock starts adjacent to us, we can coalesce it */
475*33de042dSApple OSS Distributions if (LF_END(lock) < OFF_MAX &&
476*33de042dSApple OSS Distributions (LF_END(lock) + 1) == (*lf)->lf_start) {
477*33de042dSApple OSS Distributions struct lockf *adjacent = *lf;
478*33de042dSApple OSS Distributions
479*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_LIST, "lf_coalesce_adjacent: coalesce adjacent following\n");
480*33de042dSApple OSS Distributions lock->lf_end = (*lf)->lf_end;
481*33de042dSApple OSS Distributions lock->lf_next = (*lf)->lf_next;
482*33de042dSApple OSS Distributions lf = &lock->lf_next;
483*33de042dSApple OSS Distributions
484*33de042dSApple OSS Distributions lf_move_blocked(lock, adjacent);
485*33de042dSApple OSS Distributions
486*33de042dSApple OSS Distributions zfree(KT_LOCKF, adjacent);
487*33de042dSApple OSS Distributions continue;
488*33de042dSApple OSS Distributions }
489*33de042dSApple OSS Distributions
490*33de042dSApple OSS Distributions /* no matching conditions; go on to next lock */
491*33de042dSApple OSS Distributions lf = &(*lf)->lf_next;
492*33de042dSApple OSS Distributions }
493*33de042dSApple OSS Distributions }
494*33de042dSApple OSS Distributions
495*33de042dSApple OSS Distributions /*
496*33de042dSApple OSS Distributions * lf_setlock
497*33de042dSApple OSS Distributions *
498*33de042dSApple OSS Distributions * Description: Set a byte-range lock.
499*33de042dSApple OSS Distributions *
500*33de042dSApple OSS Distributions * Parameters: lock The lock structure describing the lock
501*33de042dSApple OSS Distributions * to be set; allocated by the caller, it
502*33de042dSApple OSS Distributions * will be linked into the lock list if
503*33de042dSApple OSS Distributions * the set is successful, and freed if the
504*33de042dSApple OSS Distributions * set is unsuccessful.
505*33de042dSApple OSS Distributions *
506*33de042dSApple OSS Distributions * timeout Timeout specified in the case of
507*33de042dSApple OSS Distributions * SETLKWTIMEOUT.
508*33de042dSApple OSS Distributions *
509*33de042dSApple OSS Distributions * Returns: 0 Success
510*33de042dSApple OSS Distributions * EAGAIN
511*33de042dSApple OSS Distributions * EDEADLK
512*33de042dSApple OSS Distributions * lf_split:ENOLCK
513*33de042dSApple OSS Distributions * lf_clearlock:ENOLCK
514*33de042dSApple OSS Distributions * msleep:EINTR
515*33de042dSApple OSS Distributions * msleep:ETIMEDOUT
516*33de042dSApple OSS Distributions *
517*33de042dSApple OSS Distributions * Notes: We add the lock to the provisional lock list. We do not
518*33de042dSApple OSS Distributions * coalesce at this time; this has implications for other lock
519*33de042dSApple OSS Distributions * requestors in the blocker search mechanism.
520*33de042dSApple OSS Distributions */
static int
lf_setlock(struct lockf *lock, struct timespec *timeout)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;	/* head of this vnode's lock list */
	struct lockf **prev, *overlap;
	static const char lockstr[] = "lockf";	/* wmesg for POSIX sleepers; also used below to recognize them */
	int priority, needtolink, error;
	struct vnode *vp = lock->lf_vnode;
	overlap_t ovcase;

#ifdef LOCKF_DEBUGGING
	if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
		lf_print("lf_setlock", lock);
		lf_printlist("lf_setlock(in)", lock);
	}
#endif /* LOCKF_DEBUGGING */
	LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p Looking for deadlock, vnode %p\n", lock, lock->lf_vnode);

	/*
	 * Set the sleep priority; writers sleep at a slightly lower
	 * priority than readers, and PCATCH lets signals interrupt
	 * the sleep (msleep may then return EINTR).
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK) {
		priority += 4;
	}
	priority |= PCATCH;
scan:
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 *
	 * NOTE: we re-enter here (goto scan) after dropping all locks in the
	 * deadlock-detection path below, since the list may have changed.
	 */
	while ((block = lf_getblock(lock, -1))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			DTRACE_FSINFO(advlock__nowait, vnode_t, vp);
			zfree(KT_LOCKF, lock);
			return EAGAIN;
		}

		LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p found blocking lock %p\n", lock, block);

		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 *
		 * OFD byte-range locks currently do NOT support
		 * deadlock detection.
		 *
		 * For POSIX byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			lck_mtx_lock(&lf_dead_lock);

			/* The blocked process is waiting on something */
			struct proc *wproc = block->lf_owner;
			proc_lock(wproc);

			LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p owned by pid %d\n", lock, proc_pid(wproc));

			struct uthread *ut;
			TAILQ_FOREACH(ut, &wproc->p_uthlist, uu_list) {
				/*
				 * If the thread is (a) asleep (uu_wchan != 0)
				 * and (b) in this code (uu_wmesg == lockstr)
				 * then check to see if the lock is blocked behind
				 * someone blocked behind us.
				 *
				 * Note: (i) vp->v_lock is held, preventing other
				 * threads from mutating the blocking list for our vnode.
				 * and (ii) the proc_lock is held i.e the thread list
				 * is stable.
				 *
				 * HOWEVER some thread in wproc might be sleeping on a lockf
				 * structure for a different vnode, and be woken at any
				 * time. Thus the waitblock list could mutate while
				 * it's being inspected by this thread, and what
				 * ut->uu_wchan was just pointing at could even be freed.
				 *
				 * Nevertheless this is safe here because of lf_dead_lock; if
				 * any thread blocked with uu_wmesg == lockstr wakes (see below)
				 * it will try to acquire lf_dead_lock which is already held
				 * here. Holding that lock prevents the lockf structure being
				 * pointed at by ut->uu_wchan from going away. Thus the vnode
				 * involved can be found and locked, and the corresponding
				 * blocking chain can then be examined safely.
				 */
				const struct lockf *waitblock = (const void *)ut->uu_wchan;
				if ((waitblock != NULL) && (ut->uu_wmesg == lockstr)) {
					LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p which is also blocked on lock %p vnode %p\n", lock, waitblock, waitblock->lf_vnode);

					vnode_t othervp = NULL;
					if (waitblock->lf_vnode != vp) {
						/*
						 * This thread in wproc is waiting for a lock
						 * on a different vnode; grab the lock on it
						 * that protects lf_next while we examine it.
						 */
						othervp = waitblock->lf_vnode;
						if (!lck_mtx_try_lock(&othervp->v_lock)) {
							/*
							 * avoid kernel deadlock: drop all
							 * locks, pause for a bit to let the
							 * other thread do what it needs to do,
							 * then (because we drop and retake
							 * v_lock) retry the scan.
							 */
							proc_unlock(wproc);
							lck_mtx_unlock(&lf_dead_lock);
							static struct timespec ts = {
								.tv_sec = 0,
								.tv_nsec = 2 * NSEC_PER_MSEC,
							};
							static const char pausestr[] = "lockf:pause";
							(void) msleep(lock, &vp->v_lock, priority, pausestr, &ts);
							LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p contention for vp %p => restart\n", lock, othervp);
							goto scan;
						}
					}

					/*
					 * Get the lock blocking the lock
					 * which would block us, and make
					 * certain it hasn't become unblocked
					 * (been granted, e.g. between the time
					 * we called lf_getblock, and the time
					 * we successfully acquired the
					 * proc_lock).
					 */
					const struct lockf *nextblock = waitblock->lf_next;
					if (nextblock == NULL) {
						if (othervp) {
							lck_mtx_unlock(&othervp->v_lock);
						}
						LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p with waitblock %p and no lf_next; othervp %p\n", lock, waitblock, othervp);
						continue;
					}
					LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p which is also blocked on lock %p vnode %p\n", lock, nextblock, nextblock->lf_vnode);

					/*
					 * Make sure it's an advisory range
					 * lock and not any other kind of lock;
					 * if we mix lock types, it's our own
					 * fault.
					 */
					if ((nextblock->lf_flags & F_POSIX) == 0) {
						if (othervp) {
							lck_mtx_unlock(&othervp->v_lock);
						}
						continue;
					}

					/*
					 * If the owner of the lock that's
					 * blocking a lock that's blocking us
					 * is us, then we would deadlock waiting
					 * for the requested lock, so error out.
					 */
					struct proc *bproc = nextblock->lf_owner;
					const boolean_t deadlocked = bproc == lock->lf_owner;

					if (othervp) {
						lck_mtx_unlock(&othervp->v_lock);
					}
					LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p owned by pid %d\n", lock, proc_pid(bproc));
					if (deadlocked) {
						LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p which is me, so EDEADLK\n", lock);
						proc_unlock(wproc);
						lck_mtx_unlock(&lf_dead_lock);
						zfree(KT_LOCKF, lock);
						return EDEADLK;
					}
				}
				LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p bottom of thread loop\n", lock);
			}
			proc_unlock(wproc);
			lck_mtx_unlock(&lf_dead_lock);
		}

		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			if ((error = lf_clearlock(lock)) != 0) {
				zfree(KT_LOCKF, lock);
				return error;
			}
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection):
		 * lock->lf_next points at our blocker while we sleep.
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);

		if (!(lock->lf_flags & F_FLOCK)) {
			block->lf_flags &= ~F_WAKE1_SAFE;
		}

#if IMPORTANCE_INHERITANCE
		/*
		 * Importance donation is done only for cases where the
		 * owning task can be unambiguously determined.
		 *
		 * POSIX type locks are not inherited by child processes;
		 * we maintain a 1:1 mapping between a lock and its owning
		 * process.
		 *
		 * Flock type locks are inherited across fork() and there is
		 * no 1:1 mapping in the general case. However, the fileglobs
		 * used by OFD locks *may* be confined to the process that
		 * created them, and thus have an "owner", in which case
		 * we also attempt importance donation.
		 */
		if ((lock->lf_flags & block->lf_flags & F_POSIX) != 0) {
			lf_boost_blocking_proc(lock, block);
		} else if ((lock->lf_flags & block->lf_flags & F_OFD_LOCK) &&
		    lock->lf_owner != block->lf_owner &&
		    NULL != lock->lf_owner && NULL != block->lf_owner) {
			lf_boost_blocking_proc(lock, block);
		}
#endif /* IMPORTANCE_INHERITANCE */

#ifdef LOCKF_DEBUGGING
		if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock(block)", block);
		}
#endif /* LOCKF_DEBUGGING */
		DTRACE_FSINFO(advlock__wait, vnode_t, vp);

		if (lock->lf_flags & F_POSIX) {
			error = msleep(lock, &vp->v_lock, priority, lockstr, timeout);
			/*
			 * Ensure that 'lock' doesn't get mutated or freed if a
			 * wakeup occurs while hunting for deadlocks (and holding
			 * lf_dead_lock - see above)
			 */
			lck_mtx_lock(&lf_dead_lock);
			lck_mtx_unlock(&lf_dead_lock);
		} else {
			/* Non-POSIX sleepers use a distinct wmesg so the deadlock
			 * hunter above (which matches lockstr) ignores them. */
			static const char lockstr_np[] = "lockf:np";
			error = msleep(lock, &vp->v_lock, priority, lockstr_np, timeout);
		}

		if (error == 0 && (lock->lf_flags & F_ABORT) != 0) {
			error = EBADF;
		}

		if (lock->lf_next) {
			/*
			 * lf_wakelock() always sets wakelock->lf_next to
			 * NULL before a wakeup; so we've been woken early
			 * - perhaps by a debugger, signal or other event.
			 *
			 * Remove 'lock' from the block list (avoids double-add
			 * in the spurious case, which would create a cycle)
			 */
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
#if IMPORTANCE_INHERITANCE
			/*
			 * Adjust the boost on lf_next.
			 */
			lf_adjust_assertion(lock->lf_next);
#endif /* IMPORTANCE_INHERITANCE */
			lock->lf_next = NULL;

			if (error == 0) {
				/*
				 * If this was a spurious wakeup, retry
				 */
				printf("%s: spurious wakeup, retrying lock\n",
				    __func__);
				continue;
			}
		}

		/*
		 * If locks were queued behind us while we slept, hand them
		 * off to whatever still blocks us (if anything) so they get
		 * re-evaluated.
		 */
		if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
			if ((block = lf_getblock(lock, -1)) != NULL) {
				lf_move_blocked(block, lock);
			}
		}

		if (error) {
			/* Sleep failed (signal, timeout, abort): wake anyone
			 * queued behind us, free our lock, and report. */
			if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
				lf_wakelock(lock, TRUE);
			}
			zfree(KT_LOCKF, lock);
			/* Return ETIMEDOUT if timeout occurred. */
			if (error == EWOULDBLOCK) {
				error = ETIMEDOUT;
			}
			return error;
		}
	}

	/*
	 * No blocks!! Add the lock. Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;	/* set until 'lock' has been linked into the list */
	for (;;) {
		const off_t lkend = LF_END(lock);
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase) {
			block = overlap->lf_next;
		}
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case OVERLAP_NONE:
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case OVERLAP_EQUALS_LOCK:
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap, TRUE);
			}
			overlap->lf_type = lock->lf_type;
			lf_move_blocked(overlap, lock);
			zfree(KT_LOCKF, lock);
			lock = overlap; /* for lf_coalesce_adjacent() */
			break;

		case OVERLAP_CONTAINS_LOCK:
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				/* Same type: existing lock already covers us. */
				lf_move_blocked(overlap, lock);
				zfree(KT_LOCKF, lock);
				lock = overlap; /* for lf_coalesce_adjacent() */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				/* Shared start: insert 'lock' before and trim
				 * the overlap to begin just past our end. */
				*prev = lock;
				lock->lf_next = overlap;
				assert(lkend < OFF_MAX);
				overlap->lf_start = lkend + 1;
			} else {
				/*
				 * If we can't split the lock, we can't
				 * grant it.  Claim a system limit for the
				 * resource shortage.
				 */
				if (lf_split(overlap, lock)) {
					zfree(KT_LOCKF, lock);
					return ENOLCK;
				}
			}
			lf_wakelock(overlap, TRUE);
			break;

		case OVERLAP_CONTAINED_BY_LOCK:
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap, TRUE);
			} else {
				lf_move_blocked(lock, overlap);
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else {
				*prev = overlap->lf_next;
			}
			zfree(KT_LOCKF, overlap);
			continue;	/* more overlaps may follow */

		case OVERLAP_STARTS_BEFORE_LOCK:
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			assert(lock->lf_start > 0);
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap, TRUE);
			needtolink = 0;
			continue;	/* more overlaps may follow */

		case OVERLAP_ENDS_AFTER_LOCK:
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			assert(lkend < OFF_MAX);
			overlap->lf_start = lkend + 1;
			lf_wakelock(overlap, TRUE);
			break;
		}
		break;
	}
	/* Coalesce adjacent locks with identical attributes */
	lf_coalesce_adjacent(lock);
#ifdef LOCKF_DEBUGGING
	if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock(out)", lock);
	}
#endif /* LOCKF_DEBUGGING */
	return 0;
}
969*33de042dSApple OSS Distributions
970*33de042dSApple OSS Distributions
971*33de042dSApple OSS Distributions /*
972*33de042dSApple OSS Distributions * lf_clearlock
973*33de042dSApple OSS Distributions *
 * Description: Remove a byte-range lock on a vnode.  Generally, find the
975*33de042dSApple OSS Distributions * lock (or an overlap to that lock) and remove it (or shrink
976*33de042dSApple OSS Distributions * it), then wakeup anyone we can.
977*33de042dSApple OSS Distributions *
978*33de042dSApple OSS Distributions * Parameters: unlock The lock to clear
979*33de042dSApple OSS Distributions *
980*33de042dSApple OSS Distributions * Returns: 0 Success
981*33de042dSApple OSS Distributions * lf_split:ENOLCK
982*33de042dSApple OSS Distributions *
983*33de042dSApple OSS Distributions * Notes: A caller may unlock all the locks owned by the caller by
984*33de042dSApple OSS Distributions * specifying the entire file range; locks owned by other
 *		callers are not affected by this operation.
986*33de042dSApple OSS Distributions */
static int
lf_clearlock(struct lockf *unlock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	overlap_t ovcase;

	/* Nothing on the list means nothing to clear. */
	if (lf == NOLOCKF) {
		return 0;
	}
#ifdef LOCKF_DEBUGGING
	if (unlock->lf_type != F_UNLCK) {
		panic("lf_clearlock: bad type");
	}
	if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
		lf_print("lf_clearlock", unlock);
	}
#endif /* LOCKF_DEBUGGING */
	/*
	 * Walk every SELF-owned lock that overlaps the unlock range.
	 * 'prev' always points at the next-pointer that links in the
	 * current overlap, so entries can be unlinked in place.
	 */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) != OVERLAP_NONE) {
		const off_t unlkend = LF_END(unlock);
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap, FALSE);
#if IMPORTANCE_INHERITANCE
		/* Release any importance boost held on behalf of waiters. */
		if (overlap->lf_boosted == LF_BOOSTED) {
			lf_drop_assertion(overlap);
		}
#endif /* IMPORTANCE_INHERITANCE */

		switch (ovcase) {
		case OVERLAP_NONE:      /* satisfy compiler enum/switch */
			break;

		case OVERLAP_EQUALS_LOCK:
			/* Exact match: unlink and free; no locks can follow. */
			*prev = overlap->lf_next;
			zfree(KT_LOCKF, overlap);
			break;

		case OVERLAP_CONTAINS_LOCK: /* split it */
			if (overlap->lf_start == unlock->lf_start) {
				/* Shared front edge: just trim the front. */
				assert(unlkend < OFF_MAX);
				overlap->lf_start = unlkend + 1;
				break;
			}
			/*
			 * If we can't split the lock, we can't grant it.
			 * Claim a system limit for the resource shortage.
			 */
			if (lf_split(overlap, unlock)) {
				return ENOLCK;
			}
			/*
			 * lf_split linked 'unlock' into the list as a
			 * placeholder; splice it back out again.
			 */
			overlap->lf_next = unlock->lf_next;
			break;

		case OVERLAP_CONTAINED_BY_LOCK:
			/*
			 * Whole lock consumed: free it and keep scanning,
			 * since more locks may lie inside the unlock range.
			 */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			zfree(KT_LOCKF, overlap);
			continue;

		case OVERLAP_STARTS_BEFORE_LOCK:
			/* Trim the tail, then keep scanning past it. */
			assert(unlock->lf_start > 0);
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case OVERLAP_ENDS_AFTER_LOCK:
			/* Trim the front; nothing further can overlap. */
			assert(unlkend < OFF_MAX);
			overlap->lf_start = unlkend + 1;
			break;
		}
		/* Cases that 'break' above cannot have further overlaps. */
		break;
	}
#ifdef LOCKF_DEBUGGING
	if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
		lf_printlist("lf_clearlock", unlock);
	}
#endif /* LOCKF_DEBUGGING */
	return 0;
}
1071*33de042dSApple OSS Distributions
1072*33de042dSApple OSS Distributions
1073*33de042dSApple OSS Distributions /*
1074*33de042dSApple OSS Distributions * lf_transferlock
1075*33de042dSApple OSS Distributions *
 * Description:	Transfer a given lock from the old proc to the new proc
 *		during exec
 *
 * Parameters:	transfer		The lock to transfer
 *
 * Returns:	0			Success
 *
 * Notes:	A caller may transfer all the locks owned by the caller by
 *		specifying the entire file range; locks owned by other
 *		callers are not affected by this operation.
1085*33de042dSApple OSS Distributions */
1086*33de042dSApple OSS Distributions static int
lf_transferlock(struct lockf * transfer)1087*33de042dSApple OSS Distributions lf_transferlock(struct lockf *transfer)
1088*33de042dSApple OSS Distributions {
1089*33de042dSApple OSS Distributions struct lockf **head = transfer->lf_head;
1090*33de042dSApple OSS Distributions struct lockf *lf = *head;
1091*33de042dSApple OSS Distributions struct lockf *overlap, **prev;
1092*33de042dSApple OSS Distributions overlap_t ovcase;
1093*33de042dSApple OSS Distributions
1094*33de042dSApple OSS Distributions if (lf == NOLOCKF) {
1095*33de042dSApple OSS Distributions return 0;
1096*33de042dSApple OSS Distributions }
1097*33de042dSApple OSS Distributions #ifdef LOCKF_DEBUGGING
1098*33de042dSApple OSS Distributions if (transfer->lf_type != F_TRANSFER) {
1099*33de042dSApple OSS Distributions panic("lf_transferlock: bad type");
1100*33de042dSApple OSS Distributions }
1101*33de042dSApple OSS Distributions if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
1102*33de042dSApple OSS Distributions lf_print("lf_transferlock", transfer);
1103*33de042dSApple OSS Distributions }
1104*33de042dSApple OSS Distributions #endif /* LOCKF_DEBUGGING */
1105*33de042dSApple OSS Distributions prev = head;
1106*33de042dSApple OSS Distributions while ((ovcase = lf_findoverlap(lf, transfer, SELF, &prev, &overlap)) != OVERLAP_NONE) {
1107*33de042dSApple OSS Distributions /* For POSIX Locks, change lf_id and lf_owner */
1108*33de042dSApple OSS Distributions if (overlap->lf_flags & F_POSIX) {
1109*33de042dSApple OSS Distributions overlap->lf_id = (caddr_t)transfer->lf_owner;
1110*33de042dSApple OSS Distributions overlap->lf_owner = transfer->lf_owner;
1111*33de042dSApple OSS Distributions } else if (overlap->lf_flags & F_OFD_LOCK) {
1112*33de042dSApple OSS Distributions /* Change the owner of the ofd style lock, if there is an owner */
1113*33de042dSApple OSS Distributions if (overlap->lf_owner) {
1114*33de042dSApple OSS Distributions overlap->lf_owner = transfer->lf_owner;
1115*33de042dSApple OSS Distributions }
1116*33de042dSApple OSS Distributions }
1117*33de042dSApple OSS Distributions /* Find the next lock */
1118*33de042dSApple OSS Distributions lf = overlap->lf_next;
1119*33de042dSApple OSS Distributions }
1120*33de042dSApple OSS Distributions #ifdef LOCKF_DEBUGGING
1121*33de042dSApple OSS Distributions if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
1122*33de042dSApple OSS Distributions lf_printlist("lf_transferlock", transfer);
1123*33de042dSApple OSS Distributions }
1124*33de042dSApple OSS Distributions #endif /* LOCKF_DEBUGGING */
1125*33de042dSApple OSS Distributions return 0;
1126*33de042dSApple OSS Distributions }
1127*33de042dSApple OSS Distributions
1128*33de042dSApple OSS Distributions
1129*33de042dSApple OSS Distributions /*
1130*33de042dSApple OSS Distributions * lf_getlock
1131*33de042dSApple OSS Distributions *
1132*33de042dSApple OSS Distributions * Description: Check whether there is a blocking lock, and if so return
1133*33de042dSApple OSS Distributions * its process identifier into the lock being requested.
1134*33de042dSApple OSS Distributions *
1135*33de042dSApple OSS Distributions * Parameters: lock Pointer to lock to test for blocks
1136*33de042dSApple OSS Distributions * fl Pointer to flock structure to receive
1137*33de042dSApple OSS Distributions * the blocking lock information, if a
1138*33de042dSApple OSS Distributions * blocking lock is found.
1139*33de042dSApple OSS Distributions * matchpid -1, or pid value to match in lookup.
1140*33de042dSApple OSS Distributions *
1141*33de042dSApple OSS Distributions * Returns: 0 Success
1142*33de042dSApple OSS Distributions *
1143*33de042dSApple OSS Distributions * Implicit Returns:
1144*33de042dSApple OSS Distributions * *fl Contents modified to reflect the
1145*33de042dSApple OSS Distributions * blocking lock, if one is found; not
1146*33de042dSApple OSS Distributions * modified otherwise
1147*33de042dSApple OSS Distributions *
1148*33de042dSApple OSS Distributions * Notes: fl->l_pid will be (-1) for file locks and will only be set to
1149*33de042dSApple OSS Distributions * the blocking process ID for advisory record locks.
1150*33de042dSApple OSS Distributions */
1151*33de042dSApple OSS Distributions static int
lf_getlock(struct lockf * lock,struct flock * fl,pid_t matchpid)1152*33de042dSApple OSS Distributions lf_getlock(struct lockf *lock, struct flock *fl, pid_t matchpid)
1153*33de042dSApple OSS Distributions {
1154*33de042dSApple OSS Distributions struct lockf *block;
1155*33de042dSApple OSS Distributions
1156*33de042dSApple OSS Distributions #ifdef LOCKF_DEBUGGING
1157*33de042dSApple OSS Distributions if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
1158*33de042dSApple OSS Distributions lf_print("lf_getlock", lock);
1159*33de042dSApple OSS Distributions }
1160*33de042dSApple OSS Distributions #endif /* LOCKF_DEBUGGING */
1161*33de042dSApple OSS Distributions
1162*33de042dSApple OSS Distributions if ((block = lf_getblock(lock, matchpid))) {
1163*33de042dSApple OSS Distributions fl->l_type = block->lf_type;
1164*33de042dSApple OSS Distributions fl->l_whence = SEEK_SET;
1165*33de042dSApple OSS Distributions fl->l_start = block->lf_start;
1166*33de042dSApple OSS Distributions if (block->lf_end == -1 ||
1167*33de042dSApple OSS Distributions (block->lf_start == 0 && LF_END(block) == OFF_MAX)) {
1168*33de042dSApple OSS Distributions fl->l_len = 0;
1169*33de042dSApple OSS Distributions } else {
1170*33de042dSApple OSS Distributions fl->l_len = LF_END(block) - block->lf_start + 1;
1171*33de042dSApple OSS Distributions }
1172*33de042dSApple OSS Distributions if (NULL != block->lf_owner) {
1173*33de042dSApple OSS Distributions /*
1174*33de042dSApple OSS Distributions * lf_owner is only non-NULL when the lock
1175*33de042dSApple OSS Distributions * "owner" can be unambiguously determined
1176*33de042dSApple OSS Distributions */
1177*33de042dSApple OSS Distributions fl->l_pid = proc_pid(block->lf_owner);
1178*33de042dSApple OSS Distributions } else {
1179*33de042dSApple OSS Distributions fl->l_pid = -1;
1180*33de042dSApple OSS Distributions }
1181*33de042dSApple OSS Distributions } else {
1182*33de042dSApple OSS Distributions fl->l_type = F_UNLCK;
1183*33de042dSApple OSS Distributions }
1184*33de042dSApple OSS Distributions return 0;
1185*33de042dSApple OSS Distributions }
1186*33de042dSApple OSS Distributions
1187*33de042dSApple OSS Distributions /*
1188*33de042dSApple OSS Distributions * lf_getblock
1189*33de042dSApple OSS Distributions *
1190*33de042dSApple OSS Distributions * Description: Walk the list of locks for an inode and return the first
1191*33de042dSApple OSS Distributions * blocking lock. A lock is considered blocking if we are not
1192*33de042dSApple OSS Distributions * the lock owner; otherwise, we are permitted to upgrade or
1193*33de042dSApple OSS Distributions * downgrade it, and it's not considered blocking.
1194*33de042dSApple OSS Distributions *
1195*33de042dSApple OSS Distributions * Parameters: lock The lock for which we are interested
1196*33de042dSApple OSS Distributions * in obtaining the blocking lock, if any
1197*33de042dSApple OSS Distributions * matchpid -1, or pid value to match in lookup.
1198*33de042dSApple OSS Distributions *
1199*33de042dSApple OSS Distributions * Returns: NOLOCKF No blocking lock exists
1200*33de042dSApple OSS Distributions * !NOLOCKF The address of the blocking lock's
1201*33de042dSApple OSS Distributions * struct lockf.
1202*33de042dSApple OSS Distributions */
1203*33de042dSApple OSS Distributions static struct lockf *
lf_getblock(struct lockf * lock,pid_t matchpid)1204*33de042dSApple OSS Distributions lf_getblock(struct lockf *lock, pid_t matchpid)
1205*33de042dSApple OSS Distributions {
1206*33de042dSApple OSS Distributions struct lockf **prev, *overlap, *lf = *(lock->lf_head);
1207*33de042dSApple OSS Distributions
1208*33de042dSApple OSS Distributions for (prev = lock->lf_head;
1209*33de042dSApple OSS Distributions lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != OVERLAP_NONE;
1210*33de042dSApple OSS Distributions lf = overlap->lf_next) {
1211*33de042dSApple OSS Distributions /*
1212*33de042dSApple OSS Distributions * Found an overlap.
1213*33de042dSApple OSS Distributions *
1214*33de042dSApple OSS Distributions * If we're matching pids, and it's a record lock,
1215*33de042dSApple OSS Distributions * or it's an OFD lock on a process-confined fd,
1216*33de042dSApple OSS Distributions * but the pid doesn't match, then keep on looking ..
1217*33de042dSApple OSS Distributions */
1218*33de042dSApple OSS Distributions if (matchpid != -1 &&
1219*33de042dSApple OSS Distributions (overlap->lf_flags & (F_POSIX | F_OFD_LOCK)) != 0 &&
1220*33de042dSApple OSS Distributions proc_pid(overlap->lf_owner) != matchpid) {
1221*33de042dSApple OSS Distributions continue;
1222*33de042dSApple OSS Distributions }
1223*33de042dSApple OSS Distributions
1224*33de042dSApple OSS Distributions /*
1225*33de042dSApple OSS Distributions * does it block us?
1226*33de042dSApple OSS Distributions */
1227*33de042dSApple OSS Distributions if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)) {
1228*33de042dSApple OSS Distributions return overlap;
1229*33de042dSApple OSS Distributions }
1230*33de042dSApple OSS Distributions }
1231*33de042dSApple OSS Distributions return NOLOCKF;
1232*33de042dSApple OSS Distributions }
1233*33de042dSApple OSS Distributions
1234*33de042dSApple OSS Distributions
1235*33de042dSApple OSS Distributions /*
1236*33de042dSApple OSS Distributions * lf_findoverlap
1237*33de042dSApple OSS Distributions *
1238*33de042dSApple OSS Distributions * Description: Walk the list of locks to find an overlapping lock (if any).
1239*33de042dSApple OSS Distributions *
1240*33de042dSApple OSS Distributions * Parameters: lf First lock on lock list
1241*33de042dSApple OSS Distributions * lock The lock we are checking for an overlap
1242*33de042dSApple OSS Distributions * check Check type
 *		prev			pointer to pointer to contain the
 *					address of the previous lock's next
 *					pointer, if an overlap is found
1246*33de042dSApple OSS Distributions * overlap pointer to pointer to contain address
1247*33de042dSApple OSS Distributions * of overlapping lock
1248*33de042dSApple OSS Distributions *
1249*33de042dSApple OSS Distributions * Returns: OVERLAP_NONE
1250*33de042dSApple OSS Distributions * OVERLAP_EQUALS_LOCK
1251*33de042dSApple OSS Distributions * OVERLAP_CONTAINS_LOCK
1252*33de042dSApple OSS Distributions * OVERLAP_CONTAINED_BY_LOCK
1253*33de042dSApple OSS Distributions * OVERLAP_STARTS_BEFORE_LOCK
1254*33de042dSApple OSS Distributions * OVERLAP_ENDS_AFTER_LOCK
1255*33de042dSApple OSS Distributions *
1256*33de042dSApple OSS Distributions * Implicit Returns:
1257*33de042dSApple OSS Distributions * *prev The address of the next pointer in the
1258*33de042dSApple OSS Distributions * lock previous to the overlapping lock;
1259*33de042dSApple OSS Distributions * this is generally used to relink the
1260*33de042dSApple OSS Distributions * lock list, avoiding a second iteration.
1261*33de042dSApple OSS Distributions * *overlap The pointer to the overlapping lock
1262*33de042dSApple OSS Distributions * itself; this is used to return data in
1263*33de042dSApple OSS Distributions * the check == OTHERS case, and for the
1264*33de042dSApple OSS Distributions * caller to modify the overlapping lock,
1265*33de042dSApple OSS Distributions * in the check == SELF case
1266*33de042dSApple OSS Distributions *
 * Note:	This returns only the FIRST overlapping lock.  There may be
 *		more than one.  lf_getlock will return the first blocking lock,
 *		while lf_setlock will iterate over all overlapping locks to
 *		resolve each overlap in turn.
 *
1271*33de042dSApple OSS Distributions * The check parameter can be SELF, meaning we are looking for
1272*33de042dSApple OSS Distributions * overlapping locks owned by us, or it can be OTHERS, meaning
1273*33de042dSApple OSS Distributions * we are looking for overlapping locks owned by someone else so
1274*33de042dSApple OSS Distributions * we can report a blocking lock on an F_GETLK request.
1275*33de042dSApple OSS Distributions *
1276*33de042dSApple OSS Distributions * The value of *overlap and *prev are modified, even if there is
1277*33de042dSApple OSS Distributions * no overlapping lock found; always check the return code.
1278*33de042dSApple OSS Distributions */
1279*33de042dSApple OSS Distributions static overlap_t
lf_findoverlap(struct lockf * lf,struct lockf * lock,int type,struct lockf *** prev,struct lockf ** overlap)1280*33de042dSApple OSS Distributions lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
1281*33de042dSApple OSS Distributions struct lockf ***prev, struct lockf **overlap)
1282*33de042dSApple OSS Distributions {
1283*33de042dSApple OSS Distributions int found_self = 0;
1284*33de042dSApple OSS Distributions
1285*33de042dSApple OSS Distributions *overlap = lf;
1286*33de042dSApple OSS Distributions if (lf == NOLOCKF) {
1287*33de042dSApple OSS Distributions return 0;
1288*33de042dSApple OSS Distributions }
1289*33de042dSApple OSS Distributions #ifdef LOCKF_DEBUGGING
1290*33de042dSApple OSS Distributions if (LOCKF_DEBUGP(LF_DBG_LIST)) {
1291*33de042dSApple OSS Distributions lf_print("lf_findoverlap: looking for overlap in", lock);
1292*33de042dSApple OSS Distributions }
1293*33de042dSApple OSS Distributions #endif /* LOCKF_DEBUGGING */
1294*33de042dSApple OSS Distributions const off_t start = lock->lf_start;
1295*33de042dSApple OSS Distributions const off_t end = LF_END(lock);
1296*33de042dSApple OSS Distributions while (lf != NOLOCKF) {
1297*33de042dSApple OSS Distributions if (((type & SELF) && lf->lf_id != lock->lf_id) ||
1298*33de042dSApple OSS Distributions ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
1299*33de042dSApple OSS Distributions /*
1300*33de042dSApple OSS Distributions * Locks belonging to one process are adjacent on the
1301*33de042dSApple OSS Distributions * list, so if we've found any locks belonging to us,
1302*33de042dSApple OSS Distributions * and we're now seeing something else, then we've
1303*33de042dSApple OSS Distributions * examined all "self" locks. Note that bailing out
1304*33de042dSApple OSS Distributions * here is quite important; for coalescing, we assume
1305*33de042dSApple OSS Distributions * numerically adjacent locks from the same owner to
1306*33de042dSApple OSS Distributions * be adjacent on the list.
1307*33de042dSApple OSS Distributions */
1308*33de042dSApple OSS Distributions if ((type & SELF) && found_self) {
1309*33de042dSApple OSS Distributions return OVERLAP_NONE;
1310*33de042dSApple OSS Distributions }
1311*33de042dSApple OSS Distributions
1312*33de042dSApple OSS Distributions *prev = &lf->lf_next;
1313*33de042dSApple OSS Distributions *overlap = lf = lf->lf_next;
1314*33de042dSApple OSS Distributions continue;
1315*33de042dSApple OSS Distributions }
1316*33de042dSApple OSS Distributions
1317*33de042dSApple OSS Distributions if ((type & SELF)) {
1318*33de042dSApple OSS Distributions found_self = 1;
1319*33de042dSApple OSS Distributions }
1320*33de042dSApple OSS Distributions
1321*33de042dSApple OSS Distributions #ifdef LOCKF_DEBUGGING
1322*33de042dSApple OSS Distributions if (LOCKF_DEBUGP(LF_DBG_LIST)) {
1323*33de042dSApple OSS Distributions lf_print("\tchecking", lf);
1324*33de042dSApple OSS Distributions }
1325*33de042dSApple OSS Distributions #endif /* LOCKF_DEBUGGING */
1326*33de042dSApple OSS Distributions /*
1327*33de042dSApple OSS Distributions * OK, check for overlap
1328*33de042dSApple OSS Distributions */
1329*33de042dSApple OSS Distributions const off_t lfstart = lf->lf_start;
1330*33de042dSApple OSS Distributions const off_t lfend = LF_END(lf);
1331*33de042dSApple OSS Distributions
1332*33de042dSApple OSS Distributions if ((start > lfend) || (lfstart > end)) {
1333*33de042dSApple OSS Distributions /* Case 0 */
1334*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_LIST, "no overlap\n");
1335*33de042dSApple OSS Distributions
1336*33de042dSApple OSS Distributions /*
1337*33de042dSApple OSS Distributions * NOTE: assumes that locks for the same process are
1338*33de042dSApple OSS Distributions * nonintersecting and ordered.
1339*33de042dSApple OSS Distributions */
1340*33de042dSApple OSS Distributions if ((type & SELF) && lfstart > end) {
1341*33de042dSApple OSS Distributions return OVERLAP_NONE;
1342*33de042dSApple OSS Distributions }
1343*33de042dSApple OSS Distributions *prev = &lf->lf_next;
1344*33de042dSApple OSS Distributions *overlap = lf = lf->lf_next;
1345*33de042dSApple OSS Distributions continue;
1346*33de042dSApple OSS Distributions }
1347*33de042dSApple OSS Distributions if ((lfstart == start) && (lfend == end)) {
1348*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_LIST, "overlap == lock\n");
1349*33de042dSApple OSS Distributions return OVERLAP_EQUALS_LOCK;
1350*33de042dSApple OSS Distributions }
1351*33de042dSApple OSS Distributions if ((lfstart <= start) && (lfend >= end)) {
1352*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_LIST, "overlap contains lock\n");
1353*33de042dSApple OSS Distributions return OVERLAP_CONTAINS_LOCK;
1354*33de042dSApple OSS Distributions }
1355*33de042dSApple OSS Distributions if ((start <= lfstart) && (end >= lfend)) {
1356*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_LIST, "lock contains overlap\n");
1357*33de042dSApple OSS Distributions return OVERLAP_CONTAINED_BY_LOCK;
1358*33de042dSApple OSS Distributions }
1359*33de042dSApple OSS Distributions if ((lfstart < start) && (lfend >= start)) {
1360*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_LIST, "overlap starts before lock\n");
1361*33de042dSApple OSS Distributions return OVERLAP_STARTS_BEFORE_LOCK;
1362*33de042dSApple OSS Distributions }
1363*33de042dSApple OSS Distributions if ((lfstart > start) && (lfend > end)) {
1364*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_LIST, "overlap ends after lock\n");
1365*33de042dSApple OSS Distributions return OVERLAP_ENDS_AFTER_LOCK;
1366*33de042dSApple OSS Distributions }
1367*33de042dSApple OSS Distributions panic("lf_findoverlap: default");
1368*33de042dSApple OSS Distributions }
1369*33de042dSApple OSS Distributions return OVERLAP_NONE;
1370*33de042dSApple OSS Distributions }
1371*33de042dSApple OSS Distributions
1372*33de042dSApple OSS Distributions
1373*33de042dSApple OSS Distributions /*
1374*33de042dSApple OSS Distributions * lf_split
1375*33de042dSApple OSS Distributions *
1376*33de042dSApple OSS Distributions * Description: Split a lock and a contained region into two or three locks
1377*33de042dSApple OSS Distributions * as necessary.
1378*33de042dSApple OSS Distributions *
1379*33de042dSApple OSS Distributions * Parameters: lock1 Lock to split
1380*33de042dSApple OSS Distributions * lock2 Overlapping lock region requiring the
1381*33de042dSApple OSS Distributions * split (upgrade/downgrade/unlock)
1382*33de042dSApple OSS Distributions *
1383*33de042dSApple OSS Distributions * Returns: 0 Success
1384*33de042dSApple OSS Distributions * ENOLCK No memory for new lock
1385*33de042dSApple OSS Distributions *
1386*33de042dSApple OSS Distributions * Implicit Returns:
1387*33de042dSApple OSS Distributions * *lock1 Modified original lock
1388*33de042dSApple OSS Distributions * *lock2 Overlapping lock (inserted into list)
1389*33de042dSApple OSS Distributions * (new lock) Potential new lock inserted into list
1390*33de042dSApple OSS Distributions * if split results in 3 locks
1391*33de042dSApple OSS Distributions *
1392*33de042dSApple OSS Distributions * Notes: This operation can only fail if the split would result in three
1393*33de042dSApple OSS Distributions * locks, and there is insufficient memory to allocate the third
1394*33de042dSApple OSS Distributions * lock; in that case, neither of the locks will be modified.
1395*33de042dSApple OSS Distributions */
1396*33de042dSApple OSS Distributions static int
lf_split(struct lockf * lock1,struct lockf * lock2)1397*33de042dSApple OSS Distributions lf_split(struct lockf *lock1, struct lockf *lock2)
1398*33de042dSApple OSS Distributions {
1399*33de042dSApple OSS Distributions struct lockf *splitlock;
1400*33de042dSApple OSS Distributions
1401*33de042dSApple OSS Distributions #ifdef LOCKF_DEBUGGING
1402*33de042dSApple OSS Distributions if (LOCKF_DEBUGP(LF_DBG_LIST)) {
1403*33de042dSApple OSS Distributions lf_print("lf_split", lock1);
1404*33de042dSApple OSS Distributions lf_print("splitting from", lock2);
1405*33de042dSApple OSS Distributions }
1406*33de042dSApple OSS Distributions #endif /* LOCKF_DEBUGGING */
1407*33de042dSApple OSS Distributions /*
1408*33de042dSApple OSS Distributions * Check to see if splitting into only two pieces.
1409*33de042dSApple OSS Distributions */
1410*33de042dSApple OSS Distributions if (lock1->lf_start == lock2->lf_start) {
1411*33de042dSApple OSS Distributions assert(LF_END(lock2) < OFF_MAX);
1412*33de042dSApple OSS Distributions lock1->lf_start = LF_END(lock2) + 1;
1413*33de042dSApple OSS Distributions lock2->lf_next = lock1;
1414*33de042dSApple OSS Distributions return 0;
1415*33de042dSApple OSS Distributions }
1416*33de042dSApple OSS Distributions if (LF_END(lock1) == LF_END(lock2)) {
1417*33de042dSApple OSS Distributions assert(lock2->lf_start > 0);
1418*33de042dSApple OSS Distributions lock1->lf_end = lock2->lf_start - 1;
1419*33de042dSApple OSS Distributions lock2->lf_next = lock1->lf_next;
1420*33de042dSApple OSS Distributions lock1->lf_next = lock2;
1421*33de042dSApple OSS Distributions return 0;
1422*33de042dSApple OSS Distributions }
1423*33de042dSApple OSS Distributions /*
1424*33de042dSApple OSS Distributions * Make a new lock consisting of the last part of
1425*33de042dSApple OSS Distributions * the encompassing lock
1426*33de042dSApple OSS Distributions */
1427*33de042dSApple OSS Distributions splitlock = zalloc_flags(KT_LOCKF, Z_WAITOK | Z_NOFAIL);
1428*33de042dSApple OSS Distributions bcopy(lock1, splitlock, sizeof *splitlock);
1429*33de042dSApple OSS Distributions assert(LF_END(lock2) < OFF_MAX);
1430*33de042dSApple OSS Distributions splitlock->lf_start = LF_END(lock2) + 1;
1431*33de042dSApple OSS Distributions TAILQ_INIT(&splitlock->lf_blkhd);
1432*33de042dSApple OSS Distributions assert(lock2->lf_start > 0);
1433*33de042dSApple OSS Distributions lock1->lf_end = lock2->lf_start - 1;
1434*33de042dSApple OSS Distributions /*
1435*33de042dSApple OSS Distributions * OK, now link it in
1436*33de042dSApple OSS Distributions */
1437*33de042dSApple OSS Distributions splitlock->lf_next = lock1->lf_next;
1438*33de042dSApple OSS Distributions lock2->lf_next = splitlock;
1439*33de042dSApple OSS Distributions lock1->lf_next = lock2;
1440*33de042dSApple OSS Distributions
1441*33de042dSApple OSS Distributions return 0;
1442*33de042dSApple OSS Distributions }
1443*33de042dSApple OSS Distributions
1444*33de042dSApple OSS Distributions
1445*33de042dSApple OSS Distributions /*
1446*33de042dSApple OSS Distributions * lf_wakelock
1447*33de042dSApple OSS Distributions *
1448*33de042dSApple OSS Distributions * Wakeup a blocklist in the case of a downgrade or unlock, since others
1449*33de042dSApple OSS Distributions * waiting on the lock may now be able to acquire it.
1450*33de042dSApple OSS Distributions *
1451*33de042dSApple OSS Distributions * Parameters: listhead Lock list head on which waiters may
1452*33de042dSApple OSS Distributions * have pending locks
1453*33de042dSApple OSS Distributions *
1454*33de042dSApple OSS Distributions * Returns: <void>
1455*33de042dSApple OSS Distributions *
1456*33de042dSApple OSS Distributions * Notes: This function iterates a list of locks and wakes all waiters,
1457*33de042dSApple OSS Distributions * rather than only waiters for the contended regions. Because
1458*33de042dSApple OSS Distributions * of this, for heavily contended files, this can result in a
1459*33de042dSApple OSS Distributions * "thundering herd" situation. Refactoring the code could make
1460*33de042dSApple OSS Distributions * this operation more efficient, if heavy contention ever results
1461*33de042dSApple OSS Distributions * in a real-world performance problem.
1462*33de042dSApple OSS Distributions */
static void
lf_wakelock(struct lockf *listhead, boolean_t force_all)
{
	struct lockf *wakelock;
	boolean_t wake_all = TRUE;

	/*
	 * If the caller allows it and the lock was marked F_WAKE1_SAFE,
	 * wake only the first waiter and hand the remaining waiters to
	 * it, avoiding a thundering herd.
	 */
	if (force_all == FALSE && (listhead->lf_flags & F_WAKE1_SAFE)) {
		wake_all = FALSE;
	}

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);

		/* Detached waiter no longer blocks on anything. */
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUGGING
		if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
			lf_print("lf_wakelock: awakening", wakelock);
		}
#endif /* LOCKF_DEBUGGING */
		if (wake_all == FALSE) {
			/*
			 * If there are items on the list head block list,
			 * move them to the wakelock list instead, and then
			 * correct their lf_next pointers.
			 */
			if (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
				TAILQ_CONCAT(&wakelock->lf_blkhd, &listhead->lf_blkhd, lf_block);

				struct lockf *tlock;

				TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) {
					if (TAILQ_NEXT(tlock, lf_block) == tlock) {
						/* See rdar://10887303 */
						panic("cycle in wakelock list");
					}
					/* Re-blocked waiters now wait on wakelock. */
					tlock->lf_next = wakelock;
				}
			}
		}
		wakeup(wakelock);

		/* Wake-one mode: the single wakeup above is all we do. */
		if (wake_all == FALSE) {
			break;
		}
	}
}
1510*33de042dSApple OSS Distributions
1511*33de042dSApple OSS Distributions
1512*33de042dSApple OSS Distributions #ifdef LOCKF_DEBUGGING
1513*33de042dSApple OSS Distributions #define GET_LF_OWNER_PID(lf) (proc_pid((lf)->lf_owner))
1514*33de042dSApple OSS Distributions
1515*33de042dSApple OSS Distributions /*
1516*33de042dSApple OSS Distributions * lf_print DEBUG
1517*33de042dSApple OSS Distributions *
1518*33de042dSApple OSS Distributions * Print out a lock; lock information is prefixed by the string in 'tag'
1519*33de042dSApple OSS Distributions *
1520*33de042dSApple OSS Distributions * Parameters: tag A string tag for debugging
1521*33de042dSApple OSS Distributions * lock The lock whose information should be
1522*33de042dSApple OSS Distributions * displayed
1523*33de042dSApple OSS Distributions *
1524*33de042dSApple OSS Distributions * Returns: <void>
1525*33de042dSApple OSS Distributions */
1526*33de042dSApple OSS Distributions void
lf_print(const char * tag,struct lockf * lock)1527*33de042dSApple OSS Distributions lf_print(const char *tag, struct lockf *lock)
1528*33de042dSApple OSS Distributions {
1529*33de042dSApple OSS Distributions printf("%s: lock %p for ", tag, (void *)lock);
1530*33de042dSApple OSS Distributions if (lock->lf_flags & F_POSIX) {
1531*33de042dSApple OSS Distributions printf("proc %p (owner %d)",
1532*33de042dSApple OSS Distributions lock->lf_id, GET_LF_OWNER_PID(lock));
1533*33de042dSApple OSS Distributions } else if (lock->lf_flags & F_OFD_LOCK) {
1534*33de042dSApple OSS Distributions printf("fg %p (owner %d)",
1535*33de042dSApple OSS Distributions lock->lf_id, GET_LF_OWNER_PID(lock));
1536*33de042dSApple OSS Distributions } else {
1537*33de042dSApple OSS Distributions printf("id %p", (void *)lock->lf_id);
1538*33de042dSApple OSS Distributions }
1539*33de042dSApple OSS Distributions if (lock->lf_vnode != 0) {
1540*33de042dSApple OSS Distributions printf(" in vno %p, %s, start 0x%016llx, end 0x%016llx",
1541*33de042dSApple OSS Distributions lock->lf_vnode,
1542*33de042dSApple OSS Distributions lock->lf_type == F_RDLCK ? "shared" :
1543*33de042dSApple OSS Distributions lock->lf_type == F_WRLCK ? "exclusive" :
1544*33de042dSApple OSS Distributions lock->lf_type == F_UNLCK ? "unlock" : "unknown",
1545*33de042dSApple OSS Distributions (uint64_t)lock->lf_start, (uint64_t)lock->lf_end);
1546*33de042dSApple OSS Distributions } else {
1547*33de042dSApple OSS Distributions printf(" %s, start 0x%016llx, end 0x%016llx",
1548*33de042dSApple OSS Distributions lock->lf_type == F_RDLCK ? "shared" :
1549*33de042dSApple OSS Distributions lock->lf_type == F_WRLCK ? "exclusive" :
1550*33de042dSApple OSS Distributions lock->lf_type == F_UNLCK ? "unlock" : "unknown",
1551*33de042dSApple OSS Distributions (uint64_t)lock->lf_start, (uint64_t)lock->lf_end);
1552*33de042dSApple OSS Distributions }
1553*33de042dSApple OSS Distributions if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
1554*33de042dSApple OSS Distributions printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
1555*33de042dSApple OSS Distributions } else {
1556*33de042dSApple OSS Distributions printf("\n");
1557*33de042dSApple OSS Distributions }
1558*33de042dSApple OSS Distributions }
1559*33de042dSApple OSS Distributions
1560*33de042dSApple OSS Distributions
1561*33de042dSApple OSS Distributions /*
1562*33de042dSApple OSS Distributions * lf_printlist DEBUG
1563*33de042dSApple OSS Distributions *
1564*33de042dSApple OSS Distributions * Print out a lock list for the vnode associated with 'lock'; lock information
1565*33de042dSApple OSS Distributions * is prefixed by the string in 'tag'
1566*33de042dSApple OSS Distributions *
1567*33de042dSApple OSS Distributions * Parameters: tag A string tag for debugging
1568*33de042dSApple OSS Distributions * lock The lock whose vnode's lock list should
1569*33de042dSApple OSS Distributions * be displayed
1570*33de042dSApple OSS Distributions *
1571*33de042dSApple OSS Distributions * Returns: <void>
1572*33de042dSApple OSS Distributions */
1573*33de042dSApple OSS Distributions void
lf_printlist(const char * tag,struct lockf * lock)1574*33de042dSApple OSS Distributions lf_printlist(const char *tag, struct lockf *lock)
1575*33de042dSApple OSS Distributions {
1576*33de042dSApple OSS Distributions struct lockf *lf, *blk;
1577*33de042dSApple OSS Distributions
1578*33de042dSApple OSS Distributions if (lock->lf_vnode == 0) {
1579*33de042dSApple OSS Distributions return;
1580*33de042dSApple OSS Distributions }
1581*33de042dSApple OSS Distributions
1582*33de042dSApple OSS Distributions printf("%s: Lock list for vno %p:\n",
1583*33de042dSApple OSS Distributions tag, lock->lf_vnode);
1584*33de042dSApple OSS Distributions for (lf = lock->lf_vnode->v_lockf; lf; lf = lf->lf_next) {
1585*33de042dSApple OSS Distributions printf("\tlock %p for ", (void *)lf);
1586*33de042dSApple OSS Distributions if (lf->lf_flags & F_POSIX) {
1587*33de042dSApple OSS Distributions printf("proc %p (owner %d)",
1588*33de042dSApple OSS Distributions lf->lf_id, GET_LF_OWNER_PID(lf));
1589*33de042dSApple OSS Distributions } else if (lf->lf_flags & F_OFD_LOCK) {
1590*33de042dSApple OSS Distributions printf("fg %p (owner %d)",
1591*33de042dSApple OSS Distributions lf->lf_id, GET_LF_OWNER_PID(lf));
1592*33de042dSApple OSS Distributions } else {
1593*33de042dSApple OSS Distributions printf("id %p", (void *)lf->lf_id);
1594*33de042dSApple OSS Distributions }
1595*33de042dSApple OSS Distributions printf(", %s, start 0x%016llx, end 0x%016llx",
1596*33de042dSApple OSS Distributions lf->lf_type == F_RDLCK ? "shared" :
1597*33de042dSApple OSS Distributions lf->lf_type == F_WRLCK ? "exclusive" :
1598*33de042dSApple OSS Distributions lf->lf_type == F_UNLCK ? "unlock" :
1599*33de042dSApple OSS Distributions "unknown", (uint64_t)lf->lf_start, (uint64_t)lf->lf_end);
1600*33de042dSApple OSS Distributions TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
1601*33de042dSApple OSS Distributions printf("\n\t\tlock request %p for ", (void *)blk);
1602*33de042dSApple OSS Distributions if (blk->lf_flags & F_POSIX) {
1603*33de042dSApple OSS Distributions printf("proc %p (owner %d)",
1604*33de042dSApple OSS Distributions blk->lf_id, GET_LF_OWNER_PID(blk));
1605*33de042dSApple OSS Distributions } else if (blk->lf_flags & F_OFD_LOCK) {
1606*33de042dSApple OSS Distributions printf("fg %p (owner %d)",
1607*33de042dSApple OSS Distributions blk->lf_id, GET_LF_OWNER_PID(blk));
1608*33de042dSApple OSS Distributions } else {
1609*33de042dSApple OSS Distributions printf("id %p", (void *)blk->lf_id);
1610*33de042dSApple OSS Distributions }
1611*33de042dSApple OSS Distributions printf(", %s, start 0x%016llx, end 0x%016llx",
1612*33de042dSApple OSS Distributions blk->lf_type == F_RDLCK ? "shared" :
1613*33de042dSApple OSS Distributions blk->lf_type == F_WRLCK ? "exclusive" :
1614*33de042dSApple OSS Distributions blk->lf_type == F_UNLCK ? "unlock" :
1615*33de042dSApple OSS Distributions "unknown", (uint64_t)blk->lf_start,
1616*33de042dSApple OSS Distributions (uint64_t)blk->lf_end);
1617*33de042dSApple OSS Distributions if (!TAILQ_EMPTY(&blk->lf_blkhd)) {
1618*33de042dSApple OSS Distributions panic("lf_printlist: bad list");
1619*33de042dSApple OSS Distributions }
1620*33de042dSApple OSS Distributions }
1621*33de042dSApple OSS Distributions printf("\n");
1622*33de042dSApple OSS Distributions }
1623*33de042dSApple OSS Distributions }
1624*33de042dSApple OSS Distributions #endif /* LOCKF_DEBUGGING */
1625*33de042dSApple OSS Distributions
1626*33de042dSApple OSS Distributions #if IMPORTANCE_INHERITANCE
1627*33de042dSApple OSS Distributions
1628*33de042dSApple OSS Distributions /*
1629*33de042dSApple OSS Distributions * lf_hold_assertion
1630*33de042dSApple OSS Distributions *
1631*33de042dSApple OSS Distributions * Call task importance hold assertion on the owner of the lock.
1632*33de042dSApple OSS Distributions *
1633*33de042dSApple OSS Distributions * Parameters: block_task Owner of the lock blocking
1634*33de042dSApple OSS Distributions * current thread.
1635*33de042dSApple OSS Distributions *
1636*33de042dSApple OSS Distributions * block lock on which the current thread
1637*33de042dSApple OSS Distributions * is blocking on.
1638*33de042dSApple OSS Distributions *
1639*33de042dSApple OSS Distributions * Returns: <void>
1640*33de042dSApple OSS Distributions *
1641*33de042dSApple OSS Distributions * Notes: The task reference on block_task is not needed to be hold since
1642*33de042dSApple OSS Distributions * the current thread has vnode lock and block_task has a file
1643*33de042dSApple OSS Distributions * lock, thus removing file lock in exit requires block_task to
1644*33de042dSApple OSS Distributions * grab the vnode lock.
1645*33de042dSApple OSS Distributions */
1646*33de042dSApple OSS Distributions static void
lf_hold_assertion(task_t block_task,struct lockf * block)1647*33de042dSApple OSS Distributions lf_hold_assertion(task_t block_task, struct lockf *block)
1648*33de042dSApple OSS Distributions {
1649*33de042dSApple OSS Distributions if (task_importance_hold_file_lock_assertion(block_task, 1) == 0) {
1650*33de042dSApple OSS Distributions block->lf_boosted = LF_BOOSTED;
1651*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_IMPINH,
1652*33de042dSApple OSS Distributions "lf: importance hold file lock assert on pid %d lock %p\n",
1653*33de042dSApple OSS Distributions proc_pid(block->lf_owner), block);
1654*33de042dSApple OSS Distributions }
1655*33de042dSApple OSS Distributions }
1656*33de042dSApple OSS Distributions
1657*33de042dSApple OSS Distributions
1658*33de042dSApple OSS Distributions /*
1659*33de042dSApple OSS Distributions * lf_jump_to_queue_head
1660*33de042dSApple OSS Distributions *
1661*33de042dSApple OSS Distributions * Jump the lock from the tail of the block queue to the head of
1662*33de042dSApple OSS Distributions * the queue.
1663*33de042dSApple OSS Distributions *
1664*33de042dSApple OSS Distributions * Parameters: block lockf struct containing the
1665*33de042dSApple OSS Distributions * block queue.
1666*33de042dSApple OSS Distributions * lock lockf struct to be jumped to the
1667*33de042dSApple OSS Distributions * front.
1668*33de042dSApple OSS Distributions *
1669*33de042dSApple OSS Distributions * Returns: <void>
1670*33de042dSApple OSS Distributions */
1671*33de042dSApple OSS Distributions static void
lf_jump_to_queue_head(struct lockf * block,struct lockf * lock)1672*33de042dSApple OSS Distributions lf_jump_to_queue_head(struct lockf *block, struct lockf *lock)
1673*33de042dSApple OSS Distributions {
1674*33de042dSApple OSS Distributions /* Move the lock to the head of the block queue. */
1675*33de042dSApple OSS Distributions TAILQ_REMOVE(&block->lf_blkhd, lock, lf_block);
1676*33de042dSApple OSS Distributions TAILQ_INSERT_HEAD(&block->lf_blkhd, lock, lf_block);
1677*33de042dSApple OSS Distributions }
1678*33de042dSApple OSS Distributions
1679*33de042dSApple OSS Distributions
1680*33de042dSApple OSS Distributions /*
1681*33de042dSApple OSS Distributions * lf_drop_assertion
1682*33de042dSApple OSS Distributions *
1683*33de042dSApple OSS Distributions * Drops the task hold assertion.
1684*33de042dSApple OSS Distributions *
1685*33de042dSApple OSS Distributions * Parameters: block lockf struct holding the assertion.
1686*33de042dSApple OSS Distributions *
1687*33de042dSApple OSS Distributions * Returns: <void>
1688*33de042dSApple OSS Distributions */
1689*33de042dSApple OSS Distributions static void
lf_drop_assertion(struct lockf * block)1690*33de042dSApple OSS Distributions lf_drop_assertion(struct lockf *block)
1691*33de042dSApple OSS Distributions {
1692*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_IMPINH, "lf: %d: dropping assertion for lock %p\n",
1693*33de042dSApple OSS Distributions proc_pid(block->lf_owner), block);
1694*33de042dSApple OSS Distributions
1695*33de042dSApple OSS Distributions task_t current_task = proc_task(block->lf_owner);
1696*33de042dSApple OSS Distributions task_importance_drop_file_lock_assertion(current_task, 1);
1697*33de042dSApple OSS Distributions block->lf_boosted = LF_NOT_BOOSTED;
1698*33de042dSApple OSS Distributions }
1699*33de042dSApple OSS Distributions
1700*33de042dSApple OSS Distributions /*
1701*33de042dSApple OSS Distributions * lf_adjust_assertion
1702*33de042dSApple OSS Distributions *
1703*33de042dSApple OSS Distributions * Adjusts importance assertion of file lock. Goes through
1704*33de042dSApple OSS Distributions * all the blocking locks and checks if the file lock needs
1705*33de042dSApple OSS Distributions * to be boosted anymore.
1706*33de042dSApple OSS Distributions *
1707*33de042dSApple OSS Distributions * Parameters: block lockf structure which needs to be adjusted.
1708*33de042dSApple OSS Distributions *
1709*33de042dSApple OSS Distributions * Returns: <void>
1710*33de042dSApple OSS Distributions */
1711*33de042dSApple OSS Distributions static void
lf_adjust_assertion(struct lockf * block)1712*33de042dSApple OSS Distributions lf_adjust_assertion(struct lockf *block)
1713*33de042dSApple OSS Distributions {
1714*33de042dSApple OSS Distributions boolean_t drop_boost = TRUE;
1715*33de042dSApple OSS Distributions struct lockf *next;
1716*33de042dSApple OSS Distributions
1717*33de042dSApple OSS Distributions /* Return if the lock is not boosted */
1718*33de042dSApple OSS Distributions if (block->lf_boosted == LF_NOT_BOOSTED) {
1719*33de042dSApple OSS Distributions return;
1720*33de042dSApple OSS Distributions }
1721*33de042dSApple OSS Distributions
1722*33de042dSApple OSS Distributions TAILQ_FOREACH(next, &block->lf_blkhd, lf_block) {
1723*33de042dSApple OSS Distributions /* Check if block and next are same type of locks */
1724*33de042dSApple OSS Distributions if (((block->lf_flags & next->lf_flags & F_POSIX) != 0) ||
1725*33de042dSApple OSS Distributions ((block->lf_flags & next->lf_flags & F_OFD_LOCK) &&
1726*33de042dSApple OSS Distributions (block->lf_owner != next->lf_owner) &&
1727*33de042dSApple OSS Distributions (NULL != block->lf_owner && NULL != next->lf_owner))) {
1728*33de042dSApple OSS Distributions /* Check if next would be boosting block */
1729*33de042dSApple OSS Distributions if (task_is_importance_donor(proc_task(next->lf_owner)) &&
1730*33de042dSApple OSS Distributions task_is_importance_receiver_type(proc_task(block->lf_owner))) {
1731*33de042dSApple OSS Distributions /* Found a lock boosting block */
1732*33de042dSApple OSS Distributions drop_boost = FALSE;
1733*33de042dSApple OSS Distributions break;
1734*33de042dSApple OSS Distributions }
1735*33de042dSApple OSS Distributions }
1736*33de042dSApple OSS Distributions }
1737*33de042dSApple OSS Distributions
1738*33de042dSApple OSS Distributions if (drop_boost) {
1739*33de042dSApple OSS Distributions lf_drop_assertion(block);
1740*33de042dSApple OSS Distributions }
1741*33de042dSApple OSS Distributions }
1742*33de042dSApple OSS Distributions
1743*33de042dSApple OSS Distributions static void
lf_boost_blocking_proc(struct lockf * lock,struct lockf * block)1744*33de042dSApple OSS Distributions lf_boost_blocking_proc(struct lockf *lock, struct lockf *block)
1745*33de042dSApple OSS Distributions {
1746*33de042dSApple OSS Distributions task_t ltask = proc_task(lock->lf_owner);
1747*33de042dSApple OSS Distributions task_t btask = proc_task(block->lf_owner);
1748*33de042dSApple OSS Distributions
1749*33de042dSApple OSS Distributions /*
1750*33de042dSApple OSS Distributions * Check if ltask can donate importance. The
1751*33de042dSApple OSS Distributions * check of imp_donor bit is done without holding
1752*33de042dSApple OSS Distributions * any lock. The value may change after you read it,
1753*33de042dSApple OSS Distributions * but it is ok to boost a task while someone else is
1754*33de042dSApple OSS Distributions * unboosting you.
1755*33de042dSApple OSS Distributions *
1756*33de042dSApple OSS Distributions * TODO: Support live inheritance on file locks.
1757*33de042dSApple OSS Distributions */
1758*33de042dSApple OSS Distributions if (task_is_importance_donor(ltask)) {
1759*33de042dSApple OSS Distributions LOCKF_DEBUG(LF_DBG_IMPINH,
1760*33de042dSApple OSS Distributions "lf: %d: attempt to boost pid %d that holds lock %p\n",
1761*33de042dSApple OSS Distributions proc_pid(lock->lf_owner), proc_pid(block->lf_owner), block);
1762*33de042dSApple OSS Distributions
1763*33de042dSApple OSS Distributions if (block->lf_boosted != LF_BOOSTED &&
1764*33de042dSApple OSS Distributions task_is_importance_receiver_type(btask)) {
1765*33de042dSApple OSS Distributions lf_hold_assertion(btask, block);
1766*33de042dSApple OSS Distributions }
1767*33de042dSApple OSS Distributions lf_jump_to_queue_head(block, lock);
1768*33de042dSApple OSS Distributions }
1769*33de042dSApple OSS Distributions }
1770*33de042dSApple OSS Distributions #endif /* IMPORTANCE_INHERITANCE */
1771