1 /*
2  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_bmap_btree.h"
29 #include "xfs_alloc_btree.h"
30 #include "xfs_ialloc_btree.h"
31 #include "xfs_dinode.h"
32 #include "xfs_inode.h"
33 #include "xfs_btree.h"
34 #include "xfs_alloc.h"
35 #include "xfs_error.h"
36 #include "xfs_trace.h"
37 
38 
39 #define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
40 
41 #define	XFSA_FIXUP_BNO_OK	1
42 #define	XFSA_FIXUP_CNT_OK	2
43 
44 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
45 STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
46 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
47 STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
48 		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
49 STATIC void xfs_alloc_busy_trim(struct xfs_alloc_arg *,
50 		xfs_agblock_t, xfs_extlen_t, xfs_agblock_t *, xfs_extlen_t *);
51 
52 /*
53  * Lookup the record equal to [bno, len] in the btree given by cur.
54  */
55 STATIC int				/* error */
xfs_alloc_lookup_eq(struct xfs_btree_cur * cur,xfs_agblock_t bno,xfs_extlen_t len,int * stat)56 xfs_alloc_lookup_eq(
57 	struct xfs_btree_cur	*cur,	/* btree cursor */
58 	xfs_agblock_t		bno,	/* starting block of extent */
59 	xfs_extlen_t		len,	/* length of extent */
60 	int			*stat)	/* success/failure */
61 {
62 	cur->bc_rec.a.ar_startblock = bno;
63 	cur->bc_rec.a.ar_blockcount = len;
64 	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
65 }
66 
67 /*
68  * Lookup the first record greater than or equal to [bno, len]
69  * in the btree given by cur.
70  */
71 STATIC int				/* error */
xfs_alloc_lookup_ge(struct xfs_btree_cur * cur,xfs_agblock_t bno,xfs_extlen_t len,int * stat)72 xfs_alloc_lookup_ge(
73 	struct xfs_btree_cur	*cur,	/* btree cursor */
74 	xfs_agblock_t		bno,	/* starting block of extent */
75 	xfs_extlen_t		len,	/* length of extent */
76 	int			*stat)	/* success/failure */
77 {
78 	cur->bc_rec.a.ar_startblock = bno;
79 	cur->bc_rec.a.ar_blockcount = len;
80 	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
81 }
82 
83 /*
84  * Lookup the first record less than or equal to [bno, len]
85  * in the btree given by cur.
86  */
87 int					/* error */
xfs_alloc_lookup_le(struct xfs_btree_cur * cur,xfs_agblock_t bno,xfs_extlen_t len,int * stat)88 xfs_alloc_lookup_le(
89 	struct xfs_btree_cur	*cur,	/* btree cursor */
90 	xfs_agblock_t		bno,	/* starting block of extent */
91 	xfs_extlen_t		len,	/* length of extent */
92 	int			*stat)	/* success/failure */
93 {
94 	cur->bc_rec.a.ar_startblock = bno;
95 	cur->bc_rec.a.ar_blockcount = len;
96 	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
97 }
98 
99 /*
100  * Update the record referred to by cur to the value given
101  * by [bno, len].
102  * This either works (return 0) or gets an EFSCORRUPTED error.
103  */
104 STATIC int				/* error */
xfs_alloc_update(struct xfs_btree_cur * cur,xfs_agblock_t bno,xfs_extlen_t len)105 xfs_alloc_update(
106 	struct xfs_btree_cur	*cur,	/* btree cursor */
107 	xfs_agblock_t		bno,	/* starting block of extent */
108 	xfs_extlen_t		len)	/* length of extent */
109 {
110 	union xfs_btree_rec	rec;
111 
112 	rec.alloc.ar_startblock = cpu_to_be32(bno);
113 	rec.alloc.ar_blockcount = cpu_to_be32(len);
114 	return xfs_btree_update(cur, &rec);
115 }
116 
117 /*
118  * Get the data from the pointed-to record.
119  */
120 int					/* error */
xfs_alloc_get_rec(struct xfs_btree_cur * cur,xfs_agblock_t * bno,xfs_extlen_t * len,int * stat)121 xfs_alloc_get_rec(
122 	struct xfs_btree_cur	*cur,	/* btree cursor */
123 	xfs_agblock_t		*bno,	/* output: starting block of extent */
124 	xfs_extlen_t		*len,	/* output: length of extent */
125 	int			*stat)	/* output: success/failure */
126 {
127 	union xfs_btree_rec	*rec;
128 	int			error;
129 
130 	error = xfs_btree_get_rec(cur, &rec, stat);
131 	if (!error && *stat == 1) {
132 		*bno = be32_to_cpu(rec->alloc.ar_startblock);
133 		*len = be32_to_cpu(rec->alloc.ar_blockcount);
134 	}
135 	return error;
136 }
137 
138 /*
139  * Compute aligned version of the found extent.
140  * Takes alignment and min length into account.
141  */
142 STATIC void
xfs_alloc_compute_aligned(xfs_alloc_arg_t * args,xfs_agblock_t foundbno,xfs_extlen_t foundlen,xfs_agblock_t * resbno,xfs_extlen_t * reslen)143 xfs_alloc_compute_aligned(
144 	xfs_alloc_arg_t	*args,		/* allocation argument structure */
145 	xfs_agblock_t	foundbno,	/* starting block in found extent */
146 	xfs_extlen_t	foundlen,	/* length in found extent */
147 	xfs_agblock_t	*resbno,	/* result block number */
148 	xfs_extlen_t	*reslen)	/* result length */
149 {
150 	xfs_agblock_t	bno;
151 	xfs_extlen_t	len;
152 
153 	/* Trim busy sections out of found extent */
154 	xfs_alloc_busy_trim(args, foundbno, foundlen, &bno, &len);
155 
156 	if (args->alignment > 1 && len >= args->minlen) {
157 		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);
158 		xfs_extlen_t	diff = aligned_bno - bno;
159 
160 		*resbno = aligned_bno;
161 		*reslen = diff >= len ? 0 : len - diff;
162 	} else {
163 		*resbno = bno;
164 		*reslen = len;
165 	}
166 }
167 
168 /*
169  * Compute best start block and diff for "near" allocations.
170  * freelen >= wantlen already checked by caller.
171  */
172 STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(xfs_agblock_t wantbno,xfs_extlen_t wantlen,xfs_extlen_t alignment,xfs_agblock_t freebno,xfs_extlen_t freelen,xfs_agblock_t * newbnop)173 xfs_alloc_compute_diff(
174 	xfs_agblock_t	wantbno,	/* target starting block */
175 	xfs_extlen_t	wantlen,	/* target length */
176 	xfs_extlen_t	alignment,	/* target alignment */
177 	xfs_agblock_t	freebno,	/* freespace's starting block */
178 	xfs_extlen_t	freelen,	/* freespace's length */
179 	xfs_agblock_t	*newbnop)	/* result: best start block from free */
180 {
181 	xfs_agblock_t	freeend;	/* end of freespace extent */
182 	xfs_agblock_t	newbno1;	/* return block number */
183 	xfs_agblock_t	newbno2;	/* other new block number */
184 	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
185 	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
186 	xfs_agblock_t	wantend;	/* end of target extent */
187 
188 	ASSERT(freelen >= wantlen);
189 	freeend = freebno + freelen;
190 	wantend = wantbno + wantlen;
191 	if (freebno >= wantbno) {
192 		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
193 			newbno1 = NULLAGBLOCK;
194 	} else if (freeend >= wantend && alignment > 1) {
195 		newbno1 = roundup(wantbno, alignment);
196 		newbno2 = newbno1 - alignment;
197 		if (newbno1 >= freeend)
198 			newbno1 = NULLAGBLOCK;
199 		else
200 			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
201 		if (newbno2 < freebno)
202 			newbno2 = NULLAGBLOCK;
203 		else
204 			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
205 		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
206 			if (newlen1 < newlen2 ||
207 			    (newlen1 == newlen2 &&
208 			     XFS_ABSDIFF(newbno1, wantbno) >
209 			     XFS_ABSDIFF(newbno2, wantbno)))
210 				newbno1 = newbno2;
211 		} else if (newbno2 != NULLAGBLOCK)
212 			newbno1 = newbno2;
213 	} else if (freeend >= wantend) {
214 		newbno1 = wantbno;
215 	} else if (alignment > 1) {
216 		newbno1 = roundup(freeend - wantlen, alignment);
217 		if (newbno1 > freeend - wantlen &&
218 		    newbno1 - alignment >= freebno)
219 			newbno1 -= alignment;
220 		else if (newbno1 >= freeend)
221 			newbno1 = NULLAGBLOCK;
222 	} else
223 		newbno1 = freeend - wantlen;
224 	*newbnop = newbno1;
225 	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
226 }
227 
228 /*
229  * Fix up the length, based on mod and prod.
230  * len should be k * prod + mod for some k.
231  * If len is too small it is returned unchanged.
232  * If len hits maxlen it is left alone.
233  */
234 STATIC void
xfs_alloc_fix_len(xfs_alloc_arg_t * args)235 xfs_alloc_fix_len(
236 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
237 {
238 	xfs_extlen_t	k;
239 	xfs_extlen_t	rlen;
240 
241 	ASSERT(args->mod < args->prod);
242 	rlen = args->len;
243 	ASSERT(rlen >= args->minlen);
244 	ASSERT(rlen <= args->maxlen);
245 	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
246 	    (args->mod == 0 && rlen < args->prod))
247 		return;
248 	k = rlen % args->prod;
249 	if (k == args->mod)
250 		return;
251 	if (k > args->mod) {
252 		if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
253 			return;
254 	} else {
255 		if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
256 		    (int)args->minlen)
257 			return;
258 	}
259 	ASSERT(rlen >= args->minlen);
260 	ASSERT(rlen <= args->maxlen);
261 	args->len = rlen;
262 }
263 
264 /*
265  * Fix up length if there is too little space left in the a.g.
266  * Return 1 if ok, 0 if too little, should give up.
267  */
268 STATIC int
xfs_alloc_fix_minleft(xfs_alloc_arg_t * args)269 xfs_alloc_fix_minleft(
270 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
271 {
272 	xfs_agf_t	*agf;		/* a.g. freelist header */
273 	int		diff;		/* free space difference */
274 
275 	if (args->minleft == 0)
276 		return 1;
277 	agf = XFS_BUF_TO_AGF(args->agbp);
278 	diff = be32_to_cpu(agf->agf_freeblks)
279 		- args->len - args->minleft;
280 	if (diff >= 0)
281 		return 1;
282 	args->len += diff;		/* shrink the allocated space */
283 	if (args->len >= args->minlen)
284 		return 1;
285 	args->agbno = NULLAGBLOCK;
286 	return 0;
287 }
288 
289 /*
290  * Update the two btrees, logically removing from freespace the extent
291  * starting at rbno, rlen blocks.  The extent is contained within the
292  * actual (current) free extent fbno for flen blocks.
293  * Flags are passed in indicating whether the cursors are set to the
294  * relevant records.
295  */
296 STATIC int				/* error code */
xfs_alloc_fixup_trees(xfs_btree_cur_t * cnt_cur,xfs_btree_cur_t * bno_cur,xfs_agblock_t fbno,xfs_extlen_t flen,xfs_agblock_t rbno,xfs_extlen_t rlen,int flags)297 xfs_alloc_fixup_trees(
298 	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
299 	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
300 	xfs_agblock_t	fbno,		/* starting block of free extent */
301 	xfs_extlen_t	flen,		/* length of free extent */
302 	xfs_agblock_t	rbno,		/* starting block of returned extent */
303 	xfs_extlen_t	rlen,		/* length of returned extent */
304 	int		flags)		/* flags, XFSA_FIXUP_... */
305 {
306 	int		error;		/* error code */
307 	int		i;		/* operation results */
308 	xfs_agblock_t	nfbno1;		/* first new free startblock */
309 	xfs_agblock_t	nfbno2;		/* second new free startblock */
310 	xfs_extlen_t	nflen1=0;	/* first new free length */
311 	xfs_extlen_t	nflen2=0;	/* second new free length */
312 
313 	/*
314 	 * Look up the record in the by-size tree if necessary.
315 	 */
316 	if (flags & XFSA_FIXUP_CNT_OK) {
317 #ifdef DEBUG
318 		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
319 			return error;
320 		XFS_WANT_CORRUPTED_RETURN(
321 			i == 1 && nfbno1 == fbno && nflen1 == flen);
322 #endif
323 	} else {
324 		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
325 			return error;
326 		XFS_WANT_CORRUPTED_RETURN(i == 1);
327 	}
328 	/*
329 	 * Look up the record in the by-block tree if necessary.
330 	 */
331 	if (flags & XFSA_FIXUP_BNO_OK) {
332 #ifdef DEBUG
333 		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
334 			return error;
335 		XFS_WANT_CORRUPTED_RETURN(
336 			i == 1 && nfbno1 == fbno && nflen1 == flen);
337 #endif
338 	} else {
339 		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
340 			return error;
341 		XFS_WANT_CORRUPTED_RETURN(i == 1);
342 	}
343 
344 #ifdef DEBUG
345 	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
346 		struct xfs_btree_block	*bnoblock;
347 		struct xfs_btree_block	*cntblock;
348 
349 		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
350 		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
351 
352 		XFS_WANT_CORRUPTED_RETURN(
353 			bnoblock->bb_numrecs == cntblock->bb_numrecs);
354 	}
355 #endif
356 
357 	/*
358 	 * Deal with all four cases: the allocated record is contained
359 	 * within the freespace record, so we can have new freespace
360 	 * at either (or both) end, or no freespace remaining.
361 	 */
362 	if (rbno == fbno && rlen == flen)
363 		nfbno1 = nfbno2 = NULLAGBLOCK;
364 	else if (rbno == fbno) {
365 		nfbno1 = rbno + rlen;
366 		nflen1 = flen - rlen;
367 		nfbno2 = NULLAGBLOCK;
368 	} else if (rbno + rlen == fbno + flen) {
369 		nfbno1 = fbno;
370 		nflen1 = flen - rlen;
371 		nfbno2 = NULLAGBLOCK;
372 	} else {
373 		nfbno1 = fbno;
374 		nflen1 = rbno - fbno;
375 		nfbno2 = rbno + rlen;
376 		nflen2 = (fbno + flen) - nfbno2;
377 	}
378 	/*
379 	 * Delete the entry from the by-size btree.
380 	 */
381 	if ((error = xfs_btree_delete(cnt_cur, &i)))
382 		return error;
383 	XFS_WANT_CORRUPTED_RETURN(i == 1);
384 	/*
385 	 * Add new by-size btree entry(s).
386 	 */
387 	if (nfbno1 != NULLAGBLOCK) {
388 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
389 			return error;
390 		XFS_WANT_CORRUPTED_RETURN(i == 0);
391 		if ((error = xfs_btree_insert(cnt_cur, &i)))
392 			return error;
393 		XFS_WANT_CORRUPTED_RETURN(i == 1);
394 	}
395 	if (nfbno2 != NULLAGBLOCK) {
396 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
397 			return error;
398 		XFS_WANT_CORRUPTED_RETURN(i == 0);
399 		if ((error = xfs_btree_insert(cnt_cur, &i)))
400 			return error;
401 		XFS_WANT_CORRUPTED_RETURN(i == 1);
402 	}
403 	/*
404 	 * Fix up the by-block btree entry(s).
405 	 */
406 	if (nfbno1 == NULLAGBLOCK) {
407 		/*
408 		 * No remaining freespace, just delete the by-block tree entry.
409 		 */
410 		if ((error = xfs_btree_delete(bno_cur, &i)))
411 			return error;
412 		XFS_WANT_CORRUPTED_RETURN(i == 1);
413 	} else {
414 		/*
415 		 * Update the by-block entry to start later|be shorter.
416 		 */
417 		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
418 			return error;
419 	}
420 	if (nfbno2 != NULLAGBLOCK) {
421 		/*
422 		 * 2 resulting free entries, need to add one.
423 		 */
424 		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
425 			return error;
426 		XFS_WANT_CORRUPTED_RETURN(i == 0);
427 		if ((error = xfs_btree_insert(bno_cur, &i)))
428 			return error;
429 		XFS_WANT_CORRUPTED_RETURN(i == 1);
430 	}
431 	return 0;
432 }
433 
434 /*
435  * Read in the allocation group free block array.
436  */
437 STATIC int				/* error */
xfs_alloc_read_agfl(xfs_mount_t * mp,xfs_trans_t * tp,xfs_agnumber_t agno,xfs_buf_t ** bpp)438 xfs_alloc_read_agfl(
439 	xfs_mount_t	*mp,		/* mount point structure */
440 	xfs_trans_t	*tp,		/* transaction pointer */
441 	xfs_agnumber_t	agno,		/* allocation group number */
442 	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
443 {
444 	xfs_buf_t	*bp;		/* return value */
445 	int		error;
446 
447 	ASSERT(agno != NULLAGNUMBER);
448 	error = xfs_trans_read_buf(
449 			mp, tp, mp->m_ddev_targp,
450 			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
451 			XFS_FSS_TO_BB(mp, 1), 0, &bp);
452 	if (error)
453 		return error;
454 	ASSERT(!xfs_buf_geterror(bp));
455 	xfs_buf_set_ref(bp, XFS_AGFL_REF);
456 	*bpp = bp;
457 	return 0;
458 }
459 
460 STATIC int
xfs_alloc_update_counters(struct xfs_trans * tp,struct xfs_perag * pag,struct xfs_buf * agbp,long len)461 xfs_alloc_update_counters(
462 	struct xfs_trans	*tp,
463 	struct xfs_perag	*pag,
464 	struct xfs_buf		*agbp,
465 	long			len)
466 {
467 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
468 
469 	pag->pagf_freeblks += len;
470 	be32_add_cpu(&agf->agf_freeblks, len);
471 
472 	xfs_trans_agblocks_delta(tp, len);
473 	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
474 		     be32_to_cpu(agf->agf_length)))
475 		return EFSCORRUPTED;
476 
477 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
478 	return 0;
479 }
480 
481 /*
482  * Allocation group level functions.
483  */
484 
485 /*
486  * Allocate a variable extent in the allocation group agno.
487  * Type and bno are used to determine where in the allocation group the
488  * extent will start.
489  * Extent's length (returned in *len) will be between minlen and maxlen,
490  * and of the form k * prod + mod unless there's nothing that large.
491  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
492  */
493 STATIC int			/* error */
xfs_alloc_ag_vextent(xfs_alloc_arg_t * args)494 xfs_alloc_ag_vextent(
495 	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
496 {
497 	int		error=0;
498 
499 	ASSERT(args->minlen > 0);
500 	ASSERT(args->maxlen > 0);
501 	ASSERT(args->minlen <= args->maxlen);
502 	ASSERT(args->mod < args->prod);
503 	ASSERT(args->alignment > 0);
504 	/*
505 	 * Branch to correct routine based on the type.
506 	 */
507 	args->wasfromfl = 0;
508 	switch (args->type) {
509 	case XFS_ALLOCTYPE_THIS_AG:
510 		error = xfs_alloc_ag_vextent_size(args);
511 		break;
512 	case XFS_ALLOCTYPE_NEAR_BNO:
513 		error = xfs_alloc_ag_vextent_near(args);
514 		break;
515 	case XFS_ALLOCTYPE_THIS_BNO:
516 		error = xfs_alloc_ag_vextent_exact(args);
517 		break;
518 	default:
519 		ASSERT(0);
520 		/* NOTREACHED */
521 	}
522 
523 	if (error || args->agbno == NULLAGBLOCK)
524 		return error;
525 
526 	ASSERT(args->len >= args->minlen);
527 	ASSERT(args->len <= args->maxlen);
528 	ASSERT(!args->wasfromfl || !args->isfl);
529 	ASSERT(args->agbno % args->alignment == 0);
530 
531 	if (!args->wasfromfl) {
532 		error = xfs_alloc_update_counters(args->tp, args->pag,
533 						  args->agbp,
534 						  -((long)(args->len)));
535 		if (error)
536 			return error;
537 
538 		ASSERT(!xfs_alloc_busy_search(args->mp, args->agno,
539 					      args->agbno, args->len));
540 	}
541 
542 	if (!args->isfl) {
543 		xfs_trans_mod_sb(args->tp, args->wasdel ?
544 				 XFS_TRANS_SB_RES_FDBLOCKS :
545 				 XFS_TRANS_SB_FDBLOCKS,
546 				 -((long)(args->len)));
547 	}
548 
549 	XFS_STATS_INC(xs_allocx);
550 	XFS_STATS_ADD(xs_allocb, args->len);
551 	return error;
552 }
553 
554 /*
555  * Allocate a variable extent at exactly agno/bno.
556  * Extent's length (returned in *len) will be between minlen and maxlen,
557  * and of the form k * prod + mod unless there's nothing that large.
558  * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
559  */
560 STATIC int			/* error */
xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t * args)561 xfs_alloc_ag_vextent_exact(
562 	xfs_alloc_arg_t	*args)	/* allocation argument structure */
563 {
564 	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
565 	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
566 	int		error;
567 	xfs_agblock_t	fbno;	/* start block of found extent */
568 	xfs_extlen_t	flen;	/* length of found extent */
569 	xfs_agblock_t	tbno;	/* start block of trimmed extent */
570 	xfs_extlen_t	tlen;	/* length of trimmed extent */
571 	xfs_agblock_t	tend;	/* end block of trimmed extent */
572 	int		i;	/* success/failure of operation */
573 
574 	ASSERT(args->alignment == 1);
575 
576 	/*
577 	 * Allocate/initialize a cursor for the by-number freespace btree.
578 	 */
579 	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
580 					  args->agno, XFS_BTNUM_BNO);
581 
582 	/*
583 	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
584 	 * Look for the closest free block <= bno, it must contain bno
585 	 * if any free block does.
586 	 */
587 	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
588 	if (error)
589 		goto error0;
590 	if (!i)
591 		goto not_found;
592 
593 	/*
594 	 * Grab the freespace record.
595 	 */
596 	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
597 	if (error)
598 		goto error0;
599 	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
600 	ASSERT(fbno <= args->agbno);
601 
602 	/*
603 	 * Check for overlapping busy extents.
604 	 */
605 	xfs_alloc_busy_trim(args, fbno, flen, &tbno, &tlen);
606 
607 	/*
608 	 * Give up if the start of the extent is busy, or the freespace isn't
609 	 * long enough for the minimum request.
610 	 */
611 	if (tbno > args->agbno)
612 		goto not_found;
613 	if (tlen < args->minlen)
614 		goto not_found;
615 	tend = tbno + tlen;
616 	if (tend < args->agbno + args->minlen)
617 		goto not_found;
618 
619 	/*
620 	 * End of extent will be smaller of the freespace end and the
621 	 * maximal requested end.
622 	 *
623 	 * Fix the length according to mod and prod if given.
624 	 */
625 	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
626 						- args->agbno;
627 	xfs_alloc_fix_len(args);
628 	if (!xfs_alloc_fix_minleft(args))
629 		goto not_found;
630 
631 	ASSERT(args->agbno + args->len <= tend);
632 
633 	/*
634 	 * We are allocating agbno for args->len
635 	 * Allocate/initialize a cursor for the by-size btree.
636 	 */
637 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
638 		args->agno, XFS_BTNUM_CNT);
639 	ASSERT(args->agbno + args->len <=
640 		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
641 	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
642 				      args->len, XFSA_FIXUP_BNO_OK);
643 	if (error) {
644 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
645 		goto error0;
646 	}
647 
648 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
649 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
650 
651 	args->wasfromfl = 0;
652 	trace_xfs_alloc_exact_done(args);
653 	return 0;
654 
655 not_found:
656 	/* Didn't find it, return null. */
657 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
658 	args->agbno = NULLAGBLOCK;
659 	trace_xfs_alloc_exact_notfound(args);
660 	return 0;
661 
662 error0:
663 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
664 	trace_xfs_alloc_exact_error(args);
665 	return error;
666 }
667 
668 /*
669  * Search the btree in a given direction via the search cursor and compare
670  * the records found against the good extent we've already found.
671  */
672 STATIC int
xfs_alloc_find_best_extent(struct xfs_alloc_arg * args,struct xfs_btree_cur ** gcur,struct xfs_btree_cur ** scur,xfs_agblock_t gdiff,xfs_agblock_t * sbno,xfs_extlen_t * slen,xfs_agblock_t * sbnoa,xfs_extlen_t * slena,int dir)673 xfs_alloc_find_best_extent(
674 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
675 	struct xfs_btree_cur	**gcur,	/* good cursor */
676 	struct xfs_btree_cur	**scur,	/* searching cursor */
677 	xfs_agblock_t		gdiff,	/* difference for search comparison */
678 	xfs_agblock_t		*sbno,	/* extent found by search */
679 	xfs_extlen_t		*slen,	/* extent length */
680 	xfs_agblock_t		*sbnoa,	/* aligned extent found by search */
681 	xfs_extlen_t		*slena,	/* aligned extent length */
682 	int			dir)	/* 0 = search right, 1 = search left */
683 {
684 	xfs_agblock_t		new;
685 	xfs_agblock_t		sdiff;
686 	int			error;
687 	int			i;
688 
689 	/* The good extent is perfect, no need to  search. */
690 	if (!gdiff)
691 		goto out_use_good;
692 
693 	/*
694 	 * Look until we find a better one, run out of space or run off the end.
695 	 */
696 	do {
697 		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
698 		if (error)
699 			goto error0;
700 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
701 		xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);
702 
703 		/*
704 		 * The good extent is closer than this one.
705 		 */
706 		if (!dir) {
707 			if (*sbnoa >= args->agbno + gdiff)
708 				goto out_use_good;
709 		} else {
710 			if (*sbnoa <= args->agbno - gdiff)
711 				goto out_use_good;
712 		}
713 
714 		/*
715 		 * Same distance, compare length and pick the best.
716 		 */
717 		if (*slena >= args->minlen) {
718 			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
719 			xfs_alloc_fix_len(args);
720 
721 			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
722 						       args->alignment, *sbnoa,
723 						       *slena, &new);
724 
725 			/*
726 			 * Choose closer size and invalidate other cursor.
727 			 */
728 			if (sdiff < gdiff)
729 				goto out_use_search;
730 			goto out_use_good;
731 		}
732 
733 		if (!dir)
734 			error = xfs_btree_increment(*scur, 0, &i);
735 		else
736 			error = xfs_btree_decrement(*scur, 0, &i);
737 		if (error)
738 			goto error0;
739 	} while (i);
740 
741 out_use_good:
742 	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
743 	*scur = NULL;
744 	return 0;
745 
746 out_use_search:
747 	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
748 	*gcur = NULL;
749 	return 0;
750 
751 error0:
752 	/* caller invalidates cursors */
753 	return error;
754 }
755 
756 /*
757  * Allocate a variable extent near bno in the allocation group agno.
758  * Extent's length (returned in len) will be between minlen and maxlen,
759  * and of the form k * prod + mod unless there's nothing that large.
760  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
761  */
762 STATIC int				/* error */
xfs_alloc_ag_vextent_near(xfs_alloc_arg_t * args)763 xfs_alloc_ag_vextent_near(
764 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
765 {
766 	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
767 	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
768 	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
769 	xfs_agblock_t	gtbno;		/* start bno of right side entry */
770 	xfs_agblock_t	gtbnoa;		/* aligned ... */
771 	xfs_extlen_t	gtdiff;		/* difference to right side entry */
772 	xfs_extlen_t	gtlen;		/* length of right side entry */
773 	xfs_extlen_t	gtlena;		/* aligned ... */
774 	xfs_agblock_t	gtnew;		/* useful start bno of right side */
775 	int		error;		/* error code */
776 	int		i;		/* result code, temporary */
777 	int		j;		/* result code, temporary */
778 	xfs_agblock_t	ltbno;		/* start bno of left side entry */
779 	xfs_agblock_t	ltbnoa;		/* aligned ... */
780 	xfs_extlen_t	ltdiff;		/* difference to left side entry */
781 	xfs_extlen_t	ltlen;		/* length of left side entry */
782 	xfs_extlen_t	ltlena;		/* aligned ... */
783 	xfs_agblock_t	ltnew;		/* useful start bno of left side */
784 	xfs_extlen_t	rlen;		/* length of returned extent */
785 	int		forced = 0;
786 #if defined(DEBUG) && defined(__KERNEL__)
787 	/*
788 	 * Randomly don't execute the first algorithm.
789 	 */
790 	int		dofirst;	/* set to do first algorithm */
791 
792 	dofirst = random32() & 1;
793 #endif
794 
795 restart:
796 	bno_cur_lt = NULL;
797 	bno_cur_gt = NULL;
798 	ltlen = 0;
799 	gtlena = 0;
800 	ltlena = 0;
801 
802 	/*
803 	 * Get a cursor for the by-size btree.
804 	 */
805 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
806 		args->agno, XFS_BTNUM_CNT);
807 
808 	/*
809 	 * See if there are any free extents as big as maxlen.
810 	 */
811 	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
812 		goto error0;
813 	/*
814 	 * If none, then pick up the last entry in the tree unless the
815 	 * tree is empty.
816 	 */
817 	if (!i) {
818 		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
819 				&ltlen, &i)))
820 			goto error0;
821 		if (i == 0 || ltlen == 0) {
822 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
823 			trace_xfs_alloc_near_noentry(args);
824 			return 0;
825 		}
826 		ASSERT(i == 1);
827 	}
828 	args->wasfromfl = 0;
829 
830 	/*
831 	 * First algorithm.
832 	 * If the requested extent is large wrt the freespaces available
833 	 * in this a.g., then the cursor will be pointing to a btree entry
834 	 * near the right edge of the tree.  If it's in the last btree leaf
835 	 * block, then we just examine all the entries in that block
836 	 * that are big enough, and pick the best one.
837 	 * This is written as a while loop so we can break out of it,
838 	 * but we never loop back to the top.
839 	 */
840 	while (xfs_btree_islastblock(cnt_cur, 0)) {
841 		xfs_extlen_t	bdiff;
842 		int		besti=0;
843 		xfs_extlen_t	blen=0;
844 		xfs_agblock_t	bnew=0;
845 
846 #if defined(DEBUG) && defined(__KERNEL__)
847 		if (!dofirst)
848 			break;
849 #endif
850 		/*
851 		 * Start from the entry that lookup found, sequence through
852 		 * all larger free blocks.  If we're actually pointing at a
853 		 * record smaller than maxlen, go to the start of this block,
854 		 * and skip all those smaller than minlen.
855 		 */
856 		if (ltlen || args->alignment > 1) {
857 			cnt_cur->bc_ptrs[0] = 1;
858 			do {
859 				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
860 						&ltlen, &i)))
861 					goto error0;
862 				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
863 				if (ltlen >= args->minlen)
864 					break;
865 				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
866 					goto error0;
867 			} while (i);
868 			ASSERT(ltlen >= args->minlen);
869 			if (!i)
870 				break;
871 		}
872 		i = cnt_cur->bc_ptrs[0];
873 		for (j = 1, blen = 0, bdiff = 0;
874 		     !error && j && (blen < args->maxlen || bdiff > 0);
875 		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
876 			/*
877 			 * For each entry, decide if it's better than
878 			 * the previous best entry.
879 			 */
880 			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
881 				goto error0;
882 			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
883 			xfs_alloc_compute_aligned(args, ltbno, ltlen,
884 						  &ltbnoa, &ltlena);
885 			if (ltlena < args->minlen)
886 				continue;
887 			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
888 			xfs_alloc_fix_len(args);
889 			ASSERT(args->len >= args->minlen);
890 			if (args->len < blen)
891 				continue;
892 			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
893 				args->alignment, ltbnoa, ltlena, &ltnew);
894 			if (ltnew != NULLAGBLOCK &&
895 			    (args->len > blen || ltdiff < bdiff)) {
896 				bdiff = ltdiff;
897 				bnew = ltnew;
898 				blen = args->len;
899 				besti = cnt_cur->bc_ptrs[0];
900 			}
901 		}
902 		/*
903 		 * It didn't work.  We COULD be in a case where
904 		 * there's a good record somewhere, so try again.
905 		 */
906 		if (blen == 0)
907 			break;
908 		/*
909 		 * Point at the best entry, and retrieve it again.
910 		 */
911 		cnt_cur->bc_ptrs[0] = besti;
912 		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
913 			goto error0;
914 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
915 		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
916 		args->len = blen;
917 		if (!xfs_alloc_fix_minleft(args)) {
918 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
919 			trace_xfs_alloc_near_nominleft(args);
920 			return 0;
921 		}
922 		blen = args->len;
923 		/*
924 		 * We are allocating starting at bnew for blen blocks.
925 		 */
926 		args->agbno = bnew;
927 		ASSERT(bnew >= ltbno);
928 		ASSERT(bnew + blen <= ltbno + ltlen);
929 		/*
930 		 * Set up a cursor for the by-bno tree.
931 		 */
932 		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
933 			args->agbp, args->agno, XFS_BTNUM_BNO);
934 		/*
935 		 * Fix up the btree entries.
936 		 */
937 		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
938 				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
939 			goto error0;
940 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
941 		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
942 
943 		trace_xfs_alloc_near_first(args);
944 		return 0;
945 	}
946 	/*
947 	 * Second algorithm.
948 	 * Search in the by-bno tree to the left and to the right
949 	 * simultaneously, until in each case we find a space big enough,
950 	 * or run into the edge of the tree.  When we run into the edge,
951 	 * we deallocate that cursor.
952 	 * If both searches succeed, we compare the two spaces and pick
953 	 * the better one.
954 	 * With alignment, it's possible for both to fail; the upper
955 	 * level algorithm that picks allocation groups for allocations
956 	 * is not supposed to do this.
957 	 */
958 	/*
959 	 * Allocate and initialize the cursor for the leftward search.
960 	 */
961 	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
962 		args->agno, XFS_BTNUM_BNO);
963 	/*
964 	 * Lookup <= bno to find the leftward search's starting point.
965 	 */
966 	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
967 		goto error0;
968 	if (!i) {
969 		/*
970 		 * Didn't find anything; use this cursor for the rightward
971 		 * search.
972 		 */
973 		bno_cur_gt = bno_cur_lt;
974 		bno_cur_lt = NULL;
975 	}
976 	/*
977 	 * Found something.  Duplicate the cursor for the rightward search.
978 	 */
979 	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
980 		goto error0;
981 	/*
982 	 * Increment the cursor, so we will point at the entry just right
983 	 * of the leftward entry if any, or to the leftmost entry.
984 	 */
985 	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
986 		goto error0;
987 	if (!i) {
988 		/*
989 		 * It failed, there are no rightward entries.
990 		 */
991 		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
992 		bno_cur_gt = NULL;
993 	}
994 	/*
995 	 * Loop going left with the leftward cursor, right with the
996 	 * rightward cursor, until either both directions give up or
997 	 * we find an entry at least as big as minlen.
998 	 */
999 	do {
1000 		if (bno_cur_lt) {
1001 			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
1002 				goto error0;
1003 			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1004 			xfs_alloc_compute_aligned(args, ltbno, ltlen,
1005 						  &ltbnoa, &ltlena);
1006 			if (ltlena >= args->minlen)
1007 				break;
1008 			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
1009 				goto error0;
1010 			if (!i) {
1011 				xfs_btree_del_cursor(bno_cur_lt,
1012 						     XFS_BTREE_NOERROR);
1013 				bno_cur_lt = NULL;
1014 			}
1015 		}
1016 		if (bno_cur_gt) {
1017 			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
1018 				goto error0;
1019 			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1020 			xfs_alloc_compute_aligned(args, gtbno, gtlen,
1021 						  &gtbnoa, &gtlena);
1022 			if (gtlena >= args->minlen)
1023 				break;
1024 			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1025 				goto error0;
1026 			if (!i) {
1027 				xfs_btree_del_cursor(bno_cur_gt,
1028 						     XFS_BTREE_NOERROR);
1029 				bno_cur_gt = NULL;
1030 			}
1031 		}
1032 	} while (bno_cur_lt || bno_cur_gt);
1033 
1034 	/*
1035 	 * Got both cursors still active, need to find better entry.
1036 	 */
1037 	if (bno_cur_lt && bno_cur_gt) {
1038 		if (ltlena >= args->minlen) {
1039 			/*
1040 			 * Left side is good, look for a right side entry.
1041 			 */
1042 			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1043 			xfs_alloc_fix_len(args);
1044 			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1045 				args->alignment, ltbnoa, ltlena, &ltnew);
1046 
1047 			error = xfs_alloc_find_best_extent(args,
1048 						&bno_cur_lt, &bno_cur_gt,
1049 						ltdiff, &gtbno, &gtlen,
1050 						&gtbnoa, &gtlena,
1051 						0 /* search right */);
1052 		} else {
1053 			ASSERT(gtlena >= args->minlen);
1054 
1055 			/*
1056 			 * Right side is good, look for a left side entry.
1057 			 */
1058 			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1059 			xfs_alloc_fix_len(args);
1060 			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1061 				args->alignment, gtbnoa, gtlena, &gtnew);
1062 
1063 			error = xfs_alloc_find_best_extent(args,
1064 						&bno_cur_gt, &bno_cur_lt,
1065 						gtdiff, &ltbno, &ltlen,
1066 						&ltbnoa, &ltlena,
1067 						1 /* search left */);
1068 		}
1069 
1070 		if (error)
1071 			goto error0;
1072 	}
1073 
1074 	/*
1075 	 * If we couldn't get anything, give up.
1076 	 */
1077 	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
1078 		if (!forced++) {
1079 			trace_xfs_alloc_near_busy(args);
1080 			xfs_log_force(args->mp, XFS_LOG_SYNC);
1081 			goto restart;
1082 		}
1083 
1084 		trace_xfs_alloc_size_neither(args);
1085 		args->agbno = NULLAGBLOCK;
1086 		return 0;
1087 	}
1088 
1089 	/*
1090 	 * At this point we have selected a freespace entry, either to the
1091 	 * left or to the right.  If it's on the right, copy all the
1092 	 * useful variables to the "left" set so we only have one
1093 	 * copy of this code.
1094 	 */
1095 	if (bno_cur_gt) {
1096 		bno_cur_lt = bno_cur_gt;
1097 		bno_cur_gt = NULL;
1098 		ltbno = gtbno;
1099 		ltbnoa = gtbnoa;
1100 		ltlen = gtlen;
1101 		ltlena = gtlena;
1102 		j = 1;
1103 	} else
1104 		j = 0;
1105 
1106 	/*
1107 	 * Fix up the length and compute the useful address.
1108 	 */
1109 	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1110 	xfs_alloc_fix_len(args);
1111 	if (!xfs_alloc_fix_minleft(args)) {
1112 		trace_xfs_alloc_near_nominleft(args);
1113 		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1114 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1115 		return 0;
1116 	}
1117 	rlen = args->len;
1118 	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
1119 				     ltbnoa, ltlena, &ltnew);
1120 	ASSERT(ltnew >= ltbno);
1121 	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
1122 	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1123 	args->agbno = ltnew;
1124 
1125 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1126 			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1127 		goto error0;
1128 
1129 	if (j)
1130 		trace_xfs_alloc_near_greater(args);
1131 	else
1132 		trace_xfs_alloc_near_lesser(args);
1133 
1134 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1135 	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1136 	return 0;
1137 
1138  error0:
1139 	trace_xfs_alloc_near_error(args);
1140 	if (cnt_cur != NULL)
1141 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1142 	if (bno_cur_lt != NULL)
1143 		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1144 	if (bno_cur_gt != NULL)
1145 		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1146 	return error;
1147 }
1148 
1149 /*
1150  * Allocate a variable extent anywhere in the allocation group agno.
1151  * Extent's length (returned in len) will be between minlen and maxlen,
1152  * and of the form k * prod + mod unless there's nothing that large.
1153  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1154  */
1155 STATIC int				/* error */
xfs_alloc_ag_vextent_size(xfs_alloc_arg_t * args)1156 xfs_alloc_ag_vextent_size(
1157 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
1158 {
1159 	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
1160 	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
1161 	int		error;		/* error result */
1162 	xfs_agblock_t	fbno;		/* start of found freespace */
1163 	xfs_extlen_t	flen;		/* length of found freespace */
1164 	int		i;		/* temp status variable */
1165 	xfs_agblock_t	rbno;		/* returned block number */
1166 	xfs_extlen_t	rlen;		/* length of returned extent */
1167 	int		forced = 0;
1168 
1169 restart:
1170 	/*
1171 	 * Allocate and initialize a cursor for the by-size btree.
1172 	 */
1173 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1174 		args->agno, XFS_BTNUM_CNT);
1175 	bno_cur = NULL;
1176 
1177 	/*
1178 	 * Look for an entry >= maxlen+alignment-1 blocks.
1179 	 */
1180 	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1181 			args->maxlen + args->alignment - 1, &i)))
1182 		goto error0;
1183 
1184 	/*
1185 	 * If none or we have busy extents that we cannot allocate from, then
1186 	 * we have to settle for a smaller extent. In the case that there are
1187 	 * no large extents, this will return the last entry in the tree unless
1188 	 * the tree is empty. In the case that there are only busy large
1189 	 * extents, this will return the largest small extent unless there
1190 	 * are no smaller extents available.
1191 	 */
1192 	if (!i || forced > 1) {
1193 		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1194 						   &fbno, &flen, &i);
1195 		if (error)
1196 			goto error0;
1197 		if (i == 0 || flen == 0) {
1198 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1199 			trace_xfs_alloc_size_noentry(args);
1200 			return 0;
1201 		}
1202 		ASSERT(i == 1);
1203 		xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
1204 	} else {
1205 		/*
1206 		 * Search for a non-busy extent that is large enough.
1207 		 * If we are at low space, don't check, or if we fall of
1208 		 * the end of the btree, turn off the busy check and
1209 		 * restart.
1210 		 */
1211 		for (;;) {
1212 			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1213 			if (error)
1214 				goto error0;
1215 			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1216 
1217 			xfs_alloc_compute_aligned(args, fbno, flen,
1218 						  &rbno, &rlen);
1219 
1220 			if (rlen >= args->maxlen)
1221 				break;
1222 
1223 			error = xfs_btree_increment(cnt_cur, 0, &i);
1224 			if (error)
1225 				goto error0;
1226 			if (i == 0) {
1227 				/*
1228 				 * Our only valid extents must have been busy.
1229 				 * Make it unbusy by forcing the log out and
1230 				 * retrying. If we've been here before, forcing
1231 				 * the log isn't making the extents available,
1232 				 * which means they have probably been freed in
1233 				 * this transaction.  In that case, we have to
1234 				 * give up on them and we'll attempt a minlen
1235 				 * allocation the next time around.
1236 				 */
1237 				xfs_btree_del_cursor(cnt_cur,
1238 						     XFS_BTREE_NOERROR);
1239 				trace_xfs_alloc_size_busy(args);
1240 				if (!forced++)
1241 					xfs_log_force(args->mp, XFS_LOG_SYNC);
1242 				goto restart;
1243 			}
1244 		}
1245 	}
1246 
1247 	/*
1248 	 * In the first case above, we got the last entry in the
1249 	 * by-size btree.  Now we check to see if the space hits maxlen
1250 	 * once aligned; if not, we search left for something better.
1251 	 * This can't happen in the second case above.
1252 	 */
1253 	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1254 	XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
1255 			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
1256 	if (rlen < args->maxlen) {
1257 		xfs_agblock_t	bestfbno;
1258 		xfs_extlen_t	bestflen;
1259 		xfs_agblock_t	bestrbno;
1260 		xfs_extlen_t	bestrlen;
1261 
1262 		bestrlen = rlen;
1263 		bestrbno = rbno;
1264 		bestflen = flen;
1265 		bestfbno = fbno;
1266 		for (;;) {
1267 			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1268 				goto error0;
1269 			if (i == 0)
1270 				break;
1271 			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1272 					&i)))
1273 				goto error0;
1274 			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1275 			if (flen < bestrlen)
1276 				break;
1277 			xfs_alloc_compute_aligned(args, fbno, flen,
1278 						  &rbno, &rlen);
1279 			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1280 			XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
1281 				(rlen <= flen && rbno + rlen <= fbno + flen),
1282 				error0);
1283 			if (rlen > bestrlen) {
1284 				bestrlen = rlen;
1285 				bestrbno = rbno;
1286 				bestflen = flen;
1287 				bestfbno = fbno;
1288 				if (rlen == args->maxlen)
1289 					break;
1290 			}
1291 		}
1292 		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1293 				&i)))
1294 			goto error0;
1295 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1296 		rlen = bestrlen;
1297 		rbno = bestrbno;
1298 		flen = bestflen;
1299 		fbno = bestfbno;
1300 	}
1301 	args->wasfromfl = 0;
1302 	/*
1303 	 * Fix up the length.
1304 	 */
1305 	args->len = rlen;
1306 	if (rlen < args->minlen) {
1307 		if (!forced++) {
1308 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1309 			trace_xfs_alloc_size_busy(args);
1310 			xfs_log_force(args->mp, XFS_LOG_SYNC);
1311 			goto restart;
1312 		}
1313 		goto out_nominleft;
1314 	}
1315 	xfs_alloc_fix_len(args);
1316 
1317 	if (!xfs_alloc_fix_minleft(args))
1318 		goto out_nominleft;
1319 	rlen = args->len;
1320 	XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
1321 	/*
1322 	 * Allocate and initialize a cursor for the by-block tree.
1323 	 */
1324 	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1325 		args->agno, XFS_BTNUM_BNO);
1326 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1327 			rbno, rlen, XFSA_FIXUP_CNT_OK)))
1328 		goto error0;
1329 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1330 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1331 	cnt_cur = bno_cur = NULL;
1332 	args->len = rlen;
1333 	args->agbno = rbno;
1334 	XFS_WANT_CORRUPTED_GOTO(
1335 		args->agbno + args->len <=
1336 			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1337 		error0);
1338 	trace_xfs_alloc_size_done(args);
1339 	return 0;
1340 
1341 error0:
1342 	trace_xfs_alloc_size_error(args);
1343 	if (cnt_cur)
1344 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1345 	if (bno_cur)
1346 		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1347 	return error;
1348 
1349 out_nominleft:
1350 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1351 	trace_xfs_alloc_size_nominleft(args);
1352 	args->agbno = NULLAGBLOCK;
1353 	return 0;
1354 }
1355 
1356 /*
1357  * Deal with the case where only small freespaces remain.
1358  * Either return the contents of the last freespace record,
1359  * or allocate space from the freelist if there is nothing in the tree.
1360  */
1361 STATIC int			/* error */
xfs_alloc_ag_vextent_small(xfs_alloc_arg_t * args,xfs_btree_cur_t * ccur,xfs_agblock_t * fbnop,xfs_extlen_t * flenp,int * stat)1362 xfs_alloc_ag_vextent_small(
1363 	xfs_alloc_arg_t	*args,	/* allocation argument structure */
1364 	xfs_btree_cur_t	*ccur,	/* by-size cursor */
1365 	xfs_agblock_t	*fbnop,	/* result block number */
1366 	xfs_extlen_t	*flenp,	/* result length */
1367 	int		*stat)	/* status: 0-freelist, 1-normal/none */
1368 {
1369 	int		error;
1370 	xfs_agblock_t	fbno;
1371 	xfs_extlen_t	flen;
1372 	int		i;
1373 
1374 	if ((error = xfs_btree_decrement(ccur, 0, &i)))
1375 		goto error0;
1376 	if (i) {
1377 		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
1378 			goto error0;
1379 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1380 	}
1381 	/*
1382 	 * Nothing in the btree, try the freelist.  Make sure
1383 	 * to respect minleft even when pulling from the
1384 	 * freelist.
1385 	 */
1386 	else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
1387 		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
1388 		  > args->minleft)) {
1389 		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
1390 		if (error)
1391 			goto error0;
1392 		if (fbno != NULLAGBLOCK) {
1393 			xfs_alloc_busy_reuse(args->mp, args->agno, fbno, 1,
1394 					     args->userdata);
1395 
1396 			if (args->userdata) {
1397 				xfs_buf_t	*bp;
1398 
1399 				bp = xfs_btree_get_bufs(args->mp, args->tp,
1400 					args->agno, fbno, 0);
1401 				xfs_trans_binval(args->tp, bp);
1402 			}
1403 			args->len = 1;
1404 			args->agbno = fbno;
1405 			XFS_WANT_CORRUPTED_GOTO(
1406 				args->agbno + args->len <=
1407 				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1408 				error0);
1409 			args->wasfromfl = 1;
1410 			trace_xfs_alloc_small_freelist(args);
1411 			*stat = 0;
1412 			return 0;
1413 		}
1414 		/*
1415 		 * Nothing in the freelist.
1416 		 */
1417 		else
1418 			flen = 0;
1419 	}
1420 	/*
1421 	 * Can't allocate from the freelist for some reason.
1422 	 */
1423 	else {
1424 		fbno = NULLAGBLOCK;
1425 		flen = 0;
1426 	}
1427 	/*
1428 	 * Can't do the allocation, give up.
1429 	 */
1430 	if (flen < args->minlen) {
1431 		args->agbno = NULLAGBLOCK;
1432 		trace_xfs_alloc_small_notenough(args);
1433 		flen = 0;
1434 	}
1435 	*fbnop = fbno;
1436 	*flenp = flen;
1437 	*stat = 1;
1438 	trace_xfs_alloc_small_done(args);
1439 	return 0;
1440 
1441 error0:
1442 	trace_xfs_alloc_small_error(args);
1443 	return error;
1444 }
1445 
1446 /*
1447  * Free the extent starting at agno/bno for length.
1448  */
1449 STATIC int			/* error */
xfs_free_ag_extent(xfs_trans_t * tp,xfs_buf_t * agbp,xfs_agnumber_t agno,xfs_agblock_t bno,xfs_extlen_t len,int isfl)1450 xfs_free_ag_extent(
1451 	xfs_trans_t	*tp,	/* transaction pointer */
1452 	xfs_buf_t	*agbp,	/* buffer for a.g. freelist header */
1453 	xfs_agnumber_t	agno,	/* allocation group number */
1454 	xfs_agblock_t	bno,	/* starting block number */
1455 	xfs_extlen_t	len,	/* length of extent */
1456 	int		isfl)	/* set if is freelist blocks - no sb acctg */
1457 {
1458 	xfs_btree_cur_t	*bno_cur;	/* cursor for by-block btree */
1459 	xfs_btree_cur_t	*cnt_cur;	/* cursor for by-size btree */
1460 	int		error;		/* error return value */
1461 	xfs_agblock_t	gtbno;		/* start of right neighbor block */
1462 	xfs_extlen_t	gtlen;		/* length of right neighbor block */
1463 	int		haveleft;	/* have a left neighbor block */
1464 	int		haveright;	/* have a right neighbor block */
1465 	int		i;		/* temp, result code */
1466 	xfs_agblock_t	ltbno;		/* start of left neighbor block */
1467 	xfs_extlen_t	ltlen;		/* length of left neighbor block */
1468 	xfs_mount_t	*mp;		/* mount point struct for filesystem */
1469 	xfs_agblock_t	nbno;		/* new starting block of freespace */
1470 	xfs_extlen_t	nlen;		/* new length of freespace */
1471 	xfs_perag_t	*pag;		/* per allocation group data */
1472 
1473 	mp = tp->t_mountp;
1474 	/*
1475 	 * Allocate and initialize a cursor for the by-block btree.
1476 	 */
1477 	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1478 	cnt_cur = NULL;
1479 	/*
1480 	 * Look for a neighboring block on the left (lower block numbers)
1481 	 * that is contiguous with this space.
1482 	 */
1483 	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1484 		goto error0;
1485 	if (haveleft) {
1486 		/*
1487 		 * There is a block to our left.
1488 		 */
1489 		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1490 			goto error0;
1491 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1492 		/*
1493 		 * It's not contiguous, though.
1494 		 */
1495 		if (ltbno + ltlen < bno)
1496 			haveleft = 0;
1497 		else {
1498 			/*
1499 			 * If this failure happens the request to free this
1500 			 * space was invalid, it's (partly) already free.
1501 			 * Very bad.
1502 			 */
1503 			XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
1504 		}
1505 	}
1506 	/*
1507 	 * Look for a neighboring block on the right (higher block numbers)
1508 	 * that is contiguous with this space.
1509 	 */
1510 	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1511 		goto error0;
1512 	if (haveright) {
1513 		/*
1514 		 * There is a block to our right.
1515 		 */
1516 		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1517 			goto error0;
1518 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1519 		/*
1520 		 * It's not contiguous, though.
1521 		 */
1522 		if (bno + len < gtbno)
1523 			haveright = 0;
1524 		else {
1525 			/*
1526 			 * If this failure happens the request to free this
1527 			 * space was invalid, it's (partly) already free.
1528 			 * Very bad.
1529 			 */
1530 			XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
1531 		}
1532 	}
1533 	/*
1534 	 * Now allocate and initialize a cursor for the by-size tree.
1535 	 */
1536 	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1537 	/*
1538 	 * Have both left and right contiguous neighbors.
1539 	 * Merge all three into a single free block.
1540 	 */
1541 	if (haveleft && haveright) {
1542 		/*
1543 		 * Delete the old by-size entry on the left.
1544 		 */
1545 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1546 			goto error0;
1547 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1548 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1549 			goto error0;
1550 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1551 		/*
1552 		 * Delete the old by-size entry on the right.
1553 		 */
1554 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1555 			goto error0;
1556 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1557 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1558 			goto error0;
1559 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1560 		/*
1561 		 * Delete the old by-block entry for the right block.
1562 		 */
1563 		if ((error = xfs_btree_delete(bno_cur, &i)))
1564 			goto error0;
1565 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1566 		/*
1567 		 * Move the by-block cursor back to the left neighbor.
1568 		 */
1569 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1570 			goto error0;
1571 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1572 #ifdef DEBUG
1573 		/*
1574 		 * Check that this is the right record: delete didn't
1575 		 * mangle the cursor.
1576 		 */
1577 		{
1578 			xfs_agblock_t	xxbno;
1579 			xfs_extlen_t	xxlen;
1580 
1581 			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1582 					&i)))
1583 				goto error0;
1584 			XFS_WANT_CORRUPTED_GOTO(
1585 				i == 1 && xxbno == ltbno && xxlen == ltlen,
1586 				error0);
1587 		}
1588 #endif
1589 		/*
1590 		 * Update remaining by-block entry to the new, joined block.
1591 		 */
1592 		nbno = ltbno;
1593 		nlen = len + ltlen + gtlen;
1594 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1595 			goto error0;
1596 	}
1597 	/*
1598 	 * Have only a left contiguous neighbor.
1599 	 * Merge it together with the new freespace.
1600 	 */
1601 	else if (haveleft) {
1602 		/*
1603 		 * Delete the old by-size entry on the left.
1604 		 */
1605 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1606 			goto error0;
1607 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1608 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1609 			goto error0;
1610 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1611 		/*
1612 		 * Back up the by-block cursor to the left neighbor, and
1613 		 * update its length.
1614 		 */
1615 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1616 			goto error0;
1617 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1618 		nbno = ltbno;
1619 		nlen = len + ltlen;
1620 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1621 			goto error0;
1622 	}
1623 	/*
1624 	 * Have only a right contiguous neighbor.
1625 	 * Merge it together with the new freespace.
1626 	 */
1627 	else if (haveright) {
1628 		/*
1629 		 * Delete the old by-size entry on the right.
1630 		 */
1631 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1632 			goto error0;
1633 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1634 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1635 			goto error0;
1636 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1637 		/*
1638 		 * Update the starting block and length of the right
1639 		 * neighbor in the by-block tree.
1640 		 */
1641 		nbno = bno;
1642 		nlen = len + gtlen;
1643 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1644 			goto error0;
1645 	}
1646 	/*
1647 	 * No contiguous neighbors.
1648 	 * Insert the new freespace into the by-block tree.
1649 	 */
1650 	else {
1651 		nbno = bno;
1652 		nlen = len;
1653 		if ((error = xfs_btree_insert(bno_cur, &i)))
1654 			goto error0;
1655 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1656 	}
1657 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1658 	bno_cur = NULL;
1659 	/*
1660 	 * In all cases we need to insert the new freespace in the by-size tree.
1661 	 */
1662 	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1663 		goto error0;
1664 	XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
1665 	if ((error = xfs_btree_insert(cnt_cur, &i)))
1666 		goto error0;
1667 	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1668 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1669 	cnt_cur = NULL;
1670 
1671 	/*
1672 	 * Update the freespace totals in the ag and superblock.
1673 	 */
1674 	pag = xfs_perag_get(mp, agno);
1675 	error = xfs_alloc_update_counters(tp, pag, agbp, len);
1676 	xfs_perag_put(pag);
1677 	if (error)
1678 		goto error0;
1679 
1680 	if (!isfl)
1681 		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
1682 	XFS_STATS_INC(xs_freex);
1683 	XFS_STATS_ADD(xs_freeb, len);
1684 
1685 	trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
1686 
1687 	return 0;
1688 
1689  error0:
1690 	trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
1691 	if (bno_cur)
1692 		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1693 	if (cnt_cur)
1694 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1695 	return error;
1696 }
1697 
1698 /*
1699  * Visible (exported) allocation/free functions.
1700  * Some of these are used just by xfs_alloc_btree.c and this file.
1701  */
1702 
1703 /*
1704  * Compute and fill in value of m_ag_maxlevels.
1705  */
1706 void
xfs_alloc_compute_maxlevels(xfs_mount_t * mp)1707 xfs_alloc_compute_maxlevels(
1708 	xfs_mount_t	*mp)	/* file system mount structure */
1709 {
1710 	int		level;
1711 	uint		maxblocks;
1712 	uint		maxleafents;
1713 	int		minleafrecs;
1714 	int		minnoderecs;
1715 
1716 	maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
1717 	minleafrecs = mp->m_alloc_mnr[0];
1718 	minnoderecs = mp->m_alloc_mnr[1];
1719 	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
1720 	for (level = 1; maxblocks > 1; level++)
1721 		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
1722 	mp->m_ag_maxlevels = level;
1723 }
1724 
1725 /*
1726  * Find the length of the longest extent in an AG.
1727  */
1728 xfs_extlen_t
xfs_alloc_longest_free_extent(struct xfs_mount * mp,struct xfs_perag * pag)1729 xfs_alloc_longest_free_extent(
1730 	struct xfs_mount	*mp,
1731 	struct xfs_perag	*pag)
1732 {
1733 	xfs_extlen_t		need, delta = 0;
1734 
1735 	need = XFS_MIN_FREELIST_PAG(pag, mp);
1736 	if (need > pag->pagf_flcount)
1737 		delta = need - pag->pagf_flcount;
1738 
1739 	if (pag->pagf_longest > delta)
1740 		return pag->pagf_longest - delta;
1741 	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
1742 }
1743 
1744 /*
1745  * Decide whether to use this allocation group for this allocation.
1746  * If so, fix up the btree freelist's size.
1747  */
1748 STATIC int			/* error */
xfs_alloc_fix_freelist(xfs_alloc_arg_t * args,int flags)1749 xfs_alloc_fix_freelist(
1750 	xfs_alloc_arg_t	*args,	/* allocation argument structure */
1751 	int		flags)	/* XFS_ALLOC_FLAG_... */
1752 {
1753 	xfs_buf_t	*agbp;	/* agf buffer pointer */
1754 	xfs_agf_t	*agf;	/* a.g. freespace structure pointer */
1755 	xfs_buf_t	*agflbp;/* agfl buffer pointer */
1756 	xfs_agblock_t	bno;	/* freelist block */
1757 	xfs_extlen_t	delta;	/* new blocks needed in freelist */
1758 	int		error;	/* error result code */
1759 	xfs_extlen_t	longest;/* longest extent in allocation group */
1760 	xfs_mount_t	*mp;	/* file system mount point structure */
1761 	xfs_extlen_t	need;	/* total blocks needed in freelist */
1762 	xfs_perag_t	*pag;	/* per-ag information structure */
1763 	xfs_alloc_arg_t	targs;	/* local allocation arguments */
1764 	xfs_trans_t	*tp;	/* transaction pointer */
1765 
1766 	mp = args->mp;
1767 
1768 	pag = args->pag;
1769 	tp = args->tp;
1770 	if (!pag->pagf_init) {
1771 		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1772 				&agbp)))
1773 			return error;
1774 		if (!pag->pagf_init) {
1775 			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1776 			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1777 			args->agbp = NULL;
1778 			return 0;
1779 		}
1780 	} else
1781 		agbp = NULL;
1782 
1783 	/*
1784 	 * If this is a metadata preferred pag and we are user data
1785 	 * then try somewhere else if we are not being asked to
1786 	 * try harder at this point
1787 	 */
1788 	if (pag->pagf_metadata && args->userdata &&
1789 	    (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
1790 		ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1791 		args->agbp = NULL;
1792 		return 0;
1793 	}
1794 
1795 	if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1796 		/*
1797 		 * If it looks like there isn't a long enough extent, or enough
1798 		 * total blocks, reject it.
1799 		 */
1800 		need = XFS_MIN_FREELIST_PAG(pag, mp);
1801 		longest = xfs_alloc_longest_free_extent(mp, pag);
1802 		if ((args->minlen + args->alignment + args->minalignslop - 1) >
1803 				longest ||
1804 		    ((int)(pag->pagf_freeblks + pag->pagf_flcount -
1805 			   need - args->total) < (int)args->minleft)) {
1806 			if (agbp)
1807 				xfs_trans_brelse(tp, agbp);
1808 			args->agbp = NULL;
1809 			return 0;
1810 		}
1811 	}
1812 
1813 	/*
1814 	 * Get the a.g. freespace buffer.
1815 	 * Can fail if we're not blocking on locks, and it's held.
1816 	 */
1817 	if (agbp == NULL) {
1818 		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1819 				&agbp)))
1820 			return error;
1821 		if (agbp == NULL) {
1822 			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1823 			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1824 			args->agbp = NULL;
1825 			return 0;
1826 		}
1827 	}
1828 	/*
1829 	 * Figure out how many blocks we should have in the freelist.
1830 	 */
1831 	agf = XFS_BUF_TO_AGF(agbp);
1832 	need = XFS_MIN_FREELIST(agf, mp);
1833 	/*
1834 	 * If there isn't enough total or single-extent, reject it.
1835 	 */
1836 	if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1837 		delta = need > be32_to_cpu(agf->agf_flcount) ?
1838 			(need - be32_to_cpu(agf->agf_flcount)) : 0;
1839 		longest = be32_to_cpu(agf->agf_longest);
1840 		longest = (longest > delta) ? (longest - delta) :
1841 			(be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
1842 		if ((args->minlen + args->alignment + args->minalignslop - 1) >
1843 				longest ||
1844 		    ((int)(be32_to_cpu(agf->agf_freeblks) +
1845 		     be32_to_cpu(agf->agf_flcount) - need - args->total) <
1846 				(int)args->minleft)) {
1847 			xfs_trans_brelse(tp, agbp);
1848 			args->agbp = NULL;
1849 			return 0;
1850 		}
1851 	}
1852 	/*
1853 	 * Make the freelist shorter if it's too long.
1854 	 */
1855 	while (be32_to_cpu(agf->agf_flcount) > need) {
1856 		xfs_buf_t	*bp;
1857 
1858 		error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
1859 		if (error)
1860 			return error;
1861 		if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
1862 			return error;
1863 		bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
1864 		xfs_trans_binval(tp, bp);
1865 	}
1866 	/*
1867 	 * Initialize the args structure.
1868 	 */
1869 	targs.tp = tp;
1870 	targs.mp = mp;
1871 	targs.agbp = agbp;
1872 	targs.agno = args->agno;
1873 	targs.mod = targs.minleft = targs.wasdel = targs.userdata =
1874 		targs.minalignslop = 0;
1875 	targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
1876 	targs.type = XFS_ALLOCTYPE_THIS_AG;
1877 	targs.pag = pag;
1878 	if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
1879 		return error;
1880 	/*
1881 	 * Make the freelist longer if it's too short.
1882 	 */
1883 	while (be32_to_cpu(agf->agf_flcount) < need) {
1884 		targs.agbno = 0;
1885 		targs.maxlen = need - be32_to_cpu(agf->agf_flcount);
1886 		/*
1887 		 * Allocate as many blocks as possible at once.
1888 		 */
1889 		if ((error = xfs_alloc_ag_vextent(&targs))) {
1890 			xfs_trans_brelse(tp, agflbp);
1891 			return error;
1892 		}
1893 		/*
1894 		 * Stop if we run out.  Won't happen if callers are obeying
1895 		 * the restrictions correctly.  Can happen for free calls
1896 		 * on a completely full ag.
1897 		 */
1898 		if (targs.agbno == NULLAGBLOCK) {
1899 			if (flags & XFS_ALLOC_FLAG_FREEING)
1900 				break;
1901 			xfs_trans_brelse(tp, agflbp);
1902 			args->agbp = NULL;
1903 			return 0;
1904 		}
1905 		/*
1906 		 * Put each allocated block on the list.
1907 		 */
1908 		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
1909 			error = xfs_alloc_put_freelist(tp, agbp,
1910 							agflbp, bno, 0);
1911 			if (error)
1912 				return error;
1913 		}
1914 	}
1915 	xfs_trans_brelse(tp, agflbp);
1916 	args->agbp = agbp;
1917 	return 0;
1918 }
1919 
1920 /*
1921  * Get a block from the freelist.
1922  * Returns with the buffer for the block gotten.
1923  */
1924 int				/* error */
xfs_alloc_get_freelist(xfs_trans_t * tp,xfs_buf_t * agbp,xfs_agblock_t * bnop,int btreeblk)1925 xfs_alloc_get_freelist(
1926 	xfs_trans_t	*tp,	/* transaction pointer */
1927 	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
1928 	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
1929 	int		btreeblk) /* destination is a AGF btree */
1930 {
1931 	xfs_agf_t	*agf;	/* a.g. freespace structure */
1932 	xfs_agfl_t	*agfl;	/* a.g. freelist structure */
1933 	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
1934 	xfs_agblock_t	bno;	/* block number returned */
1935 	int		error;
1936 	int		logflags;
1937 	xfs_mount_t	*mp;	/* mount structure */
1938 	xfs_perag_t	*pag;	/* per allocation group data */
1939 
1940 	agf = XFS_BUF_TO_AGF(agbp);
1941 	/*
1942 	 * Freelist is empty, give up.
1943 	 */
1944 	if (!agf->agf_flcount) {
1945 		*bnop = NULLAGBLOCK;
1946 		return 0;
1947 	}
1948 	/*
1949 	 * Read the array of free blocks.
1950 	 */
1951 	mp = tp->t_mountp;
1952 	if ((error = xfs_alloc_read_agfl(mp, tp,
1953 			be32_to_cpu(agf->agf_seqno), &agflbp)))
1954 		return error;
1955 	agfl = XFS_BUF_TO_AGFL(agflbp);
1956 	/*
1957 	 * Get the block number and update the data structures.
1958 	 */
1959 	bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
1960 	be32_add_cpu(&agf->agf_flfirst, 1);
1961 	xfs_trans_brelse(tp, agflbp);
1962 	if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
1963 		agf->agf_flfirst = 0;
1964 
1965 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
1966 	be32_add_cpu(&agf->agf_flcount, -1);
1967 	xfs_trans_agflist_delta(tp, -1);
1968 	pag->pagf_flcount--;
1969 	xfs_perag_put(pag);
1970 
1971 	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
1972 	if (btreeblk) {
1973 		be32_add_cpu(&agf->agf_btreeblks, 1);
1974 		pag->pagf_btreeblks++;
1975 		logflags |= XFS_AGF_BTREEBLKS;
1976 	}
1977 
1978 	xfs_alloc_log_agf(tp, agbp, logflags);
1979 	*bnop = bno;
1980 
1981 	return 0;
1982 }
1983 
1984 /*
1985  * Log the given fields from the agf structure.
1986  */
1987 void
xfs_alloc_log_agf(xfs_trans_t * tp,xfs_buf_t * bp,int fields)1988 xfs_alloc_log_agf(
1989 	xfs_trans_t	*tp,	/* transaction pointer */
1990 	xfs_buf_t	*bp,	/* buffer for a.g. freelist header */
1991 	int		fields)	/* mask of fields to be logged (XFS_AGF_...) */
1992 {
1993 	int	first;		/* first byte offset */
1994 	int	last;		/* last byte offset */
1995 	static const short	offsets[] = {
1996 		offsetof(xfs_agf_t, agf_magicnum),
1997 		offsetof(xfs_agf_t, agf_versionnum),
1998 		offsetof(xfs_agf_t, agf_seqno),
1999 		offsetof(xfs_agf_t, agf_length),
2000 		offsetof(xfs_agf_t, agf_roots[0]),
2001 		offsetof(xfs_agf_t, agf_levels[0]),
2002 		offsetof(xfs_agf_t, agf_flfirst),
2003 		offsetof(xfs_agf_t, agf_fllast),
2004 		offsetof(xfs_agf_t, agf_flcount),
2005 		offsetof(xfs_agf_t, agf_freeblks),
2006 		offsetof(xfs_agf_t, agf_longest),
2007 		offsetof(xfs_agf_t, agf_btreeblks),
2008 		sizeof(xfs_agf_t)
2009 	};
2010 
2011 	trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2012 
2013 	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2014 	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2015 }
2016 
2017 /*
2018  * Interface for inode allocation to force the pag data to be initialized.
2019  */
2020 int					/* error */
xfs_alloc_pagf_init(xfs_mount_t * mp,xfs_trans_t * tp,xfs_agnumber_t agno,int flags)2021 xfs_alloc_pagf_init(
2022 	xfs_mount_t		*mp,	/* file system mount structure */
2023 	xfs_trans_t		*tp,	/* transaction pointer */
2024 	xfs_agnumber_t		agno,	/* allocation group number */
2025 	int			flags)	/* XFS_ALLOC_FLAGS_... */
2026 {
2027 	xfs_buf_t		*bp;
2028 	int			error;
2029 
2030 	if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2031 		return error;
2032 	if (bp)
2033 		xfs_trans_brelse(tp, bp);
2034 	return 0;
2035 }
2036 
2037 /*
2038  * Put the block on the freelist for the allocation group.
2039  */
2040 int					/* error */
xfs_alloc_put_freelist(xfs_trans_t * tp,xfs_buf_t * agbp,xfs_buf_t * agflbp,xfs_agblock_t bno,int btreeblk)2041 xfs_alloc_put_freelist(
2042 	xfs_trans_t		*tp,	/* transaction pointer */
2043 	xfs_buf_t		*agbp,	/* buffer for a.g. freelist header */
2044 	xfs_buf_t		*agflbp,/* buffer for a.g. free block array */
2045 	xfs_agblock_t		bno,	/* block being freed */
2046 	int			btreeblk) /* block came from a AGF btree */
2047 {
2048 	xfs_agf_t		*agf;	/* a.g. freespace structure */
2049 	xfs_agfl_t		*agfl;	/* a.g. free block array */
2050 	__be32			*blockp;/* pointer to array entry */
2051 	int			error;
2052 	int			logflags;
2053 	xfs_mount_t		*mp;	/* mount structure */
2054 	xfs_perag_t		*pag;	/* per allocation group data */
2055 
2056 	agf = XFS_BUF_TO_AGF(agbp);
2057 	mp = tp->t_mountp;
2058 
2059 	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
2060 			be32_to_cpu(agf->agf_seqno), &agflbp)))
2061 		return error;
2062 	agfl = XFS_BUF_TO_AGFL(agflbp);
2063 	be32_add_cpu(&agf->agf_fllast, 1);
2064 	if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
2065 		agf->agf_fllast = 0;
2066 
2067 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2068 	be32_add_cpu(&agf->agf_flcount, 1);
2069 	xfs_trans_agflist_delta(tp, 1);
2070 	pag->pagf_flcount++;
2071 
2072 	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2073 	if (btreeblk) {
2074 		be32_add_cpu(&agf->agf_btreeblks, -1);
2075 		pag->pagf_btreeblks--;
2076 		logflags |= XFS_AGF_BTREEBLKS;
2077 	}
2078 	xfs_perag_put(pag);
2079 
2080 	xfs_alloc_log_agf(tp, agbp, logflags);
2081 
2082 	ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
2083 	blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
2084 	*blockp = cpu_to_be32(bno);
2085 	xfs_alloc_log_agf(tp, agbp, logflags);
2086 	xfs_trans_log_buf(tp, agflbp,
2087 		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
2088 		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
2089 			sizeof(xfs_agblock_t) - 1));
2090 	return 0;
2091 }
2092 
2093 /*
2094  * Read in the allocation group header (free/alloc section).
2095  */
2096 int					/* error */
xfs_read_agf(struct xfs_mount * mp,struct xfs_trans * tp,xfs_agnumber_t agno,int flags,struct xfs_buf ** bpp)2097 xfs_read_agf(
2098 	struct xfs_mount	*mp,	/* mount point structure */
2099 	struct xfs_trans	*tp,	/* transaction pointer */
2100 	xfs_agnumber_t		agno,	/* allocation group number */
2101 	int			flags,	/* XFS_BUF_ */
2102 	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
2103 {
2104 	struct xfs_agf	*agf;		/* ag freelist header */
2105 	int		agf_ok;		/* set if agf is consistent */
2106 	int		error;
2107 
2108 	ASSERT(agno != NULLAGNUMBER);
2109 	error = xfs_trans_read_buf(
2110 			mp, tp, mp->m_ddev_targp,
2111 			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
2112 			XFS_FSS_TO_BB(mp, 1), flags, bpp);
2113 	if (error)
2114 		return error;
2115 	if (!*bpp)
2116 		return 0;
2117 
2118 	ASSERT(!(*bpp)->b_error);
2119 	agf = XFS_BUF_TO_AGF(*bpp);
2120 
2121 	/*
2122 	 * Validate the magic number of the agf block.
2123 	 */
2124 	agf_ok =
2125 		agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
2126 		XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2127 		be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2128 		be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
2129 		be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
2130 		be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp) &&
2131 		be32_to_cpu(agf->agf_seqno) == agno;
2132 	if (xfs_sb_version_haslazysbcount(&mp->m_sb))
2133 		agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <=
2134 						be32_to_cpu(agf->agf_length);
2135 	if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
2136 			XFS_RANDOM_ALLOC_READ_AGF))) {
2137 		XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
2138 				     XFS_ERRLEVEL_LOW, mp, agf);
2139 		xfs_trans_brelse(tp, *bpp);
2140 		return XFS_ERROR(EFSCORRUPTED);
2141 	}
2142 	xfs_buf_set_ref(*bpp, XFS_AGF_REF);
2143 	return 0;
2144 }
2145 
2146 /*
2147  * Read in the allocation group header (free/alloc section).
2148  */
2149 int					/* error */
xfs_alloc_read_agf(struct xfs_mount * mp,struct xfs_trans * tp,xfs_agnumber_t agno,int flags,struct xfs_buf ** bpp)2150 xfs_alloc_read_agf(
2151 	struct xfs_mount	*mp,	/* mount point structure */
2152 	struct xfs_trans	*tp,	/* transaction pointer */
2153 	xfs_agnumber_t		agno,	/* allocation group number */
2154 	int			flags,	/* XFS_ALLOC_FLAG_... */
2155 	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
2156 {
2157 	struct xfs_agf		*agf;		/* ag freelist header */
2158 	struct xfs_perag	*pag;		/* per allocation group data */
2159 	int			error;
2160 
2161 	ASSERT(agno != NULLAGNUMBER);
2162 
2163 	error = xfs_read_agf(mp, tp, agno,
2164 			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
2165 			bpp);
2166 	if (error)
2167 		return error;
2168 	if (!*bpp)
2169 		return 0;
2170 	ASSERT(!(*bpp)->b_error);
2171 
2172 	agf = XFS_BUF_TO_AGF(*bpp);
2173 	pag = xfs_perag_get(mp, agno);
2174 	if (!pag->pagf_init) {
2175 		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
2176 		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
2177 		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2178 		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
2179 		pag->pagf_levels[XFS_BTNUM_BNOi] =
2180 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
2181 		pag->pagf_levels[XFS_BTNUM_CNTi] =
2182 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
2183 		spin_lock_init(&pag->pagb_lock);
2184 		pag->pagb_count = 0;
2185 		pag->pagb_tree = RB_ROOT;
2186 		pag->pagf_init = 1;
2187 	}
2188 #ifdef DEBUG
2189 	else if (!XFS_FORCED_SHUTDOWN(mp)) {
2190 		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
2191 		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
2192 		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2193 		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
2194 		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
2195 		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
2196 		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
2197 		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
2198 	}
2199 #endif
2200 	xfs_perag_put(pag);
2201 	return 0;
2202 }
2203 
2204 /*
2205  * Allocate an extent (variable-size).
2206  * Depending on the allocation type, we either look in a single allocation
2207  * group or loop over the allocation groups to find the result.
2208  */
2209 int				/* error */
xfs_alloc_vextent(xfs_alloc_arg_t * args)2210 xfs_alloc_vextent(
2211 	xfs_alloc_arg_t	*args)	/* allocation argument structure */
2212 {
2213 	xfs_agblock_t	agsize;	/* allocation group size */
2214 	int		error;
2215 	int		flags;	/* XFS_ALLOC_FLAG_... locking flags */
2216 	xfs_extlen_t	minleft;/* minimum left value, temp copy */
2217 	xfs_mount_t	*mp;	/* mount structure pointer */
2218 	xfs_agnumber_t	sagno;	/* starting allocation group number */
2219 	xfs_alloctype_t	type;	/* input allocation type */
2220 	int		bump_rotor = 0;
2221 	int		no_min = 0;
2222 	xfs_agnumber_t	rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2223 
2224 	mp = args->mp;
2225 	type = args->otype = args->type;
2226 	args->agbno = NULLAGBLOCK;
2227 	/*
2228 	 * Just fix this up, for the case where the last a.g. is shorter
2229 	 * (or there's only one a.g.) and the caller couldn't easily figure
2230 	 * that out (xfs_bmap_alloc).
2231 	 */
2232 	agsize = mp->m_sb.sb_agblocks;
2233 	if (args->maxlen > agsize)
2234 		args->maxlen = agsize;
2235 	if (args->alignment == 0)
2236 		args->alignment = 1;
2237 	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2238 	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2239 	ASSERT(args->minlen <= args->maxlen);
2240 	ASSERT(args->minlen <= agsize);
2241 	ASSERT(args->mod < args->prod);
2242 	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2243 	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2244 	    args->minlen > args->maxlen || args->minlen > agsize ||
2245 	    args->mod >= args->prod) {
2246 		args->fsbno = NULLFSBLOCK;
2247 		trace_xfs_alloc_vextent_badargs(args);
2248 		return 0;
2249 	}
2250 	minleft = args->minleft;
2251 
2252 	switch (type) {
2253 	case XFS_ALLOCTYPE_THIS_AG:
2254 	case XFS_ALLOCTYPE_NEAR_BNO:
2255 	case XFS_ALLOCTYPE_THIS_BNO:
2256 		/*
2257 		 * These three force us into a single a.g.
2258 		 */
2259 		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2260 		args->pag = xfs_perag_get(mp, args->agno);
2261 		args->minleft = 0;
2262 		error = xfs_alloc_fix_freelist(args, 0);
2263 		args->minleft = minleft;
2264 		if (error) {
2265 			trace_xfs_alloc_vextent_nofix(args);
2266 			goto error0;
2267 		}
2268 		if (!args->agbp) {
2269 			trace_xfs_alloc_vextent_noagbp(args);
2270 			break;
2271 		}
2272 		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2273 		if ((error = xfs_alloc_ag_vextent(args)))
2274 			goto error0;
2275 		break;
2276 	case XFS_ALLOCTYPE_START_BNO:
2277 		/*
2278 		 * Try near allocation first, then anywhere-in-ag after
2279 		 * the first a.g. fails.
2280 		 */
2281 		if ((args->userdata  == XFS_ALLOC_INITIAL_USER_DATA) &&
2282 		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2283 			args->fsbno = XFS_AGB_TO_FSB(mp,
2284 					((mp->m_agfrotor / rotorstep) %
2285 					mp->m_sb.sb_agcount), 0);
2286 			bump_rotor = 1;
2287 		}
2288 		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2289 		args->type = XFS_ALLOCTYPE_NEAR_BNO;
2290 		/* FALLTHROUGH */
2291 	case XFS_ALLOCTYPE_ANY_AG:
2292 	case XFS_ALLOCTYPE_START_AG:
2293 	case XFS_ALLOCTYPE_FIRST_AG:
2294 		/*
2295 		 * Rotate through the allocation groups looking for a winner.
2296 		 */
2297 		if (type == XFS_ALLOCTYPE_ANY_AG) {
2298 			/*
2299 			 * Start with the last place we left off.
2300 			 */
2301 			args->agno = sagno = (mp->m_agfrotor / rotorstep) %
2302 					mp->m_sb.sb_agcount;
2303 			args->type = XFS_ALLOCTYPE_THIS_AG;
2304 			flags = XFS_ALLOC_FLAG_TRYLOCK;
2305 		} else if (type == XFS_ALLOCTYPE_FIRST_AG) {
2306 			/*
2307 			 * Start with allocation group given by bno.
2308 			 */
2309 			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2310 			args->type = XFS_ALLOCTYPE_THIS_AG;
2311 			sagno = 0;
2312 			flags = 0;
2313 		} else {
2314 			if (type == XFS_ALLOCTYPE_START_AG)
2315 				args->type = XFS_ALLOCTYPE_THIS_AG;
2316 			/*
2317 			 * Start with the given allocation group.
2318 			 */
2319 			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2320 			flags = XFS_ALLOC_FLAG_TRYLOCK;
2321 		}
2322 		/*
2323 		 * Loop over allocation groups twice; first time with
2324 		 * trylock set, second time without.
2325 		 */
2326 		for (;;) {
2327 			args->pag = xfs_perag_get(mp, args->agno);
2328 			if (no_min) args->minleft = 0;
2329 			error = xfs_alloc_fix_freelist(args, flags);
2330 			args->minleft = minleft;
2331 			if (error) {
2332 				trace_xfs_alloc_vextent_nofix(args);
2333 				goto error0;
2334 			}
2335 			/*
2336 			 * If we get a buffer back then the allocation will fly.
2337 			 */
2338 			if (args->agbp) {
2339 				if ((error = xfs_alloc_ag_vextent(args)))
2340 					goto error0;
2341 				break;
2342 			}
2343 
2344 			trace_xfs_alloc_vextent_loopfailed(args);
2345 
2346 			/*
2347 			 * Didn't work, figure out the next iteration.
2348 			 */
2349 			if (args->agno == sagno &&
2350 			    type == XFS_ALLOCTYPE_START_BNO)
2351 				args->type = XFS_ALLOCTYPE_THIS_AG;
2352 			/*
2353 			* For the first allocation, we can try any AG to get
2354 			* space.  However, if we already have allocated a
2355 			* block, we don't want to try AGs whose number is below
2356 			* sagno. Otherwise, we may end up with out-of-order
2357 			* locking of AGF, which might cause deadlock.
2358 			*/
2359 			if (++(args->agno) == mp->m_sb.sb_agcount) {
2360 				if (args->firstblock != NULLFSBLOCK)
2361 					args->agno = sagno;
2362 				else
2363 					args->agno = 0;
2364 			}
2365 			/*
2366 			 * Reached the starting a.g., must either be done
2367 			 * or switch to non-trylock mode.
2368 			 */
2369 			if (args->agno == sagno) {
2370 				if (no_min == 1) {
2371 					args->agbno = NULLAGBLOCK;
2372 					trace_xfs_alloc_vextent_allfailed(args);
2373 					break;
2374 				}
2375 				if (flags == 0) {
2376 					no_min = 1;
2377 				} else {
2378 					flags = 0;
2379 					if (type == XFS_ALLOCTYPE_START_BNO) {
2380 						args->agbno = XFS_FSB_TO_AGBNO(mp,
2381 							args->fsbno);
2382 						args->type = XFS_ALLOCTYPE_NEAR_BNO;
2383 					}
2384 				}
2385 			}
2386 			xfs_perag_put(args->pag);
2387 		}
2388 		if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
2389 			if (args->agno == sagno)
2390 				mp->m_agfrotor = (mp->m_agfrotor + 1) %
2391 					(mp->m_sb.sb_agcount * rotorstep);
2392 			else
2393 				mp->m_agfrotor = (args->agno * rotorstep + 1) %
2394 					(mp->m_sb.sb_agcount * rotorstep);
2395 		}
2396 		break;
2397 	default:
2398 		ASSERT(0);
2399 		/* NOTREACHED */
2400 	}
2401 	if (args->agbno == NULLAGBLOCK)
2402 		args->fsbno = NULLFSBLOCK;
2403 	else {
2404 		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2405 #ifdef DEBUG
2406 		ASSERT(args->len >= args->minlen);
2407 		ASSERT(args->len <= args->maxlen);
2408 		ASSERT(args->agbno % args->alignment == 0);
2409 		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2410 			args->len);
2411 #endif
2412 	}
2413 	xfs_perag_put(args->pag);
2414 	return 0;
2415 error0:
2416 	xfs_perag_put(args->pag);
2417 	return error;
2418 }
2419 
2420 /*
2421  * Free an extent.
2422  * Just break up the extent address and hand off to xfs_free_ag_extent
2423  * after fixing up the freelist.
2424  */
2425 int				/* error */
xfs_free_extent(xfs_trans_t * tp,xfs_fsblock_t bno,xfs_extlen_t len)2426 xfs_free_extent(
2427 	xfs_trans_t	*tp,	/* transaction pointer */
2428 	xfs_fsblock_t	bno,	/* starting block number of extent */
2429 	xfs_extlen_t	len)	/* length of extent */
2430 {
2431 	xfs_alloc_arg_t	args;
2432 	int		error;
2433 
2434 	ASSERT(len != 0);
2435 	memset(&args, 0, sizeof(xfs_alloc_arg_t));
2436 	args.tp = tp;
2437 	args.mp = tp->t_mountp;
2438 
2439 	/*
2440 	 * validate that the block number is legal - the enables us to detect
2441 	 * and handle a silent filesystem corruption rather than crashing.
2442 	 */
2443 	args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
2444 	if (args.agno >= args.mp->m_sb.sb_agcount)
2445 		return EFSCORRUPTED;
2446 
2447 	args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
2448 	if (args.agbno >= args.mp->m_sb.sb_agblocks)
2449 		return EFSCORRUPTED;
2450 
2451 	args.pag = xfs_perag_get(args.mp, args.agno);
2452 	ASSERT(args.pag);
2453 
2454 	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
2455 	if (error)
2456 		goto error0;
2457 
2458 	/* validate the extent size is legal now we have the agf locked */
2459 	if (args.agbno + len >
2460 			be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
2461 		error = EFSCORRUPTED;
2462 		goto error0;
2463 	}
2464 
2465 	error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
2466 	if (!error)
2467 		xfs_alloc_busy_insert(tp, args.agno, args.agbno, len, 0);
2468 error0:
2469 	xfs_perag_put(args.pag);
2470 	return error;
2471 }
2472 
2473 void
xfs_alloc_busy_insert(struct xfs_trans * tp,xfs_agnumber_t agno,xfs_agblock_t bno,xfs_extlen_t len,unsigned int flags)2474 xfs_alloc_busy_insert(
2475 	struct xfs_trans	*tp,
2476 	xfs_agnumber_t		agno,
2477 	xfs_agblock_t		bno,
2478 	xfs_extlen_t		len,
2479 	unsigned int		flags)
2480 {
2481 	struct xfs_busy_extent	*new;
2482 	struct xfs_busy_extent	*busyp;
2483 	struct xfs_perag	*pag;
2484 	struct rb_node		**rbp;
2485 	struct rb_node		*parent = NULL;
2486 
2487 	new = kmem_zalloc(sizeof(struct xfs_busy_extent), KM_MAYFAIL);
2488 	if (!new) {
2489 		/*
2490 		 * No Memory!  Since it is now not possible to track the free
2491 		 * block, make this a synchronous transaction to insure that
2492 		 * the block is not reused before this transaction commits.
2493 		 */
2494 		trace_xfs_alloc_busy_enomem(tp->t_mountp, agno, bno, len);
2495 		xfs_trans_set_sync(tp);
2496 		return;
2497 	}
2498 
2499 	new->agno = agno;
2500 	new->bno = bno;
2501 	new->length = len;
2502 	INIT_LIST_HEAD(&new->list);
2503 	new->flags = flags;
2504 
2505 	/* trace before insert to be able to see failed inserts */
2506 	trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len);
2507 
2508 	pag = xfs_perag_get(tp->t_mountp, new->agno);
2509 	spin_lock(&pag->pagb_lock);
2510 	rbp = &pag->pagb_tree.rb_node;
2511 	while (*rbp) {
2512 		parent = *rbp;
2513 		busyp = rb_entry(parent, struct xfs_busy_extent, rb_node);
2514 
2515 		if (new->bno < busyp->bno) {
2516 			rbp = &(*rbp)->rb_left;
2517 			ASSERT(new->bno + new->length <= busyp->bno);
2518 		} else if (new->bno > busyp->bno) {
2519 			rbp = &(*rbp)->rb_right;
2520 			ASSERT(bno >= busyp->bno + busyp->length);
2521 		} else {
2522 			ASSERT(0);
2523 		}
2524 	}
2525 
2526 	rb_link_node(&new->rb_node, parent, rbp);
2527 	rb_insert_color(&new->rb_node, &pag->pagb_tree);
2528 
2529 	list_add(&new->list, &tp->t_busy);
2530 	spin_unlock(&pag->pagb_lock);
2531 	xfs_perag_put(pag);
2532 }
2533 
2534 /*
2535  * Search for a busy extent within the range of the extent we are about to
2536  * allocate.  You need to be holding the busy extent tree lock when calling
2537  * xfs_alloc_busy_search(). This function returns 0 for no overlapping busy
2538  * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
2539  * match. This is done so that a non-zero return indicates an overlap that
2540  * will require a synchronous transaction, but it can still be
2541  * used to distinguish between a partial or exact match.
2542  */
2543 int
xfs_alloc_busy_search(struct xfs_mount * mp,xfs_agnumber_t agno,xfs_agblock_t bno,xfs_extlen_t len)2544 xfs_alloc_busy_search(
2545 	struct xfs_mount	*mp,
2546 	xfs_agnumber_t		agno,
2547 	xfs_agblock_t		bno,
2548 	xfs_extlen_t		len)
2549 {
2550 	struct xfs_perag	*pag;
2551 	struct rb_node		*rbp;
2552 	struct xfs_busy_extent	*busyp;
2553 	int			match = 0;
2554 
2555 	pag = xfs_perag_get(mp, agno);
2556 	spin_lock(&pag->pagb_lock);
2557 
2558 	rbp = pag->pagb_tree.rb_node;
2559 
2560 	/* find closest start bno overlap */
2561 	while (rbp) {
2562 		busyp = rb_entry(rbp, struct xfs_busy_extent, rb_node);
2563 		if (bno < busyp->bno) {
2564 			/* may overlap, but exact start block is lower */
2565 			if (bno + len > busyp->bno)
2566 				match = -1;
2567 			rbp = rbp->rb_left;
2568 		} else if (bno > busyp->bno) {
2569 			/* may overlap, but exact start block is higher */
2570 			if (bno < busyp->bno + busyp->length)
2571 				match = -1;
2572 			rbp = rbp->rb_right;
2573 		} else {
2574 			/* bno matches busyp, length determines exact match */
2575 			match = (busyp->length == len) ? 1 : -1;
2576 			break;
2577 		}
2578 	}
2579 	spin_unlock(&pag->pagb_lock);
2580 	xfs_perag_put(pag);
2581 	return match;
2582 }
2583 
2584 /*
2585  * The found free extent [fbno, fend] overlaps part or all of the given busy
2586  * extent.  If the overlap covers the beginning, the end, or all of the busy
2587  * extent, the overlapping portion can be made unbusy and used for the
2588  * allocation.  We can't split a busy extent because we can't modify a
2589  * transaction/CIL context busy list, but we can update an entries block
2590  * number or length.
2591  *
2592  * Returns true if the extent can safely be reused, or false if the search
2593  * needs to be restarted.
2594  */
2595 STATIC bool
xfs_alloc_busy_update_extent(struct xfs_mount * mp,struct xfs_perag * pag,struct xfs_busy_extent * busyp,xfs_agblock_t fbno,xfs_extlen_t flen,bool userdata)2596 xfs_alloc_busy_update_extent(
2597 	struct xfs_mount	*mp,
2598 	struct xfs_perag	*pag,
2599 	struct xfs_busy_extent	*busyp,
2600 	xfs_agblock_t		fbno,
2601 	xfs_extlen_t		flen,
2602 	bool			userdata)
2603 {
2604 	xfs_agblock_t		fend = fbno + flen;
2605 	xfs_agblock_t		bbno = busyp->bno;
2606 	xfs_agblock_t		bend = bbno + busyp->length;
2607 
2608 	/*
2609 	 * This extent is currently being discarded.  Give the thread
2610 	 * performing the discard a chance to mark the extent unbusy
2611 	 * and retry.
2612 	 */
2613 	if (busyp->flags & XFS_ALLOC_BUSY_DISCARDED) {
2614 		spin_unlock(&pag->pagb_lock);
2615 		delay(1);
2616 		spin_lock(&pag->pagb_lock);
2617 		return false;
2618 	}
2619 
2620 	/*
2621 	 * If there is a busy extent overlapping a user allocation, we have
2622 	 * no choice but to force the log and retry the search.
2623 	 *
2624 	 * Fortunately this does not happen during normal operation, but
2625 	 * only if the filesystem is very low on space and has to dip into
2626 	 * the AGFL for normal allocations.
2627 	 */
2628 	if (userdata)
2629 		goto out_force_log;
2630 
2631 	if (bbno < fbno && bend > fend) {
2632 		/*
2633 		 * Case 1:
2634 		 *    bbno           bend
2635 		 *    +BBBBBBBBBBBBBBBBB+
2636 		 *        +---------+
2637 		 *        fbno   fend
2638 		 */
2639 
2640 		/*
2641 		 * We would have to split the busy extent to be able to track
2642 		 * it correct, which we cannot do because we would have to
2643 		 * modify the list of busy extents attached to the transaction
2644 		 * or CIL context, which is immutable.
2645 		 *
2646 		 * Force out the log to clear the busy extent and retry the
2647 		 * search.
2648 		 */
2649 		goto out_force_log;
2650 	} else if (bbno >= fbno && bend <= fend) {
2651 		/*
2652 		 * Case 2:
2653 		 *    bbno           bend
2654 		 *    +BBBBBBBBBBBBBBBBB+
2655 		 *    +-----------------+
2656 		 *    fbno           fend
2657 		 *
2658 		 * Case 3:
2659 		 *    bbno           bend
2660 		 *    +BBBBBBBBBBBBBBBBB+
2661 		 *    +--------------------------+
2662 		 *    fbno                    fend
2663 		 *
2664 		 * Case 4:
2665 		 *             bbno           bend
2666 		 *             +BBBBBBBBBBBBBBBBB+
2667 		 *    +--------------------------+
2668 		 *    fbno                    fend
2669 		 *
2670 		 * Case 5:
2671 		 *             bbno           bend
2672 		 *             +BBBBBBBBBBBBBBBBB+
2673 		 *    +-----------------------------------+
2674 		 *    fbno                             fend
2675 		 *
2676 		 */
2677 
2678 		/*
2679 		 * The busy extent is fully covered by the extent we are
2680 		 * allocating, and can simply be removed from the rbtree.
2681 		 * However we cannot remove it from the immutable list
2682 		 * tracking busy extents in the transaction or CIL context,
2683 		 * so set the length to zero to mark it invalid.
2684 		 *
2685 		 * We also need to restart the busy extent search from the
2686 		 * tree root, because erasing the node can rearrange the
2687 		 * tree topology.
2688 		 */
2689 		rb_erase(&busyp->rb_node, &pag->pagb_tree);
2690 		busyp->length = 0;
2691 		return false;
2692 	} else if (fend < bend) {
2693 		/*
2694 		 * Case 6:
2695 		 *              bbno           bend
2696 		 *             +BBBBBBBBBBBBBBBBB+
2697 		 *             +---------+
2698 		 *             fbno   fend
2699 		 *
2700 		 * Case 7:
2701 		 *             bbno           bend
2702 		 *             +BBBBBBBBBBBBBBBBB+
2703 		 *    +------------------+
2704 		 *    fbno            fend
2705 		 *
2706 		 */
2707 		busyp->bno = fend;
2708 	} else if (bbno < fbno) {
2709 		/*
2710 		 * Case 8:
2711 		 *    bbno           bend
2712 		 *    +BBBBBBBBBBBBBBBBB+
2713 		 *        +-------------+
2714 		 *        fbno       fend
2715 		 *
2716 		 * Case 9:
2717 		 *    bbno           bend
2718 		 *    +BBBBBBBBBBBBBBBBB+
2719 		 *        +----------------------+
2720 		 *        fbno                fend
2721 		 */
2722 		busyp->length = fbno - busyp->bno;
2723 	} else {
2724 		ASSERT(0);
2725 	}
2726 
2727 	trace_xfs_alloc_busy_reuse(mp, pag->pag_agno, fbno, flen);
2728 	return true;
2729 
2730 out_force_log:
2731 	spin_unlock(&pag->pagb_lock);
2732 	xfs_log_force(mp, XFS_LOG_SYNC);
2733 	trace_xfs_alloc_busy_force(mp, pag->pag_agno, fbno, flen);
2734 	spin_lock(&pag->pagb_lock);
2735 	return false;
2736 }
2737 
2738 
2739 /*
2740  * For a given extent [fbno, flen], make sure we can reuse it safely.
2741  */
2742 void
xfs_alloc_busy_reuse(struct xfs_mount * mp,xfs_agnumber_t agno,xfs_agblock_t fbno,xfs_extlen_t flen,bool userdata)2743 xfs_alloc_busy_reuse(
2744 	struct xfs_mount	*mp,
2745 	xfs_agnumber_t		agno,
2746 	xfs_agblock_t		fbno,
2747 	xfs_extlen_t		flen,
2748 	bool			userdata)
2749 {
2750 	struct xfs_perag	*pag;
2751 	struct rb_node		*rbp;
2752 
2753 	ASSERT(flen > 0);
2754 
2755 	pag = xfs_perag_get(mp, agno);
2756 	spin_lock(&pag->pagb_lock);
2757 restart:
2758 	rbp = pag->pagb_tree.rb_node;
2759 	while (rbp) {
2760 		struct xfs_busy_extent *busyp =
2761 			rb_entry(rbp, struct xfs_busy_extent, rb_node);
2762 		xfs_agblock_t	bbno = busyp->bno;
2763 		xfs_agblock_t	bend = bbno + busyp->length;
2764 
2765 		if (fbno + flen <= bbno) {
2766 			rbp = rbp->rb_left;
2767 			continue;
2768 		} else if (fbno >= bend) {
2769 			rbp = rbp->rb_right;
2770 			continue;
2771 		}
2772 
2773 		if (!xfs_alloc_busy_update_extent(mp, pag, busyp, fbno, flen,
2774 						  userdata))
2775 			goto restart;
2776 	}
2777 	spin_unlock(&pag->pagb_lock);
2778 	xfs_perag_put(pag);
2779 }
2780 
2781 /*
2782  * For a given extent [fbno, flen], search the busy extent list to find a
2783  * subset of the extent that is not busy.  If *rlen is smaller than
2784  * args->minlen no suitable extent could be found, and the higher level
2785  * code needs to force out the log and retry the allocation.
2786  */
2787 STATIC void
xfs_alloc_busy_trim(struct xfs_alloc_arg * args,xfs_agblock_t bno,xfs_extlen_t len,xfs_agblock_t * rbno,xfs_extlen_t * rlen)2788 xfs_alloc_busy_trim(
2789 	struct xfs_alloc_arg	*args,
2790 	xfs_agblock_t		bno,
2791 	xfs_extlen_t		len,
2792 	xfs_agblock_t		*rbno,
2793 	xfs_extlen_t		*rlen)
2794 {
2795 	xfs_agblock_t		fbno;
2796 	xfs_extlen_t		flen;
2797 	struct rb_node		*rbp;
2798 
2799 	ASSERT(len > 0);
2800 
2801 	spin_lock(&args->pag->pagb_lock);
2802 restart:
2803 	fbno = bno;
2804 	flen = len;
2805 	rbp = args->pag->pagb_tree.rb_node;
2806 	while (rbp && flen >= args->minlen) {
2807 		struct xfs_busy_extent *busyp =
2808 			rb_entry(rbp, struct xfs_busy_extent, rb_node);
2809 		xfs_agblock_t	fend = fbno + flen;
2810 		xfs_agblock_t	bbno = busyp->bno;
2811 		xfs_agblock_t	bend = bbno + busyp->length;
2812 
2813 		if (fend <= bbno) {
2814 			rbp = rbp->rb_left;
2815 			continue;
2816 		} else if (fbno >= bend) {
2817 			rbp = rbp->rb_right;
2818 			continue;
2819 		}
2820 
2821 		/*
2822 		 * If this is a metadata allocation, try to reuse the busy
2823 		 * extent instead of trimming the allocation.
2824 		 */
2825 		if (!args->userdata &&
2826 		    !(busyp->flags & XFS_ALLOC_BUSY_DISCARDED)) {
2827 			if (!xfs_alloc_busy_update_extent(args->mp, args->pag,
2828 							  busyp, fbno, flen,
2829 							  false))
2830 				goto restart;
2831 			continue;
2832 		}
2833 
2834 		if (bbno <= fbno) {
2835 			/* start overlap */
2836 
2837 			/*
2838 			 * Case 1:
2839 			 *    bbno           bend
2840 			 *    +BBBBBBBBBBBBBBBBB+
2841 			 *        +---------+
2842 			 *        fbno   fend
2843 			 *
2844 			 * Case 2:
2845 			 *    bbno           bend
2846 			 *    +BBBBBBBBBBBBBBBBB+
2847 			 *    +-------------+
2848 			 *    fbno       fend
2849 			 *
2850 			 * Case 3:
2851 			 *    bbno           bend
2852 			 *    +BBBBBBBBBBBBBBBBB+
2853 			 *        +-------------+
2854 			 *        fbno       fend
2855 			 *
2856 			 * Case 4:
2857 			 *    bbno           bend
2858 			 *    +BBBBBBBBBBBBBBBBB+
2859 			 *    +-----------------+
2860 			 *    fbno           fend
2861 			 *
2862 			 * No unbusy region in extent, return failure.
2863 			 */
2864 			if (fend <= bend)
2865 				goto fail;
2866 
2867 			/*
2868 			 * Case 5:
2869 			 *    bbno           bend
2870 			 *    +BBBBBBBBBBBBBBBBB+
2871 			 *        +----------------------+
2872 			 *        fbno                fend
2873 			 *
2874 			 * Case 6:
2875 			 *    bbno           bend
2876 			 *    +BBBBBBBBBBBBBBBBB+
2877 			 *    +--------------------------+
2878 			 *    fbno                    fend
2879 			 *
2880 			 * Needs to be trimmed to:
2881 			 *                       +-------+
2882 			 *                       fbno fend
2883 			 */
2884 			fbno = bend;
2885 		} else if (bend >= fend) {
2886 			/* end overlap */
2887 
2888 			/*
2889 			 * Case 7:
2890 			 *             bbno           bend
2891 			 *             +BBBBBBBBBBBBBBBBB+
2892 			 *    +------------------+
2893 			 *    fbno            fend
2894 			 *
2895 			 * Case 8:
2896 			 *             bbno           bend
2897 			 *             +BBBBBBBBBBBBBBBBB+
2898 			 *    +--------------------------+
2899 			 *    fbno                    fend
2900 			 *
2901 			 * Needs to be trimmed to:
2902 			 *    +-------+
2903 			 *    fbno fend
2904 			 */
2905 			fend = bbno;
2906 		} else {
2907 			/* middle overlap */
2908 
2909 			/*
2910 			 * Case 9:
2911 			 *             bbno           bend
2912 			 *             +BBBBBBBBBBBBBBBBB+
2913 			 *    +-----------------------------------+
2914 			 *    fbno                             fend
2915 			 *
2916 			 * Can be trimmed to:
2917 			 *    +-------+        OR         +-------+
2918 			 *    fbno fend                   fbno fend
2919 			 *
2920 			 * Backward allocation leads to significant
2921 			 * fragmentation of directories, which degrades
2922 			 * directory performance, therefore we always want to
2923 			 * choose the option that produces forward allocation
2924 			 * patterns.
2925 			 * Preferring the lower bno extent will make the next
2926 			 * request use "fend" as the start of the next
2927 			 * allocation;  if the segment is no longer busy at
2928 			 * that point, we'll get a contiguous allocation, but
2929 			 * even if it is still busy, we will get a forward
2930 			 * allocation.
2931 			 * We try to avoid choosing the segment at "bend",
2932 			 * because that can lead to the next allocation
2933 			 * taking the segment at "fbno", which would be a
2934 			 * backward allocation.  We only use the segment at
2935 			 * "fbno" if it is much larger than the current
2936 			 * requested size, because in that case there's a
2937 			 * good chance subsequent allocations will be
2938 			 * contiguous.
2939 			 */
2940 			if (bbno - fbno >= args->maxlen) {
2941 				/* left candidate fits perfect */
2942 				fend = bbno;
2943 			} else if (fend - bend >= args->maxlen * 4) {
2944 				/* right candidate has enough free space */
2945 				fbno = bend;
2946 			} else if (bbno - fbno >= args->minlen) {
2947 				/* left candidate fits minimum requirement */
2948 				fend = bbno;
2949 			} else {
2950 				goto fail;
2951 			}
2952 		}
2953 
2954 		flen = fend - fbno;
2955 	}
2956 	spin_unlock(&args->pag->pagb_lock);
2957 
2958 	if (fbno != bno || flen != len) {
2959 		trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len,
2960 					  fbno, flen);
2961 	}
2962 	*rbno = fbno;
2963 	*rlen = flen;
2964 	return;
2965 fail:
2966 	/*
2967 	 * Return a zero extent length as failure indications.  All callers
2968 	 * re-check if the trimmed extent satisfies the minlen requirement.
2969 	 */
2970 	spin_unlock(&args->pag->pagb_lock);
2971 	trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
2972 	*rbno = fbno;
2973 	*rlen = 0;
2974 }
2975 
2976 static void
xfs_alloc_busy_clear_one(struct xfs_mount * mp,struct xfs_perag * pag,struct xfs_busy_extent * busyp)2977 xfs_alloc_busy_clear_one(
2978 	struct xfs_mount	*mp,
2979 	struct xfs_perag	*pag,
2980 	struct xfs_busy_extent	*busyp)
2981 {
2982 	if (busyp->length) {
2983 		trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
2984 						busyp->length);
2985 		rb_erase(&busyp->rb_node, &pag->pagb_tree);
2986 	}
2987 
2988 	list_del_init(&busyp->list);
2989 	kmem_free(busyp);
2990 }
2991 
2992 /*
2993  * Remove all extents on the passed in list from the busy extents tree.
2994  * If do_discard is set skip extents that need to be discarded, and mark
2995  * these as undergoing a discard operation instead.
2996  */
2997 void
xfs_alloc_busy_clear(struct xfs_mount * mp,struct list_head * list,bool do_discard)2998 xfs_alloc_busy_clear(
2999 	struct xfs_mount	*mp,
3000 	struct list_head	*list,
3001 	bool			do_discard)
3002 {
3003 	struct xfs_busy_extent	*busyp, *n;
3004 	struct xfs_perag	*pag = NULL;
3005 	xfs_agnumber_t		agno = NULLAGNUMBER;
3006 
3007 	list_for_each_entry_safe(busyp, n, list, list) {
3008 		if (busyp->agno != agno) {
3009 			if (pag) {
3010 				spin_unlock(&pag->pagb_lock);
3011 				xfs_perag_put(pag);
3012 			}
3013 			pag = xfs_perag_get(mp, busyp->agno);
3014 			spin_lock(&pag->pagb_lock);
3015 			agno = busyp->agno;
3016 		}
3017 
3018 		if (do_discard && busyp->length &&
3019 		    !(busyp->flags & XFS_ALLOC_BUSY_SKIP_DISCARD))
3020 			busyp->flags = XFS_ALLOC_BUSY_DISCARDED;
3021 		else
3022 			xfs_alloc_busy_clear_one(mp, pag, busyp);
3023 	}
3024 
3025 	if (pag) {
3026 		spin_unlock(&pag->pagb_lock);
3027 		xfs_perag_put(pag);
3028 	}
3029 }
3030 
3031 /*
3032  * Callback for list_sort to sort busy extents by the AG they reside in.
3033  */
3034 int
xfs_busy_extent_ag_cmp(void * priv,struct list_head * a,struct list_head * b)3035 xfs_busy_extent_ag_cmp(
3036 	void			*priv,
3037 	struct list_head	*a,
3038 	struct list_head	*b)
3039 {
3040 	return container_of(a, struct xfs_busy_extent, list)->agno -
3041 		container_of(b, struct xfs_busy_extent, list)->agno;
3042 }
3043