/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018 Red Hat, Inc.
 */
#ifndef __LIBXFS_GROUP_H
#define __LIBXFS_GROUP_H 1

struct xfs_group {
	struct xfs_mount	*xg_mount;
	uint32_t		xg_gno;
	enum xfs_group_type	xg_type;
	atomic_t		xg_ref;		/* passive reference count */
	atomic_t		xg_active_ref;	/* active reference count */

	/* Precalculated geometry info */
	uint32_t		xg_block_count;	/* max usable gbno */
	uint32_t		xg_min_gbno;	/* min usable gbno */

#ifdef __KERNEL__
	/* -- kernel only structures below this line -- */

	union {
		/*
		 * For perags and non-zoned RT groups:
		 * Track freed but not yet committed extents.
		 */
		struct xfs_extent_busy_tree	*xg_busy_extents;

		/*
		 * For zoned RT groups:
		 * List of groups that need a zone reset.
		 *
		 * The zonegc code forces a log flush of the rtrmap inode before
		 * resetting the write pointer, so there is no need for
		 * individual busy extent tracking.
		 */
		struct xfs_group		*xg_next_reset;
	};
	/*
	 * Bitsets of per-group metadata that have been checked and/or are
	 * sick.  Callers should hold xg_state_lock before accessing these
	 * fields.
	 */
	uint16_t		xg_checked;
	uint16_t		xg_sick;
	spinlock_t		xg_state_lock;

	/*
	 * We use xfs_drain to track the number of deferred log intent items
	 * that have been queued (but not yet processed) so that waiters (e.g.
	 * scrub) will not lock resources when other threads are in the middle
	 * of processing a chain of intent items only to find momentary
	 * inconsistencies.
	 */
	struct xfs_defer_drain	xg_intents_drain;

	/*
	 * Hook to feed rmapbt updates to an active online repair.
	 */
	struct xfs_hooks	xg_rmap_update_hooks;
#endif /* __KERNEL__ */
};
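
/*
 * struct xfs_group is not used on its own: the per-AG and RT group code embed
 * it in their own per-group structures and convert back and forth with
 * container_of().  The helpers below only operate on the common fields above.
 */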

struct xfs_group *xfs_group_get(struct xfs_mount *mp, uint32_t index,
		enum xfs_group_type type);
struct xfs_group *xfs_group_get_by_fsb(struct xfs_mount *mp,
		xfs_fsblock_t fsbno, enum xfs_group_type type);
struct xfs_group *xfs_group_hold(struct xfs_group *xg);
void xfs_group_put(struct xfs_group *xg);
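
/*
 * Illustrative sketch (not kernel code): a passive reference pins the group
 * structure itself in memory.  Assuming XG_TYPE_AG from enum xfs_group_type
 * and a caller-provided group number @agno:
 *
 *	struct xfs_group	*xg;
 *
 *	xg = xfs_group_get(mp, agno, XG_TYPE_AG);
 *	if (!xg)
 *		return -ENOENT;
 *	... use the precalculated geometry in xg ...
 *	xfs_group_put(xg);
 */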

struct xfs_group *xfs_group_grab(struct xfs_mount *mp, uint32_t index,
		enum xfs_group_type type);
struct xfs_group *xfs_group_next_range(struct xfs_mount *mp,
		struct xfs_group *xg, uint32_t start_index, uint32_t end_index,
		enum xfs_group_type type);
struct xfs_group *xfs_group_grab_next_mark(struct xfs_mount *mp,
		struct xfs_group *xg, xa_mark_t mark, enum xfs_group_type type);
void xfs_group_rele(struct xfs_group *xg);
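
/*
 * Illustrative sketch: active references track in-use groups so that
 * teardown can wait for them.  xfs_group_next_range() releases the reference
 * on the group passed in and grabs the next one in the range, so a loop
 * starting from NULL that runs to completion holds no references afterwards;
 * callers that break out early must call xfs_group_rele() themselves:
 *
 *	struct xfs_group	*xg = NULL;
 *
 *	while ((xg = xfs_group_next_range(mp, xg, 0, end_index,
 *			XG_TYPE_AG))) {
 *		... use xg ...
 *	}
 */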

void xfs_group_free(struct xfs_mount *mp, uint32_t index,
		enum xfs_group_type type, void (*uninit)(struct xfs_group *xg));
int xfs_group_insert(struct xfs_mount *mp, struct xfs_group *xg,
		uint32_t index, enum xfs_group_type type);
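
/*
 * Illustrative lifecycle sketch: xfs_group_insert() initializes the common
 * fields of @xg and makes it discoverable under @index; xfs_group_free()
 * removes it again, calling the optional @uninit callback before the
 * structure is released.  (my_group_uninit is a hypothetical caller hook.)
 *
 *	error = xfs_group_insert(mp, xg, index, XG_TYPE_AG);
 *	if (error)
 *		return error;
 *	...
 *	xfs_group_free(mp, index, XG_TYPE_AG, my_group_uninit);
 */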

#define xfs_group_set_mark(_xg, _mark) \
	xa_set_mark(&(_xg)->xg_mount->m_groups[(_xg)->xg_type].xa, \
			(_xg)->xg_gno, (_mark))
#define xfs_group_clear_mark(_xg, _mark) \
	xa_clear_mark(&(_xg)->xg_mount->m_groups[(_xg)->xg_type].xa, \
			(_xg)->xg_gno, (_mark))
#define xfs_group_marked(_mp, _type, _mark) \
	xa_marked(&(_mp)->m_groups[(_type)].xa, (_mark))
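
/*
 * Illustrative sketch: marks tag groups in the per-type lookup structure so
 * that xfs_group_grab_next_mark() can walk only the tagged groups and
 * xfs_group_marked() can cheaply test whether any group of a type is tagged.
 * XA_MARK_0 below is the generic XArray mark constant; real users define
 * their own aliases for the marks they need:
 *
 *	xfs_group_set_mark(xg, XA_MARK_0);
 *	...
 *	if (xfs_group_marked(mp, XG_TYPE_AG, XA_MARK_0))
 *		... at least one AG is still marked ...
 */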

static inline xfs_agblock_t
xfs_group_max_blocks(
	struct xfs_group	*xg)
{
	return xg->xg_mount->m_groups[xg->xg_type].blocks;
}
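
/*
 * Note that xfs_group_max_blocks() returns the nominal size of a group of
 * this type; the last group on a device may be shorter, which is what
 * xg_block_count in the group itself reflects.
 */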

static inline xfs_fsblock_t
xfs_group_start_fsb(
	struct xfs_group	*xg)
{
	return ((xfs_fsblock_t)xg->xg_gno) <<
		xg->xg_mount->m_groups[xg->xg_type].blklog;
}
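
/*
 * Each group owns a power-of-two aligned window of the fsb address space,
 * 1 << blklog blocks wide, so a group-relative block number can simply be
 * OR'ed into the group's start fsb below.  For example, with blklog = 20,
 * group 3 starts at fsb 3 << 20 and its block 7 is fsb (3 << 20) | 7.
 */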

static inline xfs_fsblock_t
xfs_gbno_to_fsb(
	struct xfs_group	*xg,
	xfs_agblock_t		gbno)
{
	return xfs_group_start_fsb(xg) | gbno;
}

static inline xfs_daddr_t
xfs_gbno_to_daddr(
	struct xfs_group	*xg,
	xfs_agblock_t		gbno)
{
	struct xfs_mount	*mp = xg->xg_mount;
	struct xfs_groups	*g = &mp->m_groups[xg->xg_type];
	xfs_fsblock_t		fsbno;

	if (g->has_daddr_gaps)
		fsbno = xfs_gbno_to_fsb(xg, gbno);
	else
		fsbno = (xfs_fsblock_t)xg->xg_gno * g->blocks + gbno;

	return XFS_FSB_TO_BB(mp, g->start_fsb + fsbno);
}
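
/*
 * Note the two layouts above: when the group type has daddr gaps (e.g. on
 * zoned devices), the sparse power-of-two fsb encoding maps straight to disk
 * addresses, leaving unused holes between groups; otherwise groups are packed
 * densely on disk and the daddr is computed from the group's actual size.
 */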

static inline uint32_t
xfs_fsb_to_gno(
	struct xfs_mount	*mp,
	xfs_fsblock_t		fsbno,
	enum xfs_group_type	type)
{
	if (!mp->m_groups[type].blklog)
		return 0;
	return fsbno >> mp->m_groups[type].blklog;
}

static inline xfs_agblock_t
xfs_fsb_to_gbno(
	struct xfs_mount	*mp,
	xfs_fsblock_t		fsbno,
	enum xfs_group_type	type)
{
	return fsbno & mp->m_groups[type].blkmask;
}
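
/*
 * Taken together, these invert xfs_gbno_to_fsb(): the group number lives in
 * the high bits and the group-relative block in the low bits.  Continuing
 * the example above with blklog = 20 and blkmask = (1 << 20) - 1, fsb
 * (3 << 20) | 7 decodes back to group 3, block 7.
 */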

static inline bool
xfs_verify_gbno(
	struct xfs_group	*xg,
	uint32_t		gbno)
{
	if (gbno >= xg->xg_block_count)
		return false;
	if (gbno < xg->xg_min_gbno)
		return false;
	return true;
}

static inline bool
xfs_verify_gbext(
	struct xfs_group	*xg,
	uint32_t		gbno,
	uint32_t		glen)
{
	uint32_t		end;

	if (!xfs_verify_gbno(xg, gbno))
		return false;
	if (glen == 0 || check_add_overflow(gbno, glen - 1, &end))
		return false;
	if (!xfs_verify_gbno(xg, end))
		return false;
	return true;
}
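
/*
 * Illustrative sketch: the verifiers above are meant for validating on-disk
 * block numbers and extents against a group's usable range, e.g. from a
 * checking function (__this_address is the usual XFS fault-address idiom):
 *
 *	if (!xfs_verify_gbext(xg, gbno, glen))
 *		return __this_address;
 */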

#endif /* __LIBXFS_GROUP_H */