// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless hierarchical page accounting & limiting
 *
 * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <asm/page.h>

static void propagate_protected_usage(struct page_counter *c,
                                      unsigned long usage)
{
        unsigned long protected, old_protected;
        long delta;

        if (!c->parent)
                return;

        protected = min(usage, READ_ONCE(c->min));
        old_protected = atomic_long_read(&c->min_usage);
        if (protected != old_protected) {
                old_protected = atomic_long_xchg(&c->min_usage, protected);
                delta = protected - old_protected;
                if (delta)
                        atomic_long_add(delta, &c->parent->children_min_usage);
        }

        protected = min(usage, READ_ONCE(c->low));
        old_protected = atomic_long_read(&c->low_usage);
        if (protected != old_protected) {
                old_protected = atomic_long_xchg(&c->low_usage, protected);
                delta = protected - old_protected;
                if (delta)
                        atomic_long_add(delta, &c->parent->children_low_usage);
        }
}

/**
 * page_counter_cancel - take pages out of the local counter
 * @counter: counter
 * @nr_pages: number of pages to cancel
 */
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
        long new;

        new = atomic_long_sub_return(nr_pages, &counter->usage);
        /* More uncharges than charges? */
        if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n",
                      new, nr_pages)) {
                new = 0;
                atomic_long_set(&counter->usage, new);
        }
        propagate_protected_usage(counter, new);
}

/**
 * page_counter_charge - hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 *
 * NOTE: This does not consider any configured counter limits.
 */
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *c;

        for (c = counter; c; c = c->parent) {
                long new;

                new = atomic_long_add_return(nr_pages, &c->usage);
                propagate_protected_usage(c, new);
                /* Racy, but some inaccuracy in the watermark is acceptable. */
                if (new > READ_ONCE(c->watermark))
                        WRITE_ONCE(c->watermark, new);
        }
}

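/*
 * Illustrative sketch, not part of the original file: building a small
 * parent/child hierarchy and force-charging it.  Assumes the two-argument
 * page_counter_init() from include/linux/page_counter.h; the names
 * example_parent/example_child and the page count are made up.
 */
static struct page_counter example_parent, example_child;

static void __maybe_unused example_force_charge(void)
{
        page_counter_init(&example_parent, NULL);
        page_counter_init(&example_child, &example_parent);

        /* The charge is applied at every level from the child up to the root. */
        page_counter_charge(&example_child, 32);

        WARN_ON(page_counter_read(&example_child) != 32);
        WARN_ON(page_counter_read(&example_parent) != 32);
        /* Both watermarks now record at least 32 pages as the historical peak. */
}
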
/**
 * page_counter_try_charge - try to hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 * @fail: points first counter to hit its limit, if any
 *
 * Returns %true on success, or %false and @fail if the counter or one
 * of its ancestors has hit its limit.
 */
bool page_counter_try_charge(struct page_counter *counter,
                             unsigned long nr_pages,
                             struct page_counter **fail)
{
        struct page_counter *c;

        for (c = counter; c; c = c->parent) {
                long new;
                /*
                 * Charge speculatively: atomic_long_add_return() implies a
                 * full memory barrier between incrementing the count and
                 * reading the limit, so a racing page_counter_set_max()
                 * either sees the new count or this path sees the new limit.
                 */
                new = atomic_long_add_return(nr_pages, &c->usage);
                if (new > c->max) {
                        atomic_long_sub(nr_pages, &c->usage);
                        /* failcnt is only statistics; raciness is tolerated. */
                        data_race(c->failcnt++);
                        *fail = c;
                        goto failed;
                }
                propagate_protected_usage(c, new);
                /* The watermark tolerates the same raciness. */
                if (new > READ_ONCE(c->watermark))
                        WRITE_ONCE(c->watermark, new);
        }
        return true;

failed:
        for (c = counter; c != *fail; c = c->parent)
                page_counter_cancel(c, nr_pages);

        return false;
}

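/*
 * Illustrative sketch, not part of the original file: the usual calling
 * pattern around page_counter_try_charge().  On failure the partial charge
 * has already been unwound, and @fail names the ancestor whose limit was
 * hit; a controller would normally reclaim against that level and retry.
 * example_try_charge and the single-page charge are made-up names/values.
 */
static bool __maybe_unused example_try_charge(struct page_counter *counter)
{
        struct page_counter *fail;

        if (!page_counter_try_charge(counter, 1, &fail)) {
                /* Nothing to undo here; report which level rejected the charge. */
                pr_debug("charge rejected, limit %lu pages\n", READ_ONCE(fail->max));
                return false;
        }
        return true;
}
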
/**
 * page_counter_uncharge - hierarchically uncharge pages
 * @counter: counter
 * @nr_pages: number of pages to uncharge
 */
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *c;

        for (c = counter; c; c = c->parent)
                page_counter_cancel(c, nr_pages);
}

/**
 * page_counter_set_max - set the maximum number of pages allowed
 * @counter: counter
 * @nr_pages: limit to set
 *
 * Returns 0 on success, -EBUSY if the current number of pages on the
 * counter already exceeds the specified limit.
 *
 * The caller must serialize invocations on the same counter.
 */
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
        for (;;) {
                unsigned long old;
                long usage;

                /*
                 * Update the limit while making sure that it's not
                 * below the concurrently-changing counter value.
                 *
                 * The xchg implies two full memory barriers before
                 * and after, so the read-swap-read is ordered and
                 * ensures coherency with page_counter_try_charge():
                 * that function modifies the count before checking
                 * the limit, so if it sees the old limit, we see the
                 * modified counter and retry.
                 */
                usage = page_counter_read(counter);

                if (usage > nr_pages)
                        return -EBUSY;

                old = xchg(&counter->max, nr_pages);

                if (page_counter_read(counter) <= usage || nr_pages >= old)
                        return 0;

                counter->max = old;
                cond_resched();
        }
}

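/*
 * Illustrative sketch, not part of the original file: shrinking a limit.
 * page_counter_set_max() returns -EBUSY while usage still exceeds the
 * requested value, so callers typically free charged pages and retry;
 * try_reclaim is a hypothetical callback standing in for that step.
 */
static int __maybe_unused example_shrink_limit(struct page_counter *counter,
                                               unsigned long new_limit,
                                               bool (*try_reclaim)(struct page_counter *))
{
        while (page_counter_set_max(counter, new_limit) == -EBUSY) {
                if (!try_reclaim || !try_reclaim(counter))
                        return -EBUSY;
        }
        return 0;
}
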
/**
 * page_counter_set_min - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *c;

        WRITE_ONCE(counter->min, nr_pages);

        for (c = counter; c; c = c->parent)
                propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_set_low - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *c;

        WRITE_ONCE(counter->low, nr_pages);

        for (c = counter; c; c = c->parent)
                propagate_protected_usage(c, atomic_long_read(&c->usage));
}

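/*
 * Illustrative sketch, not part of the original file: configuring protection.
 * Setting the min (hard) and low (best-effort) values re-runs
 * propagate_protected_usage() against the current usage, so the parent's
 * children_min_usage/children_low_usage reflect the new settings immediately
 * rather than on the next charge.  Function and parameter names are made up.
 */
static void __maybe_unused example_set_protection(struct page_counter *counter,
                                                  unsigned long min_pages,
                                                  unsigned long low_pages)
{
        page_counter_set_min(counter, min_pages);
        page_counter_set_low(counter, low_pages);
}
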
/**
 * page_counter_memparse - memparse() for page counter limits
 * @buf: string to parse
 * @max: string meaning maximum possible value
 * @nr_pages: returns the result in number of pages
 *
 * Returns -EINVAL, or 0 and @nr_pages on success.  @nr_pages will be
 * limited to %PAGE_COUNTER_MAX.
 */
int page_counter_memparse(const char *buf, const char *max,
                          unsigned long *nr_pages)
{
        char *end;
        u64 bytes;

        if (!strcmp(buf, max)) {
                *nr_pages = PAGE_COUNTER_MAX;
                return 0;
        }

        bytes = memparse(buf, &end);
        if (*end != '\0')
                return -EINVAL;

        *nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);

        return 0;
}

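/*
 * Illustrative sketch, not part of the original file: parsing a cgroup-style
 * limit string and applying it.  "max" maps to PAGE_COUNTER_MAX, byte values
 * such as "512M" are converted to pages and clamped.  The literal strings and
 * example_parse_limit are made up for the example.
 */
static int __maybe_unused example_parse_limit(struct page_counter *counter)
{
        unsigned long nr_pages;
        int err;

        err = page_counter_memparse("512M", "max", &nr_pages);
        if (err)
                return err;     /* -EINVAL: trailing garbage in the string */

        return page_counter_set_max(counter, nr_pages);
}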