/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_

#include <drm/drm_gem.h>

#include <linux/kref.h>
#include <linux/list.h>

#include "ttm_device.h"

/* Default number of pre-faulted pages in the TTM fault handler */
#define TTM_BO_VM_NUM_PREFAULT 16

struct iosys_map;

struct ttm_global;
struct ttm_device;
struct ttm_placement;
struct ttm_place;
struct ttm_resource;
struct ttm_resource_manager;
struct ttm_tt;
/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device: These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupies a slot in the
 * device address space that can be used for normal vm operations.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 *
 * @ttm_bo_type_sg: Buffer made from a dmabuf sg table shared with another
 * driver.
 */
enum ttm_bo_type {
	ttm_bo_type_device,
	ttm_bo_type_kernel,
	ttm_bo_type_sg
};

/**
 * struct ttm_buffer_object
 *
 * @base: drm_gem_object superclass data.
 * @bdev: Pointer to the buffer object device structure.
 * @type: The bo type.
 * @page_alignment: Page alignment.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is destroyed or put on the delayed delete list.
 * @resource: structure describing current placement.
 * @ttm: TTM structure holding system pages.
 * @deleted: True if the object is only a zombie and already deleted.
 * @bulk_move: The bulk move object.
 * @priority: Priority for LRU; BOs with lower priority are evicted first.
 * @pin_count: Pin count.
 *
 * Base class for TTM buffer objects, which deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset directly as the
 * GPU virtual address. For drivers implementing multiple
 * GPU memory manager contexts, the driver should manage the address space
 * in these contexts separately and use these objects to get the correct
 * placement and caching for these GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * The destroy member and the API visibility of this object make it possible
 * to derive driver-specific types.
 */
struct ttm_buffer_object {
	struct drm_gem_object base;

	/*
	 * Members constant at init.
	 */
	struct ttm_device *bdev;
	enum ttm_bo_type type;
	uint32_t page_alignment;
	void (*destroy) (struct ttm_buffer_object *);

	/*
	 * Members not needing protection.
	 */
	struct kref kref;

	/*
	 * Members protected by the bo's reservation lock (bo->base.resv).
	 */
	struct ttm_resource *resource;
	struct ttm_tt *ttm;
	bool deleted;
	struct ttm_lru_bulk_move *bulk_move;
	unsigned priority;
	unsigned pin_count;

	/**
	 * @delayed_delete: Work item used when we can't delete the BO
	 * immediately.
	 */
	struct work_struct delayed_delete;

	/**
	 * @sg: external source of pages and DMA addresses, protected by the
	 * reservation lock.
	 */
	struct sg_table *sg;
};
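
/*
 * Since struct ttm_buffer_object is designed to be embedded, drivers
 * typically derive their own buffer object type from it. A minimal,
 * hypothetical sketch ("my_bo" and "my_bo_destroy" are illustrative
 * names, not part of TTM):
 *
 *	struct my_bo {
 *		struct ttm_buffer_object ttm_bo;
 *		// driver-private state goes here
 *	};
 *
 *	static void my_bo_destroy(struct ttm_buffer_object *ttm_bo)
 *	{
 *		struct my_bo *bo = container_of(ttm_bo, struct my_bo, ttm_bo);
 *
 *		// release driver-private state, then free the object
 *		kfree(bo);
 *	}
 *
 * The destroy callback is passed to ttm_bo_init_reserved() or
 * ttm_bo_init_validate() and is invoked once the final reference is
 * dropped.
 */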

#define TTM_BO_MAP_IOMEM_MASK 0x80

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 * @bo: The TTM BO.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */
struct ttm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
		ttm_bo_map_vmap = 2,
		ttm_bo_map_kmap = 3,
		ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
	} bo_kmap_type;
	struct ttm_buffer_object *bo;
};
166
167 /**
168 * struct ttm_operation_ctx
169 *
170 * @interruptible: Sleep interruptible if sleeping.
171 * @no_wait_gpu: Return immediately if the GPU is busy.
172 * @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL when allocation pages.
173 * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
174 * BOs share the same reservation object.
175 * @force_alloc: Don't check the memory account during suspend or CPU page
176 * faults. Should only be used by TTM internally.
177 * @resv: Reservation object to allow reserved evictions with.
178 * @bytes_moved: Statistics on how many bytes have been moved.
179 *
180 * Context for TTM operations like changing buffer placement or general memory
181 * allocation.
182 */
183 struct ttm_operation_ctx {
184 bool interruptible;
185 bool no_wait_gpu;
186 bool gfp_retry_mayfail;
187 bool allow_res_evict;
188 bool force_alloc;
189 struct dma_resv *resv;
190 uint64_t bytes_moved;
191 };
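
/*
 * Most TTM entry points take a struct ttm_operation_ctx. A minimal,
 * hypothetical sketch of validating a reserved bo into a driver-chosen
 * placement ("placement" is assumed to be set up elsewhere):
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.no_wait_gpu = false,
 *	};
 *	int ret;
 *
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	if (ret)
 *		return ret;
 *
 * On success, ctx.bytes_moved accumulates the amount of data TTM had to
 * move to satisfy the request.
 */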

struct ttm_lru_walk;

/** struct ttm_lru_walk_ops - Operations for a LRU walk. */
struct ttm_lru_walk_ops {
	/**
	 * process_bo - Process this bo.
	 * @walk: struct ttm_lru_walk describing the walk.
	 * @bo: A locked and referenced buffer object.
	 *
	 * Return: Negative error code on error, user-defined positive value
	 * (typically, but not always, the size of the processed bo) on
	 * success. On success, the returned values are summed by the walk and
	 * the walk exits when its target is met.
	 * 0 also indicates success, -EBUSY means this bo was skipped.
	 */
	s64 (*process_bo)(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo);
};

/**
 * struct ttm_lru_walk - Structure describing a LRU walk.
 */
struct ttm_lru_walk {
	/** @ops: Pointer to the ops structure. */
	const struct ttm_lru_walk_ops *ops;
	/** @ctx: Pointer to the struct ttm_operation_ctx. */
	struct ttm_operation_ctx *ctx;
	/** @ticket: The struct ww_acquire_ctx if any. */
	struct ww_acquire_ctx *ticket;
	/** @trylock_only: Only use trylock for locking. */
	bool trylock_only;
};

s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
			   struct ttm_resource_manager *man, s64 target);
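
/*
 * A minimal, hypothetical sketch of an LRU walk ("my_walk_ops" and
 * "my_process_bo" are illustrative names, not part of TTM). The walk
 * locks and references each bo before calling process_bo(), sums the
 * positive return values, and stops once the sum reaches @target:
 *
 *	static s64 my_process_bo(struct ttm_lru_walk *walk,
 *				 struct ttm_buffer_object *bo)
 *	{
 *		// skip bos this driver cannot process right now
 *		if (bo->pin_count)
 *			return -EBUSY;
 *
 *		// ... process the bo, e.g. help evict it ...
 *		return bo->base.size;
 *	}
 *
 *	static const struct ttm_lru_walk_ops my_walk_ops = {
 *		.process_bo = my_process_bo,
 *	};
 *
 *	struct ttm_lru_walk walk = {
 *		.ops = &my_walk_ops,
 *		.ctx = &ctx,
 *		.trylock_only = true,
 *	};
 *	s64 progress = ttm_lru_walk_for_evict(&walk, bdev, man, target);
 */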

/**
 * struct ttm_bo_shrink_flags - flags to govern the bo shrinking behaviour
 * @purge: Purge the content rather than backing it up.
 * @writeback: Attempt to immediately write content to swap space.
 * @allow_move: Allow moving to system before shrinking. This is typically
 * not desired for zombie- or ghost objects (with zombie object meaning
 * objects with a zero gem object refcount).
 */
struct ttm_bo_shrink_flags {
	u32 purge : 1;
	u32 writeback : 1;
	u32 allow_move : 1;
};

long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
		   const struct ttm_bo_shrink_flags flags);

bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx);

bool ttm_bo_shrink_avoid_wait(void);
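
/*
 * A minimal, hypothetical sketch of how a driver shrinker might combine
 * these helpers on an already reserved bo. Mapping the result of
 * ttm_bo_shrink_avoid_wait() onto @no_wait_gpu is an assumption of this
 * sketch; the surrounding shrinker plumbing is elided:
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = false,
 *		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
 *	};
 *	struct ttm_bo_shrink_flags flags = {
 *		.writeback = true,
 *		.allow_move = true,
 *	};
 *	long shrunk = 0;
 *
 *	if (ttm_bo_shrink_suitable(bo, &ctx))
 *		shrunk = ttm_bo_shrink(&ctx, bo, flags);
 */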

/**
 * ttm_bo_get - reference a struct ttm_buffer_object
 *
 * @bo: The buffer object.
 */
static inline void ttm_bo_get(struct ttm_buffer_object *bo)
{
	kref_get(&bo->kref);
}

/**
 * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
 * its refcount has already reached zero.
 * @bo: The buffer object.
 *
 * Used to reference a TTM buffer object in lookups where the object is removed
 * from the lookup structure during the destructor and for RCU lookups.
 *
 * Returns: @bo if the referencing was successful, NULL otherwise.
 */
static inline __must_check struct ttm_buffer_object *
ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
{
	if (!kref_get_unless_zero(&bo->kref))
		return NULL;
	return bo;
}
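
/*
 * A minimal, hypothetical lookup sketch ("lookup_lock" and "my_find_bo"
 * are illustrative names): the structure is searched under a lock that
 * the destructor also takes, so a zero-refcount object may still be
 * visible and must not be resurrected:
 *
 *	spin_lock(&lookup_lock);
 *	bo = my_find_bo(handle);
 *	if (bo)
 *		bo = ttm_bo_get_unless_zero(bo);
 *	spin_unlock(&lookup_lock);
 *
 *	if (bo) {
 *		// use bo, then drop the reference
 *		ttm_bo_put(bo);
 *	}
 */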

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation), while taking a number of measures to prevent
 * deadlocks.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = dma_resv_trylock(bo->base.resv);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	if (interruptible) {
		int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
							   ticket);
		if (ret == -EINTR)
			ret = -ERESTARTSYS;
		return ret;
	}
	dma_resv_lock_slow(bo->base.resv, ticket);
	return 0;
}
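
/*
 * A minimal, hypothetical sketch of the multi-bo backoff pattern these
 * two helpers implement together (only the -EDEADLK path is shown; other
 * error handling and the retry loop are elided):
 *
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *
 *	ret = ttm_bo_reserve(bo_a, true, false, &ticket);
 *	if (ret)
 *		goto out;
 *
 *	ret = ttm_bo_reserve(bo_b, true, false, &ticket);
 *	if (ret == -EDEADLK) {
 *		// drop everything we hold, then sleep-wait for the
 *		// contended bo before retrying the locking sequence
 *		ttm_bo_unreserve(bo_a);
 *		ret = ttm_bo_reserve_slowpath(bo_b, true, &ticket);
 *		// on success, reserve bo_a again and continue ...
 *	}
 */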

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);

static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	ttm_bo_move_to_lru_tail(bo);
	spin_unlock(&bo->bdev->lru_lock);
}

static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
				     struct ttm_resource *new_mem)
{
	WARN_ON(bo->resource);
	bo->resource = new_mem;
}

/**
 * ttm_bo_move_null - assign memory for a buffer object.
 * @bo: The bo to assign the memory to.
 * @new_mem: The memory to be assigned.
 *
 * Free the buffer object's current resource and assign @new_mem to @bo.
 */
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
				    struct ttm_resource *new_mem)
{
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, new_mem);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ttm_bo_move_to_lru_tail_unlocked(bo);
	dma_resv_unlock(bo->base.resv);
}

/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to a boolean that on return indicates true if the
 * virtual map is io memory, false if normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is true on return, the virtual address points to an io memory
 * area that should strictly be accessed by the iowriteXX() and similar
 * functions.
 */
static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
					 bool *is_iomem)
{
	*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
	return map->virtual;
}
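
/*
 * A minimal, hypothetical sketch of a CPU write through a kernel mapping
 * (the bo is assumed to be reserved and populated), dispatching on whether
 * the mapping ended up in io memory:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, 1, &map);	// map the first page
 *	if (ret)
 *		return ret;
 *
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		memcpy_toio((void __iomem *)virtual, data, len);
 *	else
 *		memcpy(virtual, data, len);
 *
 *	ttm_bo_kunmap(&map);
 */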

int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx);
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx);
void ttm_bo_put(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place);
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
		unsigned long num_pages, struct ttm_bo_kmap_obj *map);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		   struct ttm_resource_manager *man, gfp_t gfp_flags,
		   s64 target);
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
int ttm_bo_evict_first(struct ttm_device *bdev,
		       struct ttm_resource_manager *man,
		       struct ttm_operation_ctx *ctx);
int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
		  void *buf, int len, int write);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault);
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
void ttm_bo_vm_open(struct vm_area_struct *vma);
void ttm_bo_vm_close(struct vm_area_struct *vma);
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write);
vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
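
/*
 * A minimal, hypothetical sketch of creating a kernel bo and mapping it
 * whole with the iosys_map interface. "placement" and "my_bo_destroy" are
 * assumed to exist, the bo size is assumed to have been set on the
 * embedded GEM object beforehand (e.g. via drm_gem_object_init()), and
 * ttm_bo_vmap() is assumed to require the reservation to be held:
 *
 *	struct ttm_buffer_object *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	struct iosys_map map;
 *	int ret;
 *
 *	if (!bo)
 *		return -ENOMEM;
 *
 *	ret = ttm_bo_init_validate(bdev, bo, ttm_bo_type_kernel, &placement,
 *				   0, true, NULL, NULL, my_bo_destroy);
 *	if (ret)
 *		return ret;	// bo already destroyed via the callback
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (!ret) {
 *		ret = ttm_bo_vmap(bo, &map);
 *		if (!ret) {
 *			iosys_map_memset(&map, 0, 0, bo->base.size);
 *			ttm_bo_vunmap(bo, &map);
 *		}
 *		ttm_bo_unreserve(bo);
 *	}
 */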

int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **mem,
		     struct ttm_operation_ctx *ctx);

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
/*
 * ttm_bo_util.c
 */
int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem);
void ttm_move_memcpy(bool clear, u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter);
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem);
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem);
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp);
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
int ttm_bo_populate(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx);

/* Driver LRU walk helpers initially targeted for shrinking. */

/**
 * struct ttm_bo_lru_cursor - Iterator cursor for TTM LRU list looping
 */
struct ttm_bo_lru_cursor {
	/** @res_curs: Embedded struct ttm_resource_cursor. */
	struct ttm_resource_cursor res_curs;
	/**
	 * @ctx: The struct ttm_operation_ctx used while looping;
	 * governs the locking mode.
	 */
	struct ttm_operation_ctx *ctx;
	/**
	 * @bo: Buffer object pointer if a buffer object is refcounted,
	 * NULL otherwise.
	 */
	struct ttm_buffer_object *bo;
	/**
	 * @needs_unlock: Valid iff @bo != NULL. The bo resv needs
	 * unlock before the next iteration or after loop exit.
	 */
	bool needs_unlock;
};

void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs);

struct ttm_bo_lru_cursor *
ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
		       struct ttm_resource_manager *man,
		       struct ttm_operation_ctx *ctx);

struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs);

struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs);

/*
 * Defines needed to use autocleanup (linux/cleanup.h) with struct ttm_bo_lru_cursor.
 */
DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *,
	     if (_T) { ttm_bo_lru_cursor_fini(_T); },
	     ttm_bo_lru_cursor_init(curs, man, ctx),
	     struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man,
	     struct ttm_operation_ctx *ctx);
static inline void *
class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
{ return *_T; }
#define class_ttm_bo_lru_cursor_is_conditional false

/**
 * ttm_bo_lru_for_each_reserved_guarded() - Iterate over buffer objects owning
 * resources on LRU lists.
 * @_cursor: struct ttm_bo_lru_cursor to use for the iteration.
 * @_man: The resource manager whose LRU lists to iterate over.
 * @_ctx: The struct ttm_operation_ctx to govern the @_bo locking.
 * @_bo: The struct ttm_buffer_object pointer pointing to the buffer object
 * for the current iteration.
 *
 * Iterate over all resources of @_man and for each resource, attempt to
 * reference and lock (using the locking mode detailed in @_ctx) the buffer
 * object it points to. If successful, assign @_bo to the address of the
 * buffer object and update @_cursor. The iteration is guarded in the
 * sense that @_cursor will be initialized before looping starts and cleaned
 * up at looping termination, even if terminated prematurely by, for
 * example, a return or break statement. Exiting the loop will also unlock
 * (if needed) and unreference @_bo. See the usage sketch below.
 */
#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _ctx, _bo)	\
	scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _ctx)		\
	for ((_bo) = ttm_bo_lru_cursor_first(_cursor); (_bo);		\
	     (_bo) = ttm_bo_lru_cursor_next(_cursor))
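
/*
 * A minimal, hypothetical usage sketch: walk a manager's LRU lists and
 * count the bos that could be referenced and locked. Each iteration sees
 * @bo referenced and reserved; breaking out early is safe since the
 * scoped guard cleans up the cursor:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = false };
 *	struct ttm_bo_lru_cursor cursor;
 *	struct ttm_buffer_object *bo;
 *	unsigned long count = 0;
 *
 *	ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &ctx, bo)
 *		count++;
 */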

#endif