// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  EMU10K1 memory page allocation (PTB area)
 */
/* page arguments of these two macros are Emu page (4096 bytes), not like
 * the aligned pages in others
 */
#define __set_ptb_entry(emu,page,addr) \
	(((__le32 *)(emu)->ptb_pages.area)[page] = \
	 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define __get_ptb_entry(emu, page) \
	(le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
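/*
 * Note: each PTB entry is a little-endian 32-bit word that combines the DMA
 * address of a 4 kB audio page with the entry's own index in the low bits.
 * With address_mode == 1 the address is pre-shifted by one bit; this appears
 * to free an extra low bit for the doubled page index used in that mode, at
 * the cost of halving the addressable DMA range.
 */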
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DMA_API_DEBUG)
/* fill PTB entries corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill PTB entries corresponding to page with silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else /* PAGE_SIZE x UNIT_PAGES == EMUPAGESIZE */
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;

	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
			(unsigned int)__get_ptb_entry(emu, page));
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;

	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		/* do not increment ptr */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
		dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
			page, (unsigned int)__get_ptb_entry(emu, page));
	}
}
#endif /* PAGE_SIZE */
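/*
 * Note: when the kernel page size exceeds the 4 kB EMUPAGESIZE, one system
 * page covers UNIT_PAGES consecutive PTB entries, so the two helpers above
 * expand a single aligned page into UNIT_PAGES hardware entries.
 */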
/*
 * initialize emu10k1 part
 */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}
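/*
 * Note: first_page/last_page are in units of the kernel PAGE_SIZE, not the
 * 4 kB Emu page; set_ptb_entry()/set_silent_ptb() perform the conversion
 * when the two sizes differ.
 */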
/*
 * search empty region on PTB with the given size
 *
 * if an empty region is found, return the page and store the next mapped
 * block in nextp
 *
 * if not found, return a negative error code
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 1, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		} else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
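/*
 * The search prefers a hole that matches the request exactly (early return);
 * otherwise it falls back to the largest hole seen, which helps keep the PTB
 * map from fragmenting into many small, unusable gaps.
 */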
/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	if (page == 0) {
		dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
		return -EINVAL;
	}
	/* insert this block in the proper position of mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as a newest block in order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}
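/*
 * PTB entry 0 is reserved (search_empty_map_area() starts scanning from
 * page 1), so hitting page 0 here indicates corrupted bookkeeping rather
 * than a legitimate mapping.
 */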
/*
 * unmap the block
 * return the size of resultant empty pages
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else {
		start_page = 1;
	}
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else {
		end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
	}

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
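/*
 * The returned size is the full gap between the surrounding mapped blocks,
 * not just the pages of the unmapped block; the caller compares it against
 * the pending block's page count to decide whether enough contiguous room
 * has been reclaimed to retry the mapping.
 */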
/*
 * search empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)
		__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}
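/*
 * This is a simple first-fit scan over the memhdr block list, which is
 * kept sorted by offset; the new block is linked in before p, preserving
 * that ordering invariant.
 */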
/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		dev_err_ratelimited(emu->card->dev,
				    "max memory size is 0x%lx (addr = 0x%lx)!!\n",
				    emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
		return 0;
	}
	return 1;
}
/*
 * map the given memory block on PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, tries to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_move_tail(&blk->mapped_order_link,
			       &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks */
		/* starting from the oldest block */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* try to map again.. */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);
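/*
 * mapped_order_link_head is effectively an LRU list: a re-mapped block is
 * moved to the tail above, so the eviction loop walks victims from the
 * oldest mapping forward, skipping blocks pinned via map_locked.
 */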
/*
 * page allocation for DMA
 */
int snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu,
			    struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
		return -EINVAL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return -EINVAL;

	idx = runtime->period_size >= runtime->buffer_size ?
		(emu->delay_pcm_irq * 2) : 0;
	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes + idx);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return -ENOMEM;
	}
	/* fill all the pages */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		if (ofs >= runtime->dma_bytes)
			addr = emu->silent_page.addr;
		else
			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (! is_valid_page(emu, addr)) {
			dev_err_ratelimited(emu->card->dev,
					    "emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return -EINVAL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		mutex_unlock(&hdr->block_mutex);
		return err;
	}
	mutex_unlock(&hdr->block_mutex);
	return 0;
}
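/*
 * Pages beyond runtime->dma_bytes are pointed at the shared silent page,
 * both covering the delay_pcm_irq padding reserved above and ensuring that
 * any hardware access past the buffer end reads silence rather than stale
 * data.
 */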
/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	if (snd_BUG_ON(!blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, (struct snd_util_memblk *)blk);
}
/*
 * allocate DMA pages, widening the allocation if necessary for the IOMMU
 * workaround
 */
int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
					struct snd_dma_buffer *dmab)
{
	if (emu->iommu_workaround) {
		size_t npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		size_t size_real = npages * PAGE_SIZE;

		/* widen the allocation if the last page leaves too little
		 * slack for the device's overreads
		 */
		if (size_real < size + 1024)
			size += PAGE_SIZE;
	}

	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
				   &emu->pci->dev, size, dmab);
}
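/*
 * Rationale for iommu_workaround, as implied by the check above: with an
 * IOMMU, a device access past the end of the mapped area faults instead of
 * merely hitting stray memory. Since the chip can read slightly beyond a
 * buffer's end, the allocation is padded by one extra page whenever less
 * than 1 kB of slack would remain in the last page.
 */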
/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);
/*
 * free a synth sample area
 */
int snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);
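/*
 * Locking: block_mutex serializes the allocator metadata (the memhdr block
 * list), while the memblk_lock spinlock guards the PTB mapping lists; the
 * mutex is taken first here, with the spinlock held only around the unmap.
 */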
/* check new allocation range */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;

	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
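/*
 * Synth blocks are not page aligned, so neighboring blocks may share a
 * boundary page. This helper trims off pages already owned by a neighbor,
 * ensuring each physical page is allocated and freed exactly once.
 */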
/*
 * free each page
 */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	struct snd_dma_buffer dmab;
	int page;

	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
	dmab.dev.dev = &emu->pci->dev;

	for (page = first_page; page <= last_page; page++) {
		if (emu->page_ptr_table[page] == NULL)
			continue;
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];

		/* keep the size in sync with the allocation in
		 * snd_emu10k1_alloc_pages_maybe_wider()
		 */
		dmab.bytes = PAGE_SIZE;
		if (emu->iommu_workaround)
			dmab.bytes *= 2;

		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}
/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
							&dmab) < 0)
			goto __fail;
		if (!is_valid_page(emu, dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto __fail;
		}
		emu->page_addr_table[page] = dmab.addr;
		emu->page_ptr_table[page] = dmab.area;
	}
	return 0;

__fail:
	/* release allocated pages */
	last_page = page - 1;
	__synth_free_pages(emu, first_page, last_page);

	return -ENOMEM;
}
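/*
 * Pages are allocated one at a time: the synth area only needs per-page
 * DMA addresses for the PTB, so there is no reason to demand a physically
 * contiguous region.
 */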
/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}
/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;

	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (ptr == NULL) {
		dev_err(emu->card->dev,
			"access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}
/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);
/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);
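/*
 * Both helpers above walk the synth block page by page because its kernel
 * pages are not virtually contiguous: each chunk (temp) is clamped to the
 * smaller of the bytes left in the current page and the bytes remaining in
 * the request.
 */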