// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TX_PRI0_RATE	0x30
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

#define CPDMA_MAX_RLIM_CNT	16384

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
	int			chan_num;
	int			num_rx_desc;	/* number of RX descriptors */
	int			num_tx_desc;	/* number of TX descriptors */
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int				int_set, int_clear, td;
	int				weight;
	u32				rate_factor;
	u32				rate;
};

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};
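/*
 * Illustrative note (not part of the driver): for a single-fragment 60 byte
 * TX packet directed at slave port 1, the submit path further below builds
 * a descriptor whose words look like:
 *
 *	hw_next = 0	(end of chain, until another descriptor is queued)
 *	hw_len  = 60
 *	hw_mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP |
 *		  CPDMA_DESC_TO_PORT_EN | (1 << CPDMA_TO_PORT_SHIFT) | 60
 *
 * On completion the hardware clears CPDMA_DESC_OWNER, and sets
 * CPDMA_DESC_EOQ if the queue ran empty at this descriptor.
 */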
struct submit_info {
	struct cpdma_chan *chan;
	int directed;
	void *token;
	void *data_virt;
	dma_addr_t data_dma;
	int len;
};

static struct cpdma_control_info controls[] = {
	[CPDMA_TX_RLIM]		  = {CPDMA_DMACONTROL,	8,  0xffff, ACCESS_RW},
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

#define tx_chan_num(chan)	(chan)
#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)	(!is_rx_chan(chan))
#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)	__chan_linear((chan)->chan_num)

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		readl((chan)->fld)
#define desc_read(desc, fld)		readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

#define CPDMA_DMA_EXT_MAP	BIT(16)

static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
	struct cpdma_desc_pool *pool = ctlr->pool;

	if (!pool)
		return;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %zd != avail %zd",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));
	if (pool->cpumap)
		dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
}
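/*
 * Illustrative note (not part of the driver): the channels[] array holds
 * TX channels in slots 0..CPDMA_MAX_CHANNELS-1 and RX channels in slots
 * CPDMA_MAX_CHANNELS..2*CPDMA_MAX_CHANNELS-1.  With CPDMA_MAX_CHANNELS == 8
 * (a value assumed here just for the example):
 *
 *	tx_chan_num(2) == 2	and	__chan_linear(2)  == 2
 *	rx_chan_num(2) == 10	and	__chan_linear(10) == 2
 *
 * so a TX and an RX channel with the same hardware number map back to the
 * same per-direction register offset via chan_linear().
 */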
/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
{
	struct cpdma_params *cpdma_params = &ctlr->params;
	struct cpdma_desc_pool *pool;
	int ret = -ENOMEM;

	pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;
	ctlr->pool = pool;

	pool->mem_size	= cpdma_params->desc_mem_size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc),
				cpdma_params->desc_align);
	pool->num_desc	= pool->mem_size / pool->desc_size;

	if (cpdma_params->descs_pool_size) {
		/* recalculate the memory size required for the descriptor
		 * pool based on the number of descriptors specified by the
		 * user; if that size exceeds the CPPI internal RAM size
		 * (desc_mem_size), switch to using DDR
		 */
		pool->num_desc = cpdma_params->descs_pool_size;
		pool->mem_size = pool->desc_size * pool->num_desc;
		if (pool->mem_size > cpdma_params->desc_mem_size)
			cpdma_params->desc_mem_phys = 0;
	}

	pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
					      -1, "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		ret = PTR_ERR(pool->gen_pool);
		dev_err(ctlr->dev, "pool create failed %d\n", ret);
		goto gen_pool_create_fail;
	}

	if (cpdma_params->desc_mem_phys) {
		pool->phys  = cpdma_params->desc_mem_phys;
		pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
					   pool->mem_size);
		pool->hw_addr = cpdma_params->desc_hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
						  &pool->hw_addr, GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(ctlr->dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return 0;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(ctlr);
gen_pool_create_fail:
	ctlr->pool = NULL;
	return ret;
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
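/*
 * Illustrative note (not part of the driver; the addresses are made up):
 * if the pool was ioremapped at iomap == 0xe0840000 and the hardware sees
 * it at hw_addr == 0x4a102000, a descriptor at CPU address 0xe0840040
 * translates as:
 *
 *	desc_phys()      -> 0x4a102000 + (0xe0840040 - 0xe0840000) = 0x4a102040
 *	desc_from_phys() -> 0xe0840000 + (0x4a102040 - 0x4a102000) = 0xe0840040
 *
 * i.e. the two helpers are inverses that simply rebase an offset between
 * the CPU mapping and the DMA-visible address of the same memory.
 */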
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	struct cpdma_control_info *info = &controls[control];
	u32 val;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_WO) != ACCESS_WO)
		return -EPERM;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	return 0;
}

static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	struct cpdma_control_info *info = &controls[control];
	int ret;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
	return ret;
}

/* cpdma_chan_set_chan_shaper - set shaper for a channel
 * Has to be called under ctlr lock
 */
static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	u32 rate_reg;
	u32 rmask;
	int ret;

	if (!chan->rate)
		return 0;

	rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
	dma_reg_write(ctlr, rate_reg, chan->rate_factor);

	rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
	rmask |= chan->mask;

	ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	return ret;
}

static int cpdma_chan_on(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
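/*
 * Illustrative note (not part of the driver): _cpdma_control_set() is a
 * plain read-modify-write on the field described by controls[].  E.g. for
 * CPDMA_STAT_TX_ERR_CODE (reg CPDMA_DMASTATUS, shift 20, mask 0xf),
 * writing value v expands to:
 *
 *	val  = dma_reg_read(ctlr, CPDMA_DMASTATUS);
 *	val &= ~(0xf << 20);
 *	val |= (v & 0xf) << 20;
 *	dma_reg_write(ctlr, CPDMA_DMASTATUS, val);
 *
 * so only bits 23..20 of the status register are touched.
 */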
/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
 * rmask - mask of rate limited channels
 * Returns 0 on success; -EINVAL if the rate limited channels would not
 * form a contiguous block at the highest channel numbers.
 */
static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
			       u32 *rmask, int *prio_mode)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	struct cpdma_chan *chan;
	u32 old_rate = ch->rate;
	u32 new_rmask = 0;
	int rlim = 0;
	int i;

	for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan == ch)
			chan->rate = rate;

		if (chan->rate) {
			rlim = 1;
			new_rmask |= chan->mask;
			continue;
		}

		if (rlim)
			goto err;
	}

	*rmask = new_rmask;
	*prio_mode = rlim;
	return 0;

err:
	ch->rate = old_rate;
	dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
		chan->chan_num);
	return -EINVAL;
}
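/*
 * Illustrative note (not part of the driver), assuming all eight tx
 * channels exist: setting rates on tx channels 6 and 7 is accepted, while
 * a rate on channel 5 alone is rejected, because channel 6 above it would
 * then be unlimited and the loop above reports
 * "Upper cpdma ch6 is not rate limited".
 */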
static int cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
				  struct cpdma_chan *ch)
{
	u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
	u32 best_send_cnt = 0, best_idle_cnt = 0;
	u32 new_rate, best_rate = 0, rate_reg;
	u64 send_cnt, idle_cnt;
	u32 min_send_cnt, freq;
	u64 dividend, divisor;

	if (!ch->rate) {
		ch->rate_factor = 0;
		goto set_factor;
	}

	freq = ctlr->params.bus_freq_mhz * 1000 * 32;
	if (!freq) {
		dev_err(ctlr->dev, "The bus frequency is not set\n");
		return -EINVAL;
	}

	min_send_cnt = freq - ch->rate;
	send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
	while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
		dividend = ch->rate * send_cnt;
		divisor = min_send_cnt;
		idle_cnt = DIV_ROUND_CLOSEST_ULL(dividend, divisor);

		dividend = freq * idle_cnt;
		divisor = idle_cnt + send_cnt;
		new_rate = DIV_ROUND_CLOSEST_ULL(dividend, divisor);

		delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
		if (delta < best_delta) {
			best_delta = delta;
			best_send_cnt = send_cnt;
			best_idle_cnt = idle_cnt;
			best_rate = new_rate;

			if (!delta)
				break;
		}

		if (prev_delta >= delta) {
			prev_delta = delta;
			send_cnt++;
			continue;
		}

		idle_cnt++;
		dividend = freq * idle_cnt;
		send_cnt = DIV_ROUND_CLOSEST_ULL(dividend, ch->rate);
		send_cnt -= idle_cnt;
		prev_delta = UINT_MAX;
	}

	ch->rate = best_rate;
	ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);

set_factor:
	rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
	dma_reg_write(ctlr, rate_reg, ch->rate_factor);
	return 0;
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	ctlr->chan_num = 0;
	spin_lock_init(&ctlr->lock);

	if (cpdma_desc_pool_create(ctlr))
		return NULL;
	/* split pool equally between RX/TX by default */
	ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
	ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	struct cpdma_chan *chan;
	unsigned long flags;
	int i, prio_mode;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		writel(0, ctlr->params.txhdp + 4 * i);
		writel(0, ctlr->params.rxhdp + 4 * i);
		writel(0, ctlr->params.txcp + 4 * i);
		writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	prio_mode = 0;
	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (chan) {
			cpdma_chan_set_chan_shaper(chan);
			cpdma_chan_on(chan);

			/* fixed prio mode is turned off only if all tx
			 * channels are rate limited
			 */
			if (is_tx_chan(chan) && !chan->rate)
				prio_mode = 1;
		}
	}

	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	_cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
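/*
 * Illustrative bring-up sketch (not part of the driver; "priv", its fields
 * and the handler functions are hypothetical):
 *
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&dma_params);
 *	if (!dma)
 *		return -ENOMEM;
 *	priv->txch = cpdma_chan_create(dma, 0, tx_handler, 0);
 *	priv->rxch = cpdma_chan_create(dma, 0, rx_handler, 1);
 *	if (IS_ERR(priv->txch) || IS_ERR(priv->rxch))
 *		goto err;
 *	cpdma_ctlr_start(dma);	(enables both engines and all channels)
 *
 * cpdma_chan_create() takes the per-direction channel number; rx_type
 * selects whether it lands in the RX half of channels[].
 */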
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;
	spin_unlock_irqrestore(&ctlr->lock, flags);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	spin_lock_irqsave(&ctlr->lock, flags);
	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);

static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
				 int rx, int desc_num,
				 int per_ch_desc)
{
	struct cpdma_chan *chan, *most_chan = NULL;
	int desc_cnt = desc_num;
	int most_dnum = 0;
	int min, max, i;

	if (!desc_num)
		return;

	if (rx) {
		min = rx_chan_num(0);
		max = rx_chan_num(CPDMA_MAX_CHANNELS);
	} else {
		min = tx_chan_num(0);
		max = tx_chan_num(CPDMA_MAX_CHANNELS);
	}

	for (i = min; i < max; i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan->weight)
			chan->desc_num = (chan->weight * desc_num) / 100;
		else
			chan->desc_num = per_ch_desc;

		desc_cnt -= chan->desc_num;

		if (most_dnum < chan->desc_num) {
			most_dnum = chan->desc_num;
			most_chan = chan;
		}
	}
	/* give any remainder to the biggest channel */
	if (most_chan)
		most_chan->desc_num += desc_cnt;
}
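/*
 * Illustrative note (not part of the driver), with example numbers: say
 * 128 RX descriptors are shared by three RX channels where ch0 has
 * weight 50 and ch1/ch2 have weight 0.  cpdma_chan_split_pool() below
 * first computes the share left for the unweighted channels,
 *
 *	per_ch = (128 - (50 * 128) / 100) / 2 = 32,
 *
 * then cpdma_chan_set_descs() hands out 64/32/32 and adds the remainder
 * (here 0) to the channel that received the most descriptors.
 */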
/*
 * cpdma_chan_split_pool - Splits ctlr pool between all channels.
 * Has to be called under ctlr lock
 */
static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
	int free_rx_num = 0, free_tx_num = 0;
	int rx_weight = 0, tx_weight = 0;
	int tx_desc_num, rx_desc_num;
	struct cpdma_chan *chan;
	int i;

	if (!ctlr->chan_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (is_rx_chan(chan)) {
			if (!chan->weight)
				free_rx_num++;
			rx_weight += chan->weight;
		} else {
			if (!chan->weight)
				free_tx_num++;
			tx_weight += chan->weight;
		}
	}

	if (rx_weight > 100 || tx_weight > 100)
		return -EINVAL;

	tx_desc_num = ctlr->num_tx_desc;
	rx_desc_num = ctlr->num_rx_desc;

	if (free_tx_num) {
		tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
		tx_per_ch_desc /= free_tx_num;
	}
	if (free_rx_num) {
		rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
		rx_per_ch_desc /= free_rx_num;
	}

	cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
	cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

	return 0;
}

/* cpdma_chan_set_weight - set weight of a channel in percentage.
 * Tx and Rx channels have separate weights, i.e. 100% for Rx
 * and 100% for Tx.  The weight is used to split cpdma resources,
 * including the number of descriptors, between the channels in the
 * correct proportion.  The channel rate alone is not enough to derive
 * the weight of a channel, since the maximum rate of the interface
 * would also be needed.  If weight == 0, the channel uses the
 * descriptors left over by the weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	unsigned long flags, ch_flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);
	if (ch->weight == weight) {
		spin_unlock_irqrestore(&ch->lock, ch_flags);
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return 0;
	}
	ch->weight = weight;
	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* re-split pool using new channel weight */
	ret = cpdma_chan_split_pool(ctlr);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_set_weight);

/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
 * Should be called before cpdma_chan_set_rate.
 * Returns min rate in Kb/s
 */
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
{
	unsigned int dividend, divisor;

	dividend = ctlr->params.bus_freq_mhz * 32 * 1000;
	divisor = 1 + CPDMA_MAX_RLIM_CNT;

	return DIV_ROUND_UP(dividend, divisor);
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);
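/*
 * Illustrative note (not part of the driver), with an assumed 250 MHz bus:
 *
 *	dividend = 250 * 32 * 1000        = 8000000  (Kb/s)
 *	divisor  = 1 + CPDMA_MAX_RLIM_CNT = 16385
 *	min rate = DIV_ROUND_UP(8000000, 16385) = 489 Kb/s
 *
 * Rates below this cannot be represented by the send/idle counters that
 * cpdma_chan_set_factors() programs into CPDMA_TX_PRI0_RATE.
 */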
/* cpdma_chan_set_rate - limits bandwidth for transmit channel.
 * The rate limited channels have to be a contiguous block ending at the
 * highest channel number (see cpdma_chan_fit_rate()).
 * ch - transmit channel the bandwidth is configured for
 * rate - bandwidth in Kb/s; if 0, the shaper is turned off
 */
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
	unsigned long flags, ch_flags;
	struct cpdma_ctlr *ctlr;
	int ret, prio_mode;
	u32 rmask;

	if (!ch || !is_tx_chan(ch))
		return -EINVAL;

	if (ch->rate == rate)
		return rate;

	ctlr = ch->ctlr;
	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);

	ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
	if (ret)
		goto err;

	ret = cpdma_chan_set_factors(ctlr, ch);
	if (ret)
		goto err;

	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* enable the shapers */
	_cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;

err:
	spin_unlock_irqrestore(&ch->lock, ch_flags);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_set_rate);

u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
	unsigned long flags;
	u32 rate;

	spin_lock_irqsave(&ch->lock, flags);
	rate = ch->rate;
	spin_unlock_irqrestore(&ch->lock, flags);

	return rate;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rate);

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler, int rx_type)
{
	int offset = chan_num * 4;
	struct cpdma_chan *chan;
	unsigned long flags;

	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return ERR_PTR(-EINVAL);

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->rate	= 0;
	chan->weight	= 0;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	ctlr->chan_num++;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
	unsigned long flags;
	int desc_num;

	spin_lock_irqsave(&chan->lock, flags);
	desc_num = chan->desc_num;
	spin_unlock_irqrestore(&chan->lock, flags);

	return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
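/*
 * Illustrative usage sketch (not part of the driver; "tx_ch" and
 * "rate_kbps" are hypothetical):
 *
 *	u32 min = cpdma_chan_get_min_rate(ctlr);
 *	if (rate_kbps >= min)
 *		ret = cpdma_chan_set_rate(tx_ch, rate_kbps);
 *
 * cpdma_chan_set_rate() rounds the request to the nearest value the
 * send/idle counters can express and stores the result, so a subsequent
 * cpdma_chan_get_rate(tx_ch) may differ slightly from the request.
 */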
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	ctlr->chan_num--;
	devm_kfree(ctlr->dev, chan);
	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	u32 mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

static int cpdma_chan_submit_si(struct submit_info *si)
{
	struct cpdma_chan *chan = si->chan;
	struct cpdma_ctlr *ctlr = chan->ctlr;
	int len = si->len;
	struct cpdma_desc __iomem *desc;
	dma_addr_t buffer;
	u32 mode;
	int ret;

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		return -ENOMEM;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		return -ENOMEM;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, si->directed);

	if (si->data_dma) {
		buffer = si->data_dma;
		dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
	} else {
		buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
		ret = dma_mapping_error(ctlr->dev, buffer);
		if (ret) {
			cpdma_desc_free(ctlr->pool, desc, 1);
			return -EINVAL;
		}
	}

	/* Relaxed IO accessors can be used here as there is a read barrier
	 * at the end of the write sequence.
	 */
	writel_relaxed(0, &desc->hw_next);
	writel_relaxed(buffer, &desc->hw_buffer);
	writel_relaxed(len, &desc->hw_len);
	writel_relaxed(mode | len, &desc->hw_mode);
	writel_relaxed((uintptr_t)si->token, &desc->sw_token);
	writel_relaxed(buffer, &desc->sw_buffer);
	writel_relaxed(si->data_dma ? len | CPDMA_DMA_EXT_MAP : len,
		       &desc->sw_len);
	desc_read(desc, sw_len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;
	return 0;
}
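/*
 * Illustrative usage sketch (not part of the driver): a hypothetical
 * ndo_start_xmit() would queue an skb on a TX channel with
 *
 *	ret = cpdma_chan_submit(priv->txch, skb, skb->data, skb->len, 0);
 *
 * using the skb itself as the completion token.  The *_mapped variants
 * take an already DMA-mapped buffer instead of a virtual address, and the
 * *_idle_* variants additionally allow queueing while the channel is not
 * yet started (e.g. to pre-fill RX buffers before cpdma_ctlr_start()).
 */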
int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
			   int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = data;
	si.data_dma = 0;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_idle_submit);

int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
				  dma_addr_t data, int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = NULL;
	si.data_dma = data;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_idle_submit_mapped);

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = data;
	si.data_dma = 0;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
			     dma_addr_t data, int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = NULL;
	si.data_dma = data;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit_mapped);

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	bool free_tx_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
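/*
 * Illustrative usage sketch (not part of the driver; "ndev" and "priv"
 * are hypothetical): a transmit path typically pairs a failed submit with
 * a queue stop, and re-checks available descriptors after each submit:
 *
 *	if (cpdma_chan_submit(priv->txch, skb, skb->data, skb->len, 0)) {
 *		netif_stop_queue(ndev);
 *		return NETDEV_TX_BUSY;
 *	}
 *	if (!cpdma_check_free_tx_desc(priv->txch))
 *		netif_stop_queue(ndev);
 *
 * The check covers both the per-channel quota (count < desc_num) and the
 * shared gen_pool backing all channels.
 */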
static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t buff_dma;
	int origlen;
	uintptr_t token;

	token = desc_read(desc, sw_token);
	origlen = desc_read(desc, sw_len);

	buff_dma = desc_read(desc, sw_buffer);
	if (origlen & CPDMA_DMA_EXT_MAP) {
		origlen &= ~CPDMA_DMA_EXT_MAP;
		dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
					chan->dir);
	} else {
		dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	}

	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)((void *)token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	int status, outlen;
	int cb_status = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = desc_read(desc, hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if ((status & CPDMA_DESC_EOQ) && chan->head) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = cpdma_chan_set_chan_shaper(chan);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	if (ret)
		return ret;

	ret = cpdma_chan_on(chan);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);
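/*
 * Illustrative usage sketch (not part of the driver; the NAPI wiring and
 * "priv" are hypothetical): cpdma_chan_process() is built for polled
 * completion, e.g. from a NAPI handler:
 *
 *	static int my_rx_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int done = cpdma_chan_process(priv->rxch, budget);
 *
 *		if (done < budget) {
 *			napi_complete_done(napi, done);
 *			cpdma_ctlr_int_ctrl(priv->dma, true);
 *			cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
 *		}
 *		return done;
 *	}
 *
 * CPDMA_EOI_RX stands in for whatever EOI vector value the hardware
 * expects; the real callers pass their own constant to cpdma_ctlr_eoi().
 */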
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	int ret;
	unsigned timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);

		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_get(ctlr, control);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_set(ctlr, control, value);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}

int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_rx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs);

int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs);

int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
	unsigned long flags;
	int temp, ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	temp = ctlr->num_rx_desc;
	ctlr->num_rx_desc = num_rx_desc;
	ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
	ret = cpdma_chan_split_pool(ctlr);
	if (ret) {
		ctlr->num_rx_desc = temp;
		ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs);
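/*
 * Illustrative usage sketch (not part of the driver): an ethtool
 * set_ringparam handler could rebalance the RX/TX split with
 *
 *	ret = cpdma_set_num_rx_descs(ctlr, ering->rx_pending);
 *
 * On failure the old split is restored above; on success
 * cpdma_chan_split_pool() has already redistributed each half across the
 * live channels according to their weights.
 */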