Lines Matching +full:rx +full:- +full:ctrl

1 // SPDX-License-Identifier: GPL-2.0-or-later
4 Copyright (C) 1995-1999 Madge Networks Ltd.
95 comes with the revision 0 (140-025-01) ASIC.
99 Madge's SAMBA framer or a SUNI-lite device (early versions). It
100 comes with the revision 1 (140-027-01) ASIC.
104 All Horizon-based cards present with the same PCI Vendor and Device
106 to enable bus-mastering (with appropriate latency).
115 up for loop-timing.
122 line-based timing; the internal RAM is zeroed and the allocation of
123 buffers for RX and TX is made; the Burnt In Address is read and
124 copied to the ATM ESI; various policy settings for RX (VPI bits,
126 configurable at module load (if not actually on-demand), however,
174 be implemented as a (real-number) leaky bucket. The GCRA can be used
175 in complicated ways by switches and in simpler ways by end-stations.
176 It can be used both to filter incoming cells and shape out-going
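
The leaky-bucket form of the GCRA mentioned above can be shown with the usual virtual-scheduling formulation. This is an illustrative sketch only, not driver or hardware code; the structure and names are assumptions:

struct gcra {
	long tat;	/* theoretical arrival time of the next cell */
	long inc;	/* I: increment, i.e. 1/rate in clock ticks  */
	long lim;	/* L: limit, the permitted tolerance         */
};

/* Returns 1 if a cell arriving at time 'now' conforms, 0 if it violates
 * the contract.  A conforming cell pushes the theoretical arrival time on. */
static int gcra_conforms(struct gcra *g, long now)
{
	if (now < g->tat - g->lim)
		return 0;				/* too early */
	g->tat = (now > g->tat ? now : g->tat) + g->inc;
	return 1;
}

A switch would typically use such a test to police incoming cells, while an end-station shapes its transmit stream so that every cell it emits conforms.
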
217 (determined by the clock crystal, a fixed (?) per-device divider, a
230 (enforced by driver). So TX is single-threaded.
232 Apart from a minor optimisation to not re-select the last channel,
246 unset, exit). We also re-schedule further transfers for the same
252 (vcc->dev_data) structure and is "cached" on the card.
256 2. RX (Data Available and RX transfer)
258 The RX half of the driver owns the RX registers. There are two RX
268 suitable for our existing RX channels or we cannot allocate a buffer
269 it is flushed. Otherwise an RX receive is scheduled. Multiple RX
272 RX setup in more detail:
274 RX open...
275 RX close...
286 non-four-byte boundary in host memory. Instead the host should
289 boundary. RX is OK.
324 . Allow users to specify buffer allocation split for TX and RX.
328 . Handle interrupted and/or non-blocking operations.
357 outl (cpu_to_le32 (data), dev->iobase + reg); in wr_regl()
361 return le32_to_cpu (inl (dev->iobase + reg)); in rd_regl()
365 outw (cpu_to_le16 (data), dev->iobase + reg); in wr_regw()
369 return le16_to_cpu (inw (dev->iobase + reg)); in rd_regw()
373 outsb (dev->iobase + reg, addr, len); in wrs_regb()
377 insb (dev->iobase + reg, addr, len); in rds_regb()
385 wr_regl (dev, MEM_WR_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW)); in wr_mem()
391 wr_regl (dev, MEM_RD_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW)); in rd_mem()
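
The wr_mem()/rd_mem() lines above show only the address-register half of the indirect access to on-board memory: a word address is latched through MEM_WR_ADDR_REG_OFF or MEM_RD_ADDR_REG_OFF and the data itself then moves through a separate data port whose register name is not among the matched lines. A sketch of the full idiom, with HRZ_MEM_DATA_PORT standing in for the real offset from horizon.h; because the address register is shared state, callers elsewhere in this listing bracket these pairs with dev->mem_lock:

#define HRZ_MEM_DATA_PORT 0	/* placeholder: the real offset lives in horizon.h */

static inline void wr_mem_sketch(const hrz_dev *dev, unsigned long word, u32 data)
{
	wr_regl(dev, MEM_WR_ADDR_REG_OFF, word * sizeof(HDW));	/* latch address */
	wr_regl(dev, HRZ_MEM_DATA_PORT, data);			/* push the data */
}

static inline u32 rd_mem_sketch(const hrz_dev *dev, unsigned long word)
{
	wr_regl(dev, MEM_RD_ADDR_REG_OFF, word * sizeof(HDW));
	return rd_regl(dev, HRZ_MEM_DATA_PORT);
}
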
407 /* RX */
452 unsigned char * data = skb->data; in dump_skb()
454 for (i=0; i<skb->len && i < 256;i++) in dump_skb()
468 PRINTD (DBG_REGS, "RX CONFIG: %#x", rd_regw (dev, RX_CONFIG_OFF)); in dump_regs()
492 /********** VPI/VCI <-> (RX) channel conversions **********/
494 /* RX channels are 10 bit integers, these fns are quite paranoid */
497 unsigned short vci_bits = 10 - vpi_bits; in vpivci_to_channel()
500 return *channel ? 0 : -EINVAL; in vpivci_to_channel()
502 return -EINVAL; in vpivci_to_channel()
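
Only the bit-width calculation and the error returns of vpivci_to_channel() match here; the packing itself is easy to reconstruct. The sketch below is an assumption about the field order (VPI in the high bits, VCI in the low bits of the 10-bit channel), consistent with the configurable vpi_bits module parameter and with channel 0 being reserved for non-user cells (see the RX config comment at line 1921):

static int vpivci_to_channel_sketch(u16 *channel, const short vpi, const int vci)
{
	unsigned short vci_bits = 10 - vpi_bits;

	if (0 <= vpi && vpi < (1 << vpi_bits) &&
	    0 <= vci && vci < (1 << vci_bits)) {
		*channel = (vpi << vci_bits) | vci;
		/* channel 0 carries non-user cells and cannot be opened */
		return *channel ? 0 : -EINVAL;
	}
	return -EINVAL;
}
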
505 /********** decode RX queue entries **********/
536 * know n such that 2^(n-1) < x/16 <= 2^n, so slide a bit until
576 const unsigned long br = test_bit(ultra, &dev->flags) ? BR_ULT : BR_HRZ; in make_rate()
594 return -EINVAL; in make_rate()
604 if (br_man <= (c << (CR_MAXPEXP+CR_MIND-br_exp))) { in make_rate()
610 // but p must be non-zero in make_rate()
616 // but p must be non-zero in make_rate()
622 // but p must be non-zero in make_rate()
624 return -EINVAL; in make_rate()
634 if (br_man <= (c << (CR_MAXPEXP+div-br_exp))) { in make_rate()
636 // c << (MAXPEXP+d-1) < B <= c << (MAXPEXP+d) in make_rate()
637 // 1 << (MAXPEXP-1) < B/2^d/c <= 1 << MAXPEXP in make_rate()
659 return -EINVAL; in make_rate()
667 return -EINVAL; in make_rate()
670 *bits = (div<<CLOCK_SELECT_SHIFT) | (pre-1); in make_rate()
692 return -1; in make_rate_with_tolerance()
694 if (c - tol <= *actual && *actual <= c + tol) in make_rate_with_tolerance()
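
make_rate() turns a requested cell rate into the divider/pre-scaler pair written at line 670 ((div<<CLOCK_SELECT_SHIFT) | (pre-1)), and make_rate_with_tolerance() then accepts the encoding only if the achieved rate lands inside the [c - tol, c + tol] window checked at line 694. The "slide a bit" comment at line 536 is the usual trick for finding the exponent n with 2^(n-1) < y <= 2^n for a given y; a standalone illustration of that idiom (not driver code):

#include <stdio.h>

/* Smallest n such that x <= 2^n (so 2^(n-1) < x <= 2^n for x >= 2). */
static unsigned int ceil_log2(unsigned long x)
{
	unsigned int n = 0;

	while ((1UL << n) < x)		/* slide the bit up until it covers x */
		n++;
	return n;
}

int main(void)
{
	printf("%u %u %u\n", ceil_log2(1), ceil_log2(16), ceil_log2(17));
	/* prints: 0 4 5 */
	return 0;
}
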
713 rx_ch_desc * rx_desc = &memmap->rx_descs[channel]; in hrz_open_rx()
717 spin_lock_irqsave (&dev->mem_lock, flags); in hrz_open_rx()
718 channel_type = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK; in hrz_open_rx()
719 spin_unlock_irqrestore (&dev->mem_lock, flags); in hrz_open_rx()
723 PRINTD (DBG_ERR|DBG_VCC, "RX channel for VC already open"); in hrz_open_rx()
724 return -EBUSY; // clean up? in hrz_open_rx()
728 if (dev->noof_spare_buffers) { in hrz_open_rx()
729 buf_ptr = dev->spare_buffers[--dev->noof_spare_buffers]; in hrz_open_rx()
744 spin_lock_irqsave (&dev->mem_lock, flags); in hrz_open_rx()
746 wr_mem (dev, &rx_desc->wr_buf_type, in hrz_open_rx()
749 wr_mem (dev, &rx_desc->rd_buf_type, buf_ptr); in hrz_open_rx()
751 spin_unlock_irqrestore (&dev->mem_lock, flags); in hrz_open_rx()
753 // rxer->rate = make_rate (qos->peak_cells); in hrz_open_rx()
764 rxer->rate = make_rate (qos->peak_cells);
771 if (ATM_SKB(skb)->vcc->pop) { in hrz_kfree_skb()
772 ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb); in hrz_kfree_skb()
787 rx_ch_desc * rx_desc = &memmap->rx_descs[vc]; in hrz_close_rx()
791 spin_lock_irqsave (&dev->mem_lock, flags); in hrz_close_rx()
792 value = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK; in hrz_close_rx()
793 spin_unlock_irqrestore (&dev->mem_lock, flags); in hrz_close_rx()
797 PRINTD (DBG_VCC, "closing VC: RX channel %u already disabled", vc); in hrz_close_rx()
803 spin_lock_irqsave (&dev->mem_lock, flags); in hrz_close_rx()
806 wr_mem (dev, &rx_desc->wr_buf_type, RX_CHANNEL_DISABLED); in hrz_close_rx()
808 if ((rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK) == RX_CHANNEL_DISABLED) in hrz_close_rx()
815 spin_unlock_irqrestore (&dev->mem_lock, flags); in hrz_close_rx()
824 // different process) may cause some data to be mis-delivered then in hrz_close_rx()
825 // there may still be a simpler solution (such as busy-waiting on in hrz_close_rx()
827 // opened - does this leave any holes?). Arguably setting up and in hrz_close_rx()
828 // tearing down the TX and RX halves of each virtual circuit could in hrz_close_rx()
834 // just disabled - the cell gets relinked at the next vc_open. in hrz_close_rx()
843 // Change the rx channel port to something different to the RX in hrz_close_rx()
844 // channel we are trying to close to force Horizon to flush the rx in hrz_close_rx()
852 r1 = rd_mem (dev, &rx_desc->rd_buf_type); in hrz_close_rx()
854 // Select this RX channel. Flush doesn't seem to work unless we in hrz_close_rx()
855 // select an RX channel beforehand in hrz_close_rx()
860 // Attempt to flush a frame on this RX channel in hrz_close_rx()
865 // Force Horizon to flush rx channel read and write pointers as before in hrz_close_rx()
870 r2 = rd_mem (dev, &rx_desc->rd_buf_type); in hrz_close_rx()
875 dev->spare_buffers[dev->noof_spare_buffers++] = (u16)r1; in hrz_close_rx()
882 rx_q_entry * wr_ptr = &memmap->rx_q_entries[rd_regw (dev, RX_QUEUE_WR_PTR_OFF)]; in hrz_close_rx()
883 rx_q_entry * rd_ptr = dev->rx_q_entry; in hrz_close_rx()
898 if (rd_ptr == dev->rx_q_wrap) in hrz_close_rx()
899 rd_ptr = dev->rx_q_reset; in hrz_close_rx()
906 spin_unlock_irqrestore (&dev->mem_lock, flags); in hrz_close_rx()
911 /********** schedule RX transfers **********/
927 // bytes waiting for RX transfer in rx_schedule()
928 rx_bytes = dev->rx_bytes; in rx_schedule()
933 PRINTD (DBG_RX|DBG_WARN, "RX error: other PCI Bus Master RX still in progress!"); in rx_schedule()
935 PRINTD (DBG_RX|DBG_ERR, "spun out waiting PCI Bus Master RX completion"); in rx_schedule()
937 clear_bit (rx_busy, &dev->flags); in rx_schedule()
938 hrz_kfree_skb (dev->rx_skb); in rx_schedule()
945 // one region - the skb itself. I don't know if this will change, in rx_schedule()
956 dev->rx_bytes = 0; in rx_schedule()
959 dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT; in rx_schedule()
963 // rx_bytes == 0 -- we're between regions in rx_schedule()
966 unsigned int rx_regions = dev->rx_regions; in rx_schedule()
974 dev->rx_addr = dev->rx_iovec->iov_base; in rx_schedule()
975 rx_bytes = dev->rx_iovec->iov_len; in rx_schedule()
976 ++dev->rx_iovec; in rx_schedule()
977 dev->rx_regions = rx_regions - 1; in rx_schedule()
985 dev->rx_bytes = 0; in rx_schedule()
988 dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT; in rx_schedule()
994 // that's all folks - end of frame in rx_schedule()
995 struct sk_buff * skb = dev->rx_skb; in rx_schedule()
996 // dev->rx_iovec = 0; in rx_schedule()
998 FLUSH_RX_CHANNEL (dev, dev->rx_channel); in rx_schedule()
1000 dump_skb ("<<<", dev->rx_channel, skb); in rx_schedule()
1002 PRINTD (DBG_RX|DBG_SKB, "push %p %u", skb->data, skb->len); in rx_schedule()
1005 struct atm_vcc * vcc = ATM_SKB(skb)->vcc; in rx_schedule()
1007 atomic_inc(&vcc->stats->rx); in rx_schedule()
1010 vcc->push (vcc, skb); in rx_schedule()
1020 rds_regb (dev, DATA_PORT_OFF, dev->rx_addr, rx_bytes); in rx_schedule()
1022 wr_regl (dev, MASTER_RX_ADDR_REG_OFF, virt_to_bus (dev->rx_addr)); in rx_schedule()
1025 dev->rx_addr += rx_bytes; in rx_schedule()
1029 // allow another RX thread to start in rx_schedule()
1031 clear_bit (rx_busy, &dev->flags); in rx_schedule()
1048 /********** handle RX bus master complete events **********/
1051 if (test_bit (rx_busy, &dev->flags)) { in rx_bus_master_complete_handler()
1054 PRINTD (DBG_RX|DBG_ERR, "unexpected RX bus master completion"); in rx_bus_master_complete_handler()
1064 PRINTD (DBG_TX, "sleeping at tx lock %p %lu", dev, dev->flags); in tx_hold()
1065 wait_event_interruptible(dev->tx_queue, (!test_and_set_bit(tx_busy, &dev->flags))); in tx_hold()
1066 PRINTD (DBG_TX, "woken at tx lock %p %lu", dev, dev->flags); in tx_hold()
1068 return -1; in tx_hold()
1076 clear_bit (tx_busy, &dev->flags); in tx_release()
1078 wake_up_interruptible (&dev->tx_queue); in tx_release()
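
tx_hold() and tx_release() above implement the single-threaded TX rule from the header comment: a sender sleeps on dev->tx_queue until it wins the tx_busy bit, and the completion path wakes the next waiter. A sketch of the calling convention (send_one_frame() and its body are illustrative assumptions; hrz_send() further down returns -ERESTARTSYS at line 1620, presumably for exactly this case):

static int send_one_frame(hrz_dev *dev, struct sk_buff *skb)
{
	if (tx_hold(dev))
		return -ERESTARTSYS;	/* interrupted while waiting for the TX lock */

	/* ... select a TX channel and start the PIO/bus-master transfer ... */

	return 0;	/* tx_release() runs later, from the TX completion path */
}
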
1094 tx_bytes = dev->tx_bytes; in tx_schedule()
1104 hrz_kfree_skb (dev->tx_skb); in tx_schedule()
1112 if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) { in tx_schedule()
1118 if (!dev->tx_iovec) { in tx_schedule()
1122 dev->tx_bytes = 0; in tx_schedule()
1125 dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT; in tx_schedule()
1129 // tx_bytes == 0 -- we're between regions in tx_schedule()
1131 unsigned int tx_regions = dev->tx_regions; in tx_schedule()
1135 dev->tx_addr = dev->tx_iovec->iov_base; in tx_schedule()
1136 tx_bytes = dev->tx_iovec->iov_len; in tx_schedule()
1137 ++dev->tx_iovec; in tx_schedule()
1138 dev->tx_regions = tx_regions - 1; in tx_schedule()
1140 if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) { in tx_schedule()
1146 dev->tx_bytes = 0; in tx_schedule()
1149 dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT; in tx_schedule()
1154 // that's all folks - end of frame in tx_schedule()
1155 struct sk_buff * skb = dev->tx_skb; in tx_schedule()
1156 dev->tx_iovec = NULL; in tx_schedule()
1159 atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); in tx_schedule()
1171 wrs_regb (dev, DATA_PORT_OFF, dev->tx_addr, tx_bytes); in tx_schedule()
1173 wr_regl (dev, TX_DESCRIPTOR_PORT_OFF, cpu_to_be32 (dev->tx_skb->len)); in tx_schedule()
1175 wr_regl (dev, MASTER_TX_ADDR_REG_OFF, virt_to_bus (dev->tx_addr)); in tx_schedule()
1177 wr_regl (dev, TX_DESCRIPTOR_REG_OFF, cpu_to_be32 (dev->tx_skb->len)); in tx_schedule()
1183 dev->tx_addr += tx_bytes; in tx_schedule()
1207 if (test_bit (tx_busy, &dev->flags)) { in tx_bus_master_complete_handler()
1217 /********** move RX Q pointer to next item in circular buffer **********/
1219 // called only from IRQ sub-handler
1222 spin_lock (&dev->mem_lock); in rx_queue_entry_next()
1223 rx_queue_entry = rd_mem (dev, &dev->rx_q_entry->entry); in rx_queue_entry_next()
1224 if (dev->rx_q_entry == dev->rx_q_wrap) in rx_queue_entry_next()
1225 dev->rx_q_entry = dev->rx_q_reset; in rx_queue_entry_next()
1227 dev->rx_q_entry++; in rx_queue_entry_next()
1228 wr_regw (dev, RX_QUEUE_RD_PTR_OFF, dev->rx_q_entry - dev->rx_q_reset); in rx_queue_entry_next()
1229 spin_unlock (&dev->mem_lock); in rx_queue_entry_next()
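
rx_queue_entry_next() walks the RX queue as a circular buffer: consume the current entry, then either step to the next slot or, if the wrap entry was just read, jump back to the reset (first) entry, and finally tell the hardware the new read pointer. A standalone illustration of the wrap logic:

#include <stdio.h>

#define QLEN 4

int main(void)
{
	int ring[QLEN];
	int *reset = ring, *wrap = ring + QLEN - 1;
	int *rd = reset;
	int step;

	for (step = 0; step < 6; step++) {
		printf("slot %ld\n", (long)(rd - reset));	/* 0 1 2 3 0 1 */
		if (rd == wrap)
			rd = reset;	/* consumed the last slot: wrap around */
		else
			rd++;
	}
	return 0;
}
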
1233 /********** handle RX data received by device **********/
1244 // try to grab rx lock (not possible during RX bus mastering) in rx_data_av_handler()
1245 if (test_and_set_bit (rx_busy, &dev->flags)) { in rx_data_av_handler()
1246 PRINTD (DBG_RX, "locked out of rx lock"); in rx_data_av_handler()
1267 // (at least) bus-mastering breaks if we try to handle a in rx_data_av_handler()
1268 // zero-length frame, besides AAL5 does not support them in rx_data_av_handler()
1269 PRINTK (KERN_ERR, "zero-length frame!"); in rx_data_av_handler()
1281 atm_vcc = dev->rxer[rx_channel]; in rx_data_av_handler()
1287 if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) { in rx_data_av_handler()
1289 if (rx_len <= atm_vcc->qos.rxtp.max_sdu) { in rx_data_av_handler()
1294 dev->rx_skb = skb; in rx_data_av_handler()
1296 dev->rx_channel = rx_channel; in rx_data_av_handler()
1300 ATM_SKB(skb)->vcc = atm_vcc; in rx_data_av_handler()
1303 // dev->rx_regions = 0; in rx_data_av_handler()
1304 // dev->rx_iovec = 0; in rx_data_av_handler()
1305 dev->rx_bytes = rx_len; in rx_data_av_handler()
1306 dev->rx_addr = skb->data; in rx_data_av_handler()
1307 PRINTD (DBG_RX, "RX start simple transfer (addr %p, len %d)", in rx_data_av_handler()
1308 skb->data, rx_len); in rx_data_av_handler()
1319 PRINTK (KERN_INFO, "frame received on TX-only VC %x", rx_channel); in rx_data_av_handler()
1324 PRINTK (KERN_WARNING, "dropped over-size frame"); in rx_data_av_handler()
1337 // RX was aborted in rx_data_av_handler()
1341 clear_bit (rx_busy, &dev->flags); in rx_data_av_handler()
1366 // (only an issue for slow hosts) RX completion goes before in interrupt_handler()
1371 // (only an issue for slow hosts) TX completion goes before RX in interrupt_handler()
1372 // data available as it is a much shorter routine - there is the in interrupt_handler()
1410 // collect device-specific (not driver/atm-linux) stats here in do_housekeeping()
1411 dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF); in do_housekeeping()
1412 dev->rx_cell_count += rd_regw (dev, RX_CELL_COUNT_OFF); in do_housekeeping()
1413 dev->hec_error_count += rd_regw (dev, HEC_ERROR_COUNT_OFF); in do_housekeeping()
1414 dev->unassigned_cell_count += rd_regw (dev, UNASSIGNED_CELL_COUNT_OFF); in do_housekeeping()
1416 mod_timer (&dev->housekeeping, jiffies + HZ/10); in do_housekeeping()
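
The counter reads above sit inside the periodic housekeeping callback; timer_setup() at line 2744 arms it and del_timer_sync() at line 2768 stops it on removal. A reconstruction of the plumbing implied by those calls (the exact body is not among the matched lines):

static void do_housekeeping(struct timer_list *t)
{
	hrz_dev *dev = from_timer(dev, t, housekeeping);

	/* accumulate the hardware cell and error counters as above ... */

	mod_timer(&dev->housekeeping, jiffies + HZ/10);	/* re-arm: ten times a second */
}
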
1426 short tx_channel = -1; in setup_idle_tx_channel()
1438 return -EBUSY; in setup_idle_tx_channel()
1445 int chan = dev->tx_idle; in setup_idle_tx_channel()
1458 dev->tx_idle = chan; in setup_idle_tx_channel()
1466 tx_ch_desc * tx_desc = &memmap->tx_descs[tx_channel]; in setup_idle_tx_channel()
1469 u16 channel = vcc->channel; in setup_idle_tx_channel()
1472 spin_lock_irqsave (&dev->mem_lock, flags); in setup_idle_tx_channel()
1475 dev->tx_channel_record[tx_channel] = channel; in setup_idle_tx_channel()
1479 vcc->tx_xbr_bits); in setup_idle_tx_channel()
1483 vcc->tx_pcr_bits); in setup_idle_tx_channel()
1486 if (vcc->tx_xbr_bits == VBR_RATE_TYPE) { in setup_idle_tx_channel()
1489 vcc->tx_scr_bits); in setup_idle_tx_channel()
1493 vcc->tx_bucket_bits); in setup_idle_tx_channel()
1497 vcc->tx_bucket_bits); in setup_idle_tx_channel()
1502 rd_ptr = rd_mem (dev, &tx_desc->rd_buf_type) & BUFFER_PTR_MASK; in setup_idle_tx_channel()
1503 wr_ptr = rd_mem (dev, &tx_desc->wr_buf_type) & BUFFER_PTR_MASK; in setup_idle_tx_channel()
1508 // spin_unlock... return -E... in setup_idle_tx_channel()
1514 switch (vcc->aal) { in setup_idle_tx_channel()
1529 wr_mem (dev, &tx_desc->partial_crc, INITIAL_CRC); in setup_idle_tx_channel()
1533 wr_mem (dev, &tx_desc->rd_buf_type, rd_ptr); in setup_idle_tx_channel()
1534 wr_mem (dev, &tx_desc->wr_buf_type, wr_ptr); in setup_idle_tx_channel()
1537 // Payload Type, CLP and GFC would go here if non-zero in setup_idle_tx_channel()
1538 wr_mem (dev, &tx_desc->cell_header, channel); in setup_idle_tx_channel()
1540 spin_unlock_irqrestore (&dev->mem_lock, flags); in setup_idle_tx_channel()
1551 hrz_dev * dev = HRZ_DEV(atm_vcc->dev); in hrz_send()
1553 u16 channel = vcc->channel; in hrz_send()
1561 channel, skb->data, skb->len); in hrz_send()
1565 if (atm_vcc->qos.txtp.traffic_class == ATM_NONE) { in hrz_send()
1566 PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", channel); in hrz_send()
1568 return -EIO; in hrz_send()
1572 ATM_SKB(skb)->vcc = atm_vcc; in hrz_send()
1574 if (skb->len > atm_vcc->qos.txtp.max_sdu) { in hrz_send()
1577 return -EIO; in hrz_send()
1583 return -EIO; in hrz_send()
1590 pci_read_config_word (dev->pci_dev, PCI_STATUS, &status); in hrz_send()
1594 pci_write_config_word (dev->pci_dev, PCI_STATUS, status); in hrz_send()
1595 if (test_bit (tx_busy, &dev->flags)) { in hrz_send()
1596 hrz_kfree_skb (dev->tx_skb); in hrz_send()
1604 /* wey-hey! */ in hrz_send()
1608 char * s = skb->data; in hrz_send()
1620 return -ERESTARTSYS; in hrz_send()
1628 buffers_required = (skb->len+(ATM_AAL5_TRAILER-1)) / ATM_CELL_PAYLOAD + 3; in hrz_send()
1630 // replace with timer and sleep, add dev->tx_buffers_queue (max 1 entry) in hrz_send()
1644 return -ERESTARTSYS; in hrz_send()
1649 if (channel == dev->last_vc) { in hrz_send()
1651 tx_channel = dev->tx_last; in hrz_send()
1656 if (dev->tx_channel_record[tx_channel] == channel) { in hrz_send()
1674 dev->last_vc = channel; in hrz_send()
1675 dev->tx_last = tx_channel; in hrz_send()
1685 unsigned int tx_len = skb->len; in hrz_send()
1686 unsigned int tx_iovcnt = skb_shinfo(skb)->nr_frags; in hrz_send()
1688 dev->tx_skb = skb; in hrz_send()
1692 dev->tx_regions = tx_iovcnt; in hrz_send()
1693 dev->tx_iovec = NULL; /* @@@ needs rewriting */ in hrz_send()
1694 dev->tx_bytes = 0; in hrz_send()
1695 PRINTD (DBG_TX|DBG_BUS, "TX start scatter-gather transfer (iovec %p, len %d)", in hrz_send()
1696 skb->data, tx_len); in hrz_send()
1699 return -EIO; in hrz_send()
1702 dev->tx_regions = 0; in hrz_send()
1703 dev->tx_iovec = NULL; in hrz_send()
1704 dev->tx_bytes = tx_len; in hrz_send()
1705 dev->tx_addr = skb->data; in hrz_send()
1707 skb->data, tx_len); in hrz_send()
1741 static void WRITE_IT_WAIT (const hrz_dev *dev, u32 ctrl) in WRITE_IT_WAIT() argument
1743 wr_regl (dev, CONTROL_0_REG, ctrl); in WRITE_IT_WAIT()
1747 static void CLOCK_IT (const hrz_dev *dev, u32 ctrl) in CLOCK_IT() argument
1750 WRITE_IT_WAIT(dev, ctrl & ~SEEPROM_SK); in CLOCK_IT()
1751 WRITE_IT_WAIT(dev, ctrl | SEEPROM_SK); in CLOCK_IT()
1756 u32 ctrl = rd_regl (dev, CONTROL_0_REG); in read_bia() local
1765 ctrl &= ~(SEEPROM_CS | SEEPROM_SK | SEEPROM_DI); in read_bia()
1766 WRITE_IT_WAIT(dev, ctrl); in read_bia()
1769 ctrl |= (SEEPROM_CS | SEEPROM_DI); in read_bia()
1770 CLOCK_IT(dev, ctrl); in read_bia()
1772 ctrl |= SEEPROM_DI; in read_bia()
1773 CLOCK_IT(dev, ctrl); in read_bia()
1775 ctrl &= ~SEEPROM_DI; in read_bia()
1776 CLOCK_IT(dev, ctrl); in read_bia()
1779 if (addr & (1 << (addr_bits-1))) in read_bia()
1780 ctrl |= SEEPROM_DI; in read_bia()
1782 ctrl &= ~SEEPROM_DI; in read_bia()
1784 CLOCK_IT(dev, ctrl); in read_bia()
1790 ctrl &= ~SEEPROM_DI; in read_bia()
1796 CLOCK_IT(dev, ctrl); in read_bia()
1799 res |= (1 << (data_bits-1)); in read_bia()
1802 ctrl &= ~(SEEPROM_SK | SEEPROM_CS); in read_bia()
1803 WRITE_IT_WAIT(dev, ctrl); in read_bia()
1823 u32 ctrl; in hrz_init() local
1825 ctrl = rd_regl (dev, CONTROL_0_REG); in hrz_init()
1826 PRINTD (DBG_INFO, "ctrl0reg is %#x", ctrl); in hrz_init()
1827 onefivefive = ctrl & ATM_LAYER_STATUS; in hrz_init()
1856 tx_ch_desc * tx_desc = &memmap->tx_descs[chan]; in hrz_init()
1857 cell_buf * buf = &memmap->inittxbufs[chan]; in hrz_init()
1860 wr_mem (dev, &tx_desc->rd_buf_type, BUF_PTR(buf)); in hrz_init()
1861 wr_mem (dev, &tx_desc->wr_buf_type, BUF_PTR(buf)); in hrz_init()
1864 wr_mem (dev, &buf->next, BUFF_STATUS_EMPTY); in hrz_init()
1871 tx_desc = memmap->bufn3; in hrz_init()
1873 wr_mem (dev, &memmap->txfreebufstart.next, BUF_PTR(tx_desc) | BUFF_STATUS_EMPTY); in hrz_init()
1875 for (buff_count = 0; buff_count < BUFN3_SIZE-1; buff_count++) { in hrz_init()
1876 wr_mem (dev, &tx_desc->next, BUF_PTR(tx_desc+1) | BUFF_STATUS_EMPTY); in hrz_init()
1880 wr_mem (dev, &tx_desc->next, BUF_PTR(&memmap->txfreebufend) | BUFF_STATUS_EMPTY); in hrz_init()
1885 printk (" rx channels"); in hrz_init()
1891 rx_ch_desc * rx_desc = &memmap->rx_descs[chan]; in hrz_init()
1893 wr_mem (dev, &rx_desc->wr_buf_type, CHANNEL_TYPE_AAL5 | RX_CHANNEL_DISABLED); in hrz_init()
1896 printk (" rx buffers"); in hrz_init()
1898 // Use space bufn4 at the moment for rx buffers in hrz_init()
1900 rx_desc = memmap->bufn4; in hrz_init()
1902 wr_mem (dev, &memmap->rxfreebufstart.next, BUF_PTR(rx_desc) | BUFF_STATUS_EMPTY); in hrz_init()
1904 for (buff_count = 0; buff_count < BUFN4_SIZE-1; buff_count++) { in hrz_init()
1905 wr_mem (dev, &rx_desc->next, BUF_PTR(rx_desc+1) | BUFF_STATUS_EMPTY); in hrz_init()
1910 wr_mem (dev, &rx_desc->next, BUF_PTR(&memmap->rxfreebufend) | BUFF_STATUS_EMPTY); in hrz_init()
1921 // RX config. Use 10-x VC bits, x VP bits, non user cells in channel 0. in hrz_init()
1925 // RX line config in hrz_init()
1940 ctrl |= GREEN_LED_OE | YELLOW_LED_OE | GREEN_LED | YELLOW_LED; in hrz_init()
1941 wr_regl (dev, CONTROL_0_REG, ctrl); in hrz_init()
1943 // Test for a 155-capable card in hrz_init()
1948 ctrl |= ATM_LAYER_SELECT; in hrz_init()
1949 wr_regl (dev, CONTROL_0_REG, ctrl); in hrz_init()
1951 // test SUNI-lite vs SAMBA in hrz_init()
1953 // Register 0x00 in the SUNI will have some of bits 3-7 set, and in hrz_init()
1978 // Turn off diagnostic loopback and enable line-timed mode in hrz_init()
1986 ctrl &= ~ATM_LAYER_SELECT; in hrz_init()
2002 u8 * esi = dev->atm_dev->esi; in hrz_init()
2036 if (!(tp->max_sdu)) { in check_max_sdu()
2038 tp->max_sdu = ATM_AAL0_SDU; in check_max_sdu()
2039 } else if (tp->max_sdu != ATM_AAL0_SDU) { in check_max_sdu()
2041 return -EINVAL; in check_max_sdu()
2045 if (tp->max_sdu == 0 || tp->max_sdu > ATM_MAX_AAL34_PDU) { in check_max_sdu()
2046 PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default"); in check_max_sdu()
2047 tp->max_sdu = ATM_MAX_AAL34_PDU; in check_max_sdu()
2051 if (tp->max_sdu == 0 || tp->max_sdu > max_frame_size) { in check_max_sdu()
2052 PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default"); in check_max_sdu()
2053 tp->max_sdu = max_frame_size; in check_max_sdu()
2064 // we are assuming non-UBR, and non-special values of pcr in atm_pcr_check()
2065 if (tp->min_pcr == ATM_MAX_PCR) in atm_pcr_check()
2067 else if (tp->min_pcr < 0) in atm_pcr_check()
2069 else if (tp->min_pcr && tp->min_pcr > pcr) in atm_pcr_check()
2072 // !! max_pcr = UNSPEC (0) is equivalent to max_pcr = MAX (-1) in atm_pcr_check()
2075 if ((0) && tp->max_pcr == ATM_MAX_PCR) in atm_pcr_check()
2077 else if ((tp->max_pcr != ATM_MAX_PCR) && tp->max_pcr < 0) in atm_pcr_check()
2079 else if (tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && tp->max_pcr < pcr) in atm_pcr_check()
2087 pcr, tp->min_pcr, tp->pcr, tp->max_pcr); in atm_pcr_check()
2088 return -EINVAL; in atm_pcr_check()
2102 hrz_dev * dev = HRZ_DEV(atm_vcc->dev); in hrz_open()
2105 short vpi = atm_vcc->vpi; in hrz_open()
2106 int vci = atm_vcc->vci; in hrz_open()
2113 return -EINVAL; in hrz_open()
2127 qos = &atm_vcc->qos; in hrz_open()
2130 switch (qos->aal) { in hrz_open()
2147 return -EINVAL; in hrz_open()
2186 txtp = &qos->txtp; in hrz_open()
2198 if (txtp->traffic_class != ATM_NONE) { in hrz_open()
2205 switch (txtp->traffic_class) { in hrz_open()
2207 // we take "the PCR" as a rate-cap in hrz_open()
2229 // that no more non-UBR channels can be opened until the in hrz_open()
2233 // slight race (no locking) here so we may get -EAGAIN in hrz_open()
2236 pcr = dev->tx_avail; in hrz_open()
2239 pcr = -pcr; in hrz_open()
2272 pcr = -pcr; in hrz_open()
2281 // slight race (no locking) here so we may get -EAGAIN in hrz_open()
2284 scr = dev->tx_avail; in hrz_open()
2287 scr = -scr; in hrz_open()
2304 // capacity must be largest integer smaller than m(p-s)/p + 1 in hrz_open()
2306 bucket = mbs*(pcr-scr)/pcr; in hrz_open()
2307 if (bucket*pcr != mbs*(pcr-scr)) in hrz_open()
2321 return -EINVAL; in hrz_open()
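
The bucket computation at lines 2304-2307 is ceil(mbs*(pcr-scr)/pcr) done in integer arithmetic: take the truncating division, then bump by one when it did not divide exactly (the increment itself is on a line that does not match here). A quick standalone check with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int mbs = 32, pcr = 1000, scr = 300;
	unsigned int bucket = mbs * (pcr - scr) / pcr;	/* 22400 / 1000 = 22 */

	if (bucket * pcr != mbs * (pcr - scr))		/* 22000 != 22400    */
		bucket += 1;				/* round up to 23    */

	printf("bucket = %u\n", bucket);		/* prints: bucket = 23 */
	return 0;
}
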
2326 // RX traffic parameters in hrz_open()
2328 PRINTD (DBG_QOS, "RX:"); in hrz_open()
2330 rxtp = &qos->rxtp; in hrz_open()
2335 if (rxtp->traffic_class != ATM_NONE) { in hrz_open()
2338 PRINTD (DBG_QOS, "RX max_sdu check failed"); in hrz_open()
2341 switch (rxtp->traffic_class) { in hrz_open()
2356 // slight race (no locking) here so we may get -EAGAIN in hrz_open()
2358 PRINTD (DBG_QOS, "snatching all remaining RX bandwidth"); in hrz_open()
2359 pcr = dev->rx_avail; in hrz_open()
2361 pcr = -pcr; in hrz_open()
2367 PRINTD (DBG_QOS, "RX PCR failed consistency check"); in hrz_open()
2377 // slight race (no locking) here so we may get -EAGAIN in hrz_open()
2379 PRINTD (DBG_QOS, "snatching all remaining RX bandwidth"); in hrz_open()
2380 scr = dev->rx_avail; in hrz_open()
2382 scr = -scr; in hrz_open()
2388 PRINTD (DBG_QOS, "RX SCR failed consistency check"); in hrz_open()
2395 PRINTD (DBG_QOS, "unsupported RX traffic class"); in hrz_open()
2396 return -EINVAL; in hrz_open()
2405 return -EINVAL; in hrz_open()
2412 return -ENOMEM; in hrz_open()
2418 spin_lock (&dev->rate_lock); in hrz_open()
2420 if (vcc.tx_rate > dev->tx_avail) { in hrz_open()
2422 error = -EAGAIN; in hrz_open()
2425 if (vcc.rx_rate > dev->rx_avail) { in hrz_open()
2426 PRINTD (DBG_QOS, "not enough RX PCR left"); in hrz_open()
2427 error = -EAGAIN; in hrz_open()
2432 dev->tx_avail -= vcc.tx_rate; in hrz_open()
2433 dev->rx_avail -= vcc.rx_rate; in hrz_open()
2434 PRINTD (DBG_QOS|DBG_VCC, "reserving %u TX PCR and %u RX PCR", in hrz_open()
2439 spin_unlock (&dev->rate_lock); in hrz_open()
2447 // in hardware" - so long as the next call does not fail :) in hrz_open()
2448 set_bit(ATM_VF_ADDR,&atm_vcc->flags); in hrz_open()
2452 if (rxtp->traffic_class != ATM_NONE) { in hrz_open()
2453 if (dev->rxer[channel]) { in hrz_open()
2454 PRINTD (DBG_ERR|DBG_VCC, "VC already open for RX"); in hrz_open()
2455 error = -EBUSY; in hrz_open()
2463 // this link allows RX frames through in hrz_open()
2464 dev->rxer[channel] = atm_vcc; in hrz_open()
2468 atm_vcc->dev_data = (void *) vccp; in hrz_open()
2471 set_bit(ATM_VF_READY,&atm_vcc->flags); in hrz_open()
2479 hrz_dev * dev = HRZ_DEV(atm_vcc->dev); in hrz_close()
2481 u16 channel = vcc->channel; in hrz_close()
2485 clear_bit(ATM_VF_READY,&atm_vcc->flags); in hrz_close()
2487 if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) { in hrz_close()
2496 if (dev->tx_channel_record[i] == channel) { in hrz_close()
2497 dev->tx_channel_record[i] = -1; in hrz_close()
2500 if (dev->last_vc == channel) in hrz_close()
2501 dev->tx_last = -1; in hrz_close()
2505 if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) { in hrz_close()
2506 // disable RXing - it tries quite hard in hrz_close()
2508 // forget the vcc - no more skbs will be pushed in hrz_close()
2509 if (atm_vcc != dev->rxer[channel]) in hrz_close()
2512 atm_vcc, dev->rxer[channel]); in hrz_close()
2513 dev->rxer[channel] = NULL; in hrz_close()
2517 spin_lock (&dev->rate_lock); in hrz_close()
2518 PRINTD (DBG_QOS|DBG_VCC, "releasing %u TX PCR and %u RX PCR", in hrz_close()
2519 vcc->tx_rate, vcc->rx_rate); in hrz_close()
2520 dev->tx_avail += vcc->tx_rate; in hrz_close()
2521 dev->rx_avail += vcc->rx_rate; in hrz_close()
2522 spin_unlock (&dev->rate_lock); in hrz_close()
2527 clear_bit(ATM_VF_ADDR,&atm_vcc->flags); in hrz_close()
2534 return -1;
2550 hrz_dev * dev = HRZ_DEV(vcc->dev);
2552 return -1;
2566 if (!left--) { in hrz_proc_read()
2578 if (!left--) in hrz_proc_read()
2580 "cells: TX %lu, RX %lu, HEC errors %lu, unassigned %lu.\n", in hrz_proc_read()
2581 dev->tx_cell_count, dev->rx_cell_count, in hrz_proc_read()
2582 dev->hec_error_count, dev->unassigned_cell_count); in hrz_proc_read()
2584 if (!left--) in hrz_proc_read()
2586 "free cell buffers: TX %hu, RX %hu+%hu.\n", in hrz_proc_read()
2589 dev->noof_spare_buffers); in hrz_proc_read()
2591 if (!left--) in hrz_proc_read()
2593 "cps remaining: TX %u, RX %u\n", in hrz_proc_read()
2594 dev->tx_avail, dev->rx_avail); in hrz_proc_read()
2622 return -EINVAL; in hrz_probe()
2626 err = -EINVAL; in hrz_probe()
2634 err = -ENOMEM; in hrz_probe()
2640 // grab IRQ and install handler - move this someplace more sensible in hrz_probe()
2641 irq = pci_dev->irq; in hrz_probe()
2648 err = -EINVAL; in hrz_probe()
2655 dev->atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &hrz_ops, -1, in hrz_probe()
2657 if (!(dev->atm_dev)) { in hrz_probe()
2659 err = -EINVAL; in hrz_probe()
2664 dev->atm_dev->number, dev, dev->atm_dev); in hrz_probe()
2665 dev->atm_dev->dev_data = (void *) dev; in hrz_probe()
2666 dev->pci_dev = pci_dev; in hrz_probe()
2683 dev->iobase = iobase; in hrz_probe()
2684 dev->irq = irq; in hrz_probe()
2685 dev->membase = membase; in hrz_probe()
2687 dev->rx_q_entry = dev->rx_q_reset = &memmap->rx_q_entries[0]; in hrz_probe()
2688 dev->rx_q_wrap = &memmap->rx_q_entries[RX_CHANS-1]; in hrz_probe()
2691 dev->last_vc = -1; in hrz_probe()
2692 dev->tx_last = -1; in hrz_probe()
2693 dev->tx_idle = 0; in hrz_probe()
2695 dev->tx_regions = 0; in hrz_probe()
2696 dev->tx_bytes = 0; in hrz_probe()
2697 dev->tx_skb = NULL; in hrz_probe()
2698 dev->tx_iovec = NULL; in hrz_probe()
2700 dev->tx_cell_count = 0; in hrz_probe()
2701 dev->rx_cell_count = 0; in hrz_probe()
2702 dev->hec_error_count = 0; in hrz_probe()
2703 dev->unassigned_cell_count = 0; in hrz_probe()
2705 dev->noof_spare_buffers = 0; in hrz_probe()
2710 dev->tx_channel_record[i] = -1; in hrz_probe()
2713 dev->flags = 0; in hrz_probe()
2716 // Fibre: ATM_OC3_PCR = 155520000/8/270*260/53 - 29/53 in hrz_probe()
2718 // Copper: (plagiarise!) 25600000/8/270*260/53 - n/53 in hrz_probe()
2722 dev->tx_avail = ATM_OC3_PCR; in hrz_probe()
2723 dev->rx_avail = ATM_OC3_PCR; in hrz_probe()
2724 set_bit(ultra, &dev->flags); // NOT "|= ultra" ! in hrz_probe()
2726 dev->tx_avail = ((25600000/8)*26)/(27*53); in hrz_probe()
2727 dev->rx_avail = ((25600000/8)*26)/(27*53); in hrz_probe()
2728 PRINTD(DBG_WARN, "Buggy ASIC: no TX bus-mastering."); in hrz_probe()
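
For reference, the two rate formulas work out as follows (ignoring the small -29/53 and -n/53 correction terms): the OC-3 fibre figure is 155.52 Mbit/s minus the 10-of-270-columns SONET overhead, divided into 53-byte cells, and the copper figure is the same shape of calculation at 25.6 Mbit/s. A standalone check:

#include <stdio.h>

int main(void)
{
	/* 155.52 Mbit/s, 260 payload columns out of 270, 53-byte cells */
	unsigned long fibre  = 155520000UL / 8 / 270 * 260 / 53;
	/* 25.6 Mbit/s copper variant, as computed in hrz_probe() above */
	unsigned long copper = ((25600000UL / 8) * 26) / (27 * 53);

	printf("%lu %lu\n", fibre, copper);	/* prints: 353207 58141 */
	return 0;
}
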
2732 spin_lock_init(&dev->rate_lock); in hrz_probe()
2734 // on-board memory access spinlock; we want atomic reads and in hrz_probe()
2736 spin_lock_init(&dev->mem_lock); in hrz_probe()
2738 init_waitqueue_head(&dev->tx_queue); in hrz_probe()
2741 dev->atm_dev->ci_range.vpi_bits = vpi_bits; in hrz_probe()
2742 dev->atm_dev->ci_range.vci_bits = 10-vpi_bits; in hrz_probe()
2744 timer_setup(&dev->housekeeping, do_housekeeping, 0); in hrz_probe()
2745 mod_timer(&dev->housekeeping, jiffies); in hrz_probe()
2767 PRINTD(DBG_INFO, "closing %p (atm_dev = %p)", dev, dev->atm_dev); in hrz_remove_one()
2768 del_timer_sync(&dev->housekeeping); in hrz_remove_one()
2770 atm_dev_deregister(dev->atm_dev); in hrz_remove_one()
2771 free_irq(dev->irq, dev); in hrz_remove_one()
2772 release_region(dev->iobase, HRZ_IO_EXTENT); in hrz_remove_one()
2812 MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames");