Lines matching full:qca — every occurrence of the identifier qca in the driver source, listed with the file line number, the matched line and the enclosing function ("local" marks the lines that declare qca as a local variable).
64 struct qcauart *qca = serdev_device_get_drvdata(serdev); in qca_tty_receive() local
65 struct net_device *netdev = qca->net_dev; in qca_tty_receive()
69 if (!qca->rx_skb) { in qca_tty_receive()
70 qca->rx_skb = netdev_alloc_skb_ip_align(netdev, in qca_tty_receive()
73 if (!qca->rx_skb) { in qca_tty_receive()
83 retcode = qcafrm_fsm_decode(&qca->frm_handle, in qca_tty_receive()
84 qca->rx_skb->data, in qca_tty_receive()
85 skb_tailroom(qca->rx_skb), in qca_tty_receive()
105 skb_put(qca->rx_skb, retcode); in qca_tty_receive()
106 qca->rx_skb->protocol = eth_type_trans( in qca_tty_receive()
107 qca->rx_skb, qca->rx_skb->dev); in qca_tty_receive()
108 skb_checksum_none_assert(qca->rx_skb); in qca_tty_receive()
109 netif_rx(qca->rx_skb); in qca_tty_receive()
110 qca->rx_skb = netdev_alloc_skb_ip_align(netdev, in qca_tty_receive()
113 if (!qca->rx_skb) { in qca_tty_receive()
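
File lines 64–113 above all come from qca_tty_receive(), the serdev receive callback of this QCA7000 UART Ethernet driver: incoming UART bytes are fed one at a time into qcafrm_fsm_decode(), which reassembles them into Ethernet frames inside qca->rx_skb, and each completed frame is handed to the stack with netif_rx() before a fresh skb is allocated. The sketch below is a reconstruction for illustration, not the driver verbatim; the struct qcauart layout, the qca_7k_common.h header name, the VLAN_ETH_HLEN sizing and the collapsed error handling are assumptions, and the same assumed context is reused by the sketches further down.

/*
 * Hedged sketch of the RX path and of the private data it relies on.
 * Includes, struct layout and header name are assumptions shared by
 * the later sketches in this listing.
 */
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include "qca_7k_common.h"      /* qcafrm_handle, qcafrm_fsm_decode(), ... (name assumed) */

struct qcauart {                        /* assumed private data layout */
        struct net_device *net_dev;
        spinlock_t lock;                /* protects the TX state below */
        struct work_struct tx_work;     /* drains the remaining TX bytes */
        struct serdev_device *serdev;
        struct qcafrm_handle frm_handle;
        struct sk_buff *rx_skb;
        unsigned char *tx_head;         /* next byte to hand to the UART */
        int tx_left;                    /* bytes still queued for the UART */
        unsigned char *tx_buffer;
};

/* Prototype follows the serdev receive_buf op; its exact types have
 * varied between kernel versions.
 */
static size_t qca_tty_receive(struct serdev_device *serdev,
                              const u8 *data, size_t count)
{
        struct qcauart *qca = serdev_device_get_drvdata(serdev);
        struct net_device *netdev = qca->net_dev;
        struct net_device_stats *n_stats = &netdev->stats;
        size_t i;

        /* Lazily allocate a receive skb sized for MTU plus VLAN header. */
        if (!qca->rx_skb) {
                qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
                                                        netdev->mtu +
                                                        VLAN_ETH_HLEN);
                if (!qca->rx_skb) {
                        n_stats->rx_errors++;
                        n_stats->rx_dropped++;
                        return 0;
                }
        }

        for (i = 0; i < count; i++) {
                s32 retcode = qcafrm_fsm_decode(&qca->frm_handle,
                                                qca->rx_skb->data,
                                                skb_tailroom(qca->rx_skb),
                                                data[i]);

                /* Non-positive: still gathering, or a framing error
                 * (per-error statistics omitted in this sketch).
                 */
                if (retcode <= 0)
                        continue;

                /* A complete frame was decoded: push it up the stack and
                 * start over with a fresh skb.
                 */
                skb_put(qca->rx_skb, retcode);
                qca->rx_skb->protocol = eth_type_trans(qca->rx_skb,
                                                       qca->rx_skb->dev);
                skb_checksum_none_assert(qca->rx_skb);
                netif_rx(qca->rx_skb);
                n_stats->rx_packets++;
                n_stats->rx_bytes += retcode;

                qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
                                                        netdev->mtu +
                                                        VLAN_ETH_HLEN);
                if (!qca->rx_skb) {
                        n_stats->rx_errors++;
                        return i;
                }
        }

        return i;
}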
127 struct qcauart *qca = container_of(work, struct qcauart, tx_work); in qcauart_transmit() local
128 struct net_device_stats *n_stats = &qca->net_dev->stats; in qcauart_transmit()
131 spin_lock_bh(&qca->lock); in qcauart_transmit()
134 if (!netif_running(qca->net_dev)) { in qcauart_transmit()
135 spin_unlock_bh(&qca->lock); in qcauart_transmit()
139 if (qca->tx_left <= 0) { in qcauart_transmit()
144 spin_unlock_bh(&qca->lock); in qcauart_transmit()
145 netif_wake_queue(qca->net_dev); in qcauart_transmit()
149 written = serdev_device_write_buf(qca->serdev, qca->tx_head, in qcauart_transmit()
150 qca->tx_left); in qcauart_transmit()
152 qca->tx_left -= written; in qcauart_transmit()
153 qca->tx_head += written; in qcauart_transmit()
155 spin_unlock_bh(&qca->lock); in qcauart_transmit()
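
File lines 127–155 are from qcauart_transmit(), the work item that drains whatever part of the TX buffer the UART could not accept immediately; once tx_left reaches zero it accounts the packet and wakes the queue. A hedged sketch, reusing the struct qcauart assumed above (the if (written > 0) guard is not visible in the matches and is assumed):

static void qcauart_transmit(struct work_struct *work)
{
        struct qcauart *qca = container_of(work, struct qcauart, tx_work);
        struct net_device_stats *n_stats = &qca->net_dev->stats;
        int written;

        spin_lock_bh(&qca->lock);

        /* Nothing to do if the interface was taken down in the meantime. */
        if (!netif_running(qca->net_dev)) {
                spin_unlock_bh(&qca->lock);
                return;
        }

        /* Whole frame written: account it and let the stack queue the next one. */
        if (qca->tx_left <= 0) {
                n_stats->tx_packets++;
                spin_unlock_bh(&qca->lock);
                netif_wake_queue(qca->net_dev);
                return;
        }

        /* Otherwise push the remaining bytes; the write may again be partial. */
        written = serdev_device_write_buf(qca->serdev, qca->tx_head,
                                          qca->tx_left);
        if (written > 0) {
                qca->tx_left -= written;
                qca->tx_head += written;
        }
        spin_unlock_bh(&qca->lock);
}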
163 struct qcauart *qca = serdev_device_get_drvdata(serdev); in qca_tty_wakeup() local
165 schedule_work(&qca->tx_work); in qca_tty_wakeup()
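
File lines 163–165 are qca_tty_wakeup(), the write_wakeup callback: when the serial core has room again, the driver simply reschedules the worker above. Sketch, together with the serdev ops table that the two callbacks imply but that is not visible in the matches (its name is an assumption):

static void qca_tty_wakeup(struct serdev_device *serdev)
{
        struct qcauart *qca = serdev_device_get_drvdata(serdev);

        schedule_work(&qca->tx_work);
}

/* Hook both callbacks into the serdev core (table name assumed). */
static const struct serdev_device_ops qca_serdev_ops = {
        .receive_buf = qca_tty_receive,
        .write_wakeup = qca_tty_wakeup,
};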
175 struct qcauart *qca = netdev_priv(dev); in qcauart_netdev_open() local
177 netif_start_queue(qca->net_dev); in qcauart_netdev_open()
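
File lines 175–177: qcauart_netdev_open() only needs to start the TX queue, since the serdev port itself is opened at probe time. Sketch:

static int qcauart_netdev_open(struct net_device *dev)
{
        struct qcauart *qca = netdev_priv(dev);

        netif_start_queue(qca->net_dev);

        return 0;
}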
184 struct qcauart *qca = netdev_priv(dev); in qcauart_netdev_close() local
187 flush_work(&qca->tx_work); in qcauart_netdev_close()
189 spin_lock_bh(&qca->lock); in qcauart_netdev_close()
190 qca->tx_left = 0; in qcauart_netdev_close()
191 spin_unlock_bh(&qca->lock); in qcauart_netdev_close()
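
File lines 184–191: qcauart_netdev_close() stops the queue, waits for the TX worker with flush_work(), then drops any half-written frame by zeroing tx_left under the lock. Sketch (the netif_stop_queue() call is implied but not visible in the matches):

static int qcauart_netdev_close(struct net_device *dev)
{
        struct qcauart *qca = netdev_priv(dev);

        netif_stop_queue(dev);
        flush_work(&qca->tx_work);

        spin_lock_bh(&qca->lock);
        qca->tx_left = 0;       /* discard whatever is still queued for the UART */
        spin_unlock_bh(&qca->lock);

        return 0;
}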
200 struct qcauart *qca = netdev_priv(dev); in qcauart_netdev_xmit() local
205 spin_lock(&qca->lock); in qcauart_netdev_xmit()
207 WARN_ON(qca->tx_left); in qcauart_netdev_xmit()
210 spin_unlock(&qca->lock); in qcauart_netdev_xmit()
211 netdev_warn(qca->net_dev, "xmit: iface is down\n"); in qcauart_netdev_xmit()
215 pos = qca->tx_buffer; in qcauart_netdev_xmit()
232 netif_stop_queue(qca->net_dev); in qcauart_netdev_xmit()
234 written = serdev_device_write_buf(qca->serdev, qca->tx_buffer, in qcauart_netdev_xmit()
235 pos - qca->tx_buffer); in qcauart_netdev_xmit()
237 qca->tx_left = (pos - qca->tx_buffer) - written; in qcauart_netdev_xmit()
238 qca->tx_head = qca->tx_buffer + written; in qcauart_netdev_xmit()
241 spin_unlock(&qca->lock); in qcauart_netdev_xmit()
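
File lines 200–241 belong to qcauart_netdev_xmit(): under qca->lock the skb is serialized into qca->tx_buffer with QCA7000 framing (header, payload, padding up to the minimum frame length, footer), the queue is stopped, and as much as possible is written to the UART right away; the remainder is left in tx_head/tx_left for qcauart_transmit() to finish. A hedged sketch; qcafrm_create_header(), qcafrm_create_footer(), QCAFRM_MIN_LEN and the tail of the function (statistics, skb free, return value) are not visible in the matches and are assumptions:

static netdev_tx_t qcauart_netdev_xmit(struct sk_buff *skb,
                                       struct net_device *dev)
{
        struct net_device_stats *n_stats = &dev->stats;
        struct qcauart *qca = netdev_priv(dev);
        u8 pad_len = 0;
        int written;
        u8 *pos;

        spin_lock(&qca->lock);

        /* The queue is only woken once the previous frame has fully drained. */
        WARN_ON(qca->tx_left);

        if (!netif_running(dev)) {
                spin_unlock(&qca->lock);
                netdev_warn(qca->net_dev, "xmit: iface is down\n");
                goto out;
        }

        /* Serialize the frame: header, payload, pad to minimum size, footer. */
        pos = qca->tx_buffer;

        if (skb->len < QCAFRM_MIN_LEN)
                pad_len = QCAFRM_MIN_LEN - skb->len;

        pos += qcafrm_create_header(pos, skb->len + pad_len);

        memcpy(pos, skb->data, skb->len);
        pos += skb->len;

        if (pad_len) {
                memset(pos, 0, pad_len);
                pos += pad_len;
        }

        pos += qcafrm_create_footer(pos);

        netif_stop_queue(qca->net_dev);

        written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
                                          pos - qca->tx_buffer);
        if (written > 0) {
                qca->tx_left = (pos - qca->tx_buffer) - written;
                qca->tx_head = qca->tx_buffer + written;
                n_stats->tx_bytes += written;
        }
        spin_unlock(&qca->lock);

        netif_trans_update(dev);
out:
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}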
251 struct qcauart *qca = netdev_priv(dev); in qcauart_netdev_tx_timeout() local
253 netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", in qcauart_netdev_tx_timeout()
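
File lines 251–253 are from qcauart_netdev_tx_timeout(). A sketch; the arguments completing the netdev_info() call and the statistics updates are not visible in the matches and are assumed, and the txqueue parameter only exists on newer kernels:

static void qcauart_netdev_tx_timeout(struct net_device *dev,
                                      unsigned int txqueue)
{
        struct qcauart *qca = netdev_priv(dev);

        netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
                    jiffies, dev_trans_start(dev));
        dev->stats.tx_errors++;
        dev->stats.tx_dropped++;
}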
261 struct qcauart *qca = netdev_priv(dev); in qcauart_netdev_init() local
269 qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL); in qcauart_netdev_init()
270 if (!qca->tx_buffer) in qcauart_netdev_init()
273 qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev, in qcauart_netdev_init()
274 qca->net_dev->mtu + in qcauart_netdev_init()
276 if (!qca->rx_skb) in qcauart_netdev_init()
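
File lines 261–276 come from qcauart_netdev_init() (.ndo_init): it allocates the framing TX buffer with devm_kmalloc() against the serdev device, plus the initial receive skb. Sketch; the buffer-length expression is not visible in the matches, so the QCAFRM_*-based sizing is an assumption:

static int qcauart_netdev_init(struct net_device *dev)
{
        struct qcauart *qca = netdev_priv(dev);
        size_t len;

        /* Room for one maximally sized frame plus QCA7000 header and
         * footer (sizing assumed).
         */
        len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
        qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
        if (!qca->tx_buffer)
                return -ENOMEM;

        qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
                                                qca->net_dev->mtu +
                                                VLAN_ETH_HLEN);
        if (!qca->rx_skb)
                return -ENOBUFS;

        return 0;
}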
284 struct qcauart *qca = netdev_priv(dev); in qcauart_netdev_uninit() local
286 dev_kfree_skb(qca->rx_skb); in qcauart_netdev_uninit()
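
File lines 284–286: qcauart_netdev_uninit() frees whatever receive skb is still held. Sketch:

static void qcauart_netdev_uninit(struct net_device *dev)
{
        struct qcauart *qca = netdev_priv(dev);

        dev_kfree_skb(qca->rx_skb);
}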
314 .compatible = "qca,qca7000",
323 struct qcauart *qca; in qca_uart_probe() local
333 qca = netdev_priv(qcauart_dev); in qca_uart_probe()
334 if (!qca) { in qca_uart_probe()
339 qca->net_dev = qcauart_dev; in qca_uart_probe()
340 qca->serdev = serdev; in qca_uart_probe()
341 qcafrm_fsm_init_uart(&qca->frm_handle); in qca_uart_probe()
343 spin_lock_init(&qca->lock); in qca_uart_probe()
344 INIT_WORK(&qca->tx_work, qcauart_transmit); in qca_uart_probe()
348 ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev); in qca_uart_probe()
350 eth_hw_addr_random(qca->net_dev); in qca_uart_probe()
352 qca->net_dev->dev_addr); in qca_uart_probe()
355 netif_carrier_on(qca->net_dev); in qca_uart_probe()
356 serdev_device_set_drvdata(serdev, qca); in qca_uart_probe()
376 cancel_work_sync(&qca->tx_work); in qca_uart_probe()
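
File lines 323–376 come from qca_uart_probe(): an Ethernet net_device is allocated, the private struct is wired up (framing state, lock, TX work), the MAC address is read from the device tree or randomized, the serdev port is opened and configured, and the netdev is registered, with cancel_work_sync() on the unwind path. A condensed, hedged sketch; the qcauart_netdev_setup() helper, the "current-speed" property, the baud-rate handling and the error paths beyond what the matches show are assumptions:

static int qca_uart_probe(struct serdev_device *serdev)
{
        struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
        struct qcauart *qca;
        u32 speed = 115200;     /* default; DT override assumed */
        int ret;

        if (!qcauart_dev)
                return -ENOMEM;

        /* Assumed helper: installs the netdev_ops, watchdog timeout, MTU limits. */
        qcauart_netdev_setup(qcauart_dev);
        SET_NETDEV_DEV(qcauart_dev, &serdev->dev);

        qca = netdev_priv(qcauart_dev);
        if (!qca) {
                pr_err("qca_uart: Fail to retrieve private structure\n");
                ret = -ENOMEM;
                goto free;
        }
        qca->net_dev = qcauart_dev;
        qca->serdev = serdev;
        qcafrm_fsm_init_uart(&qca->frm_handle);

        spin_lock_init(&qca->lock);
        INIT_WORK(&qca->tx_work, qcauart_transmit);

        of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);

        /* MAC address from the device tree, or a random one as fallback. */
        ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev);
        if (ret) {
                eth_hw_addr_random(qca->net_dev);
                dev_info(&serdev->dev, "Using random MAC address: %pM\n",
                         qca->net_dev->dev_addr);
        }

        netif_carrier_on(qca->net_dev);
        serdev_device_set_drvdata(serdev, qca);
        serdev_device_set_client_ops(serdev, &qca_serdev_ops);

        ret = serdev_device_open(serdev);
        if (ret)
                goto free;

        serdev_device_set_baudrate(serdev, speed);
        serdev_device_set_flow_control(serdev, false);

        ret = register_netdev(qcauart_dev);
        if (ret) {
                serdev_device_close(serdev);
                cancel_work_sync(&qca->tx_work);
                goto free;
        }

        return 0;

free:
        free_netdev(qcauart_dev);
        return ret;
}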
389 struct qcauart *qca = serdev_device_get_drvdata(serdev); in qca_uart_remove() local
391 unregister_netdev(qca->net_dev); in qca_uart_remove()
395 cancel_work_sync(&qca->tx_work); in qca_uart_remove()
397 free_netdev(qca->net_dev); in qca_uart_remove()
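
File lines 389–397: qca_uart_remove() tears down in reverse order: unregister the netdev, close the serdev port so no further callbacks arrive, make sure the TX work has finished, then free the netdev. Sketch:

static void qca_uart_remove(struct serdev_device *serdev)
{
        struct qcauart *qca = serdev_device_get_drvdata(serdev);

        unregister_netdev(qca->net_dev);

        /* Stop UART traffic before killing the deferred TX work. */
        serdev_device_close(serdev);
        cancel_work_sync(&qca->tx_work);

        free_netdev(qca->net_dev);
}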