// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland
 */

#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/if_arp.h>
#include <net/caif/caif_device.h>
#include <net/caif/cfcnfg.h>
#include <linux/err.h>
#include <linux/debugfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sjur Brendeland");
MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
MODULE_ALIAS_LDISC(N_CAIF);

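/*
 * TX flow control is done with hysteresis: flow is switched off once the
 * queue towards the tty grows beyond SEND_QUEUE_HIGH and switched back on
 * when handle_tx() has drained it to SEND_QUEUE_LOW or less. CAIF_SENDING
 * and CAIF_FLOW_OFF_SENT are bit numbers in ser_device::state.
 */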
#define SEND_QUEUE_LOW 10
#define SEND_QUEUE_HIGH 100
#define CAIF_SENDING	        1 /* Bit 1 = 0x02 */
#define CAIF_FLOW_OFF_SENT	4 /* Bit 4 = 0x10 */
#define MAX_WRITE_CHUNK	     4096
#define ON 1
#define OFF 0
#define CAIF_MAX_MTU 4096

static DEFINE_SPINLOCK(ser_lock);
static LIST_HEAD(ser_list);
static LIST_HEAD(ser_release_list);

static bool ser_loop;
module_param(ser_loop, bool, 0444);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");

static bool ser_use_stx = true;
module_param(ser_use_stx, bool, 0444);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");

static bool ser_use_fcs = true;
module_param(ser_use_fcs, bool, 0444);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");

static int ser_write_chunk = MAX_WRITE_CHUNK;
module_param(ser_write_chunk, int, 0444);
MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");

static struct dentry *debugfsdir;

static int caif_net_open(struct net_device *dev);
static int caif_net_close(struct net_device *dev);

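/*
 * Per-tty state for the line discipline: the CAIF network device bound to
 * the tty, the queue of frames waiting to be written to the UART, and the
 * CAIF_* state bits. The debugfs members (CONFIG_DEBUG_FS only) expose the
 * most recent TX/RX data and a snapshot of the tty status bits.
 */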
struct ser_device {
	struct caif_dev_common common;
	struct list_head node;
	struct net_device *dev;
	struct sk_buff_head head;
	struct tty_struct *tty;
	bool tx_started;
	unsigned long state;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_tty_dir;
	struct debugfs_blob_wrapper tx_blob;
	struct debugfs_blob_wrapper rx_blob;
	u8 rx_data[128];
	u8 tx_data[128];
	u8 tty_status;
#endif
};

static void caifdev_setup(struct net_device *dev);
static void ldisc_tx_wakeup(struct tty_struct *tty);
#ifdef CONFIG_DEBUG_FS
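/* Pack the current tty flow/ctrl status bits into one byte for debugfs */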
static inline void update_tty_status(struct ser_device *ser)
{
	ser->tty_status =
		ser->tty->flow.stopped << 5 |
		ser->tty->flow.tco_stopped << 3 |
		ser->tty->ctrl.packet << 2;
}

static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
	ser->debugfs_tty_dir = debugfs_create_dir(tty->name, debugfsdir);

	debugfs_create_blob("last_tx_msg", 0400, ser->debugfs_tty_dir,
			    &ser->tx_blob);

	debugfs_create_blob("last_rx_msg", 0400, ser->debugfs_tty_dir,
			    &ser->rx_blob);

	debugfs_create_xul("ser_state", 0400, ser->debugfs_tty_dir,
			   &ser->state);

	debugfs_create_x8("tty_status", 0400, ser->debugfs_tty_dir,
			  &ser->tty_status);

	ser->tx_blob.data = ser->tx_data;
	ser->tx_blob.size = 0;
	ser->rx_blob.data = ser->rx_data;
	ser->rx_blob.size = 0;
}

static inline void debugfs_deinit(struct ser_device *ser)
{
	debugfs_remove_recursive(ser->debugfs_tty_dir);
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
	if (size > sizeof(ser->rx_data))
		size = sizeof(ser->rx_data);
	memcpy(ser->rx_data, data, size);
	ser->rx_blob.data = ser->rx_data;
	ser->rx_blob.size = size;
}
#else
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
}

static inline void debugfs_deinit(struct ser_device *ser)
{
}

static inline void update_tty_status(struct ser_device *ser)
{
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}
#endif

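/*
 * Receive callback from the tty layer: copy the received bytes into a fresh
 * skb and push it up the stack as an ETH_P_CAIF packet. The per-byte flags
 * (break/overrun) are not inspected.
 */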
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
			  const u8 *flags, size_t count)
{
	struct sk_buff *skb = NULL;
	struct ser_device *ser;
	int ret;

	ser = tty->disc_data;

	/*
	 * NOTE: flags may contain information about break or overrun.
	 * This is not yet handled.
	 */

	/*
	 * Workaround for garbage at start of transmission,
	 * only enable if STX handling is not enabled.
	 */
	if (!ser->common.use_stx && !ser->tx_started) {
		dev_info(&ser->dev->dev,
			 "Bytes received before initial transmission - bytes discarded\n");
		return;
	}

	BUG_ON(ser->dev == NULL);

	/* Get a suitable caif packet and copy in data. */
	skb = netdev_alloc_skb(ser->dev, count + 1);
	if (skb == NULL)
		return;
	skb_put_data(skb, data, count);

	skb->protocol = htons(ETH_P_CAIF);
	skb_reset_mac_header(skb);
	debugfs_rx(ser, data, count);
	/* Push received packet up the stack. */
	ret = netif_rx(skb);
	if (!ret) {
		ser->dev->stats.rx_packets++;
		ser->dev->stats.rx_bytes += count;
	} else {
		++ser->dev->stats.rx_dropped;
	}
	update_tty_status(ser);
}

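/*
 * Drain the TX queue to the tty (or loop data straight back into
 * ldisc_receive() in ser_loop mode). The CAIF_SENDING bit acts as a
 * re-entrancy guard: concurrent callers simply return. Each write is capped
 * at ser_write_chunk bytes and a partially written skb stays at the head of
 * the queue. Flow is turned back on once the queue length drops to
 * SEND_QUEUE_LOW or below.
 */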
static int handle_tx(struct ser_device *ser)
{
	struct tty_struct *tty;
	struct sk_buff *skb;
	int tty_wr, len, room;

	tty = ser->tty;
	ser->tx_started = true;

	/* Enter critical section */
	if (test_and_set_bit(CAIF_SENDING, &ser->state))
		return 0;

	/* skb_peek is safe because handle_tx is called after skb_queue_tail */
	while ((skb = skb_peek(&ser->head)) != NULL) {

		/* Make sure you don't write too much */
		len = skb->len;
		room = tty_write_room(tty);
		if (!room)
			break;
		if (room > ser_write_chunk)
			room = ser_write_chunk;
		if (len > room)
			len = room;

		/* Write to tty or loopback */
		if (!ser_loop) {
			tty_wr = tty->ops->write(tty, skb->data, len);
			update_tty_status(ser);
		} else {
			tty_wr = len;
			ldisc_receive(tty, skb->data, NULL, len);
		}
		ser->dev->stats.tx_packets++;
		ser->dev->stats.tx_bytes += tty_wr;

		/* Error on TTY ?! */
		if (tty_wr < 0)
			goto error;
		/* Reduce buffer written, and discard if empty */
		skb_pull(skb, tty_wr);
		if (skb->len == 0) {
			struct sk_buff *tmp = skb_dequeue(&ser->head);

			WARN_ON(tmp != skb);
			dev_consume_skb_any(skb);
		}
	}
	/* Send flow on once the queue has drained */
	if (ser->head.qlen <= SEND_QUEUE_LOW &&
	    test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
	    ser->common.flowctrl != NULL)
		ser->common.flowctrl(ser->dev, ON);
	clear_bit(CAIF_SENDING, &ser->state);
	return 0;
error:
	clear_bit(CAIF_SENDING, &ser->state);
	return tty_wr;
}

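/*
 * ndo_start_xmit: queue the frame and try to push it out immediately.
 * Flow off is requested once when the queue grows past SEND_QUEUE_HIGH.
 */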
static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ser_device *ser;

	ser = netdev_priv(dev);

	/* Send flow off once, on high water mark */
	if (ser->head.qlen > SEND_QUEUE_HIGH &&
	    !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
	    ser->common.flowctrl != NULL)
		ser->common.flowctrl(ser->dev, OFF);

	skb_queue_tail(&ser->head, skb);
	handle_tx(ser);

	/* The skb has been queued (and possibly sent); report it as consumed */
	return NETDEV_TX_OK;
}

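/* tty write_wakeup callback: more room in the tty buffer, resume draining */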
static void ldisc_tx_wakeup(struct tty_struct *tty)
{
	struct ser_device *ser;

	ser = tty->disc_data;
	BUG_ON(ser == NULL);
	WARN_ON(ser->tty != tty);
	handle_tx(ser);
}

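/*
 * Tear down the devices queued on ser_release_list. unregister_netdevice()
 * needs the RTNL lock, so teardown runs from a workqueue rather than
 * directly in ldisc_close(), presumably to avoid taking the RTNL from the
 * tty close path.
 */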
static void ser_release(struct work_struct *work)
{
	struct list_head list;
	struct ser_device *ser, *tmp;

	spin_lock(&ser_lock);
	list_replace_init(&ser_release_list, &list);
	spin_unlock(&ser_lock);

	if (!list_empty(&list)) {
		rtnl_lock();
		list_for_each_entry_safe(ser, tmp, &list, node) {
			dev_close(ser->dev);
			unregister_netdevice(ser->dev);
			debugfs_deinit(ser);
		}
		rtnl_unlock();
	}
}

static DECLARE_WORK(ser_release_work, ser_release);

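/*
 * Attach the line discipline to a tty: allocate and register a "cf<tty>"
 * CAIF network device and bind it to the tty. The TX queue starts stopped
 * and is woken when the interface is brought up (caif_net_open()).
 */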
static int ldisc_open(struct tty_struct *tty)
{
	struct ser_device *ser;
	struct net_device *dev;
	char name[64];
	int result;

	/* No write no play */
	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;
	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
		return -EPERM;

	/* Release already-closed devices to avoid a name collision */
	ser_release(NULL);

	result = snprintf(name, sizeof(name), "cf%s", tty->name);
	if (result >= IFNAMSIZ)
		return -EINVAL;
	dev = alloc_netdev(sizeof(*ser), name, NET_NAME_UNKNOWN,
			   caifdev_setup);
	if (!dev)
		return -ENOMEM;

	ser = netdev_priv(dev);
	ser->tty = tty_kref_get(tty);
	ser->dev = dev;
	debugfs_init(ser, tty);
	tty->receive_room = 4096;
	tty->disc_data = ser;
	set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	rtnl_lock();
	result = register_netdevice(dev);
	if (result) {
		tty_kref_put(tty);
		rtnl_unlock();
		free_netdev(dev);
		return -ENODEV;
	}

	spin_lock(&ser_lock);
	list_add(&ser->node, &ser_list);
	spin_unlock(&ser_lock);
	rtnl_unlock();
	netif_stop_queue(dev);
	update_tty_status(ser);
	return 0;
}

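/*
 * Detach from the tty: drop the tty reference and hand the device over to
 * the release work for deferred unregistration.
 */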
static void ldisc_close(struct tty_struct *tty)
{
	struct ser_device *ser = tty->disc_data;

	tty_kref_put(ser->tty);

	spin_lock(&ser_lock);
	list_move(&ser->node, &ser_release_list);
	spin_unlock(&ser_lock);
	schedule_work(&ser_release_work);
}

/* The line discipline structure. */
static struct tty_ldisc_ops caif_ldisc = {
	.owner =	THIS_MODULE,
	.num =		N_CAIF,
	.name =		"n_caif",
	.open =		ldisc_open,
	.close =	ldisc_close,
	.receive_buf =	ldisc_receive,
	.write_wakeup =	ldisc_tx_wakeup
};

static const struct net_device_ops netdev_ops = {
	.ndo_open = caif_net_open,
	.ndo_stop = caif_net_close,
	.ndo_start_xmit = caif_xmit
};

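/*
 * net_device setup callback passed to alloc_netdev(): configure the device
 * as a point-to-point CAIF interface and initialise the per-device state.
 */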
static void caifdev_setup(struct net_device *dev)
{
	struct ser_device *serdev = netdev_priv(dev);

	dev->features = 0;
	dev->netdev_ops = &netdev_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CAIF_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->needs_free_netdev = true;
	skb_queue_head_init(&serdev->head);
	serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
	serdev->common.use_frag = true;
	serdev->common.use_stx = ser_use_stx;
	serdev->common.use_fcs = ser_use_fcs;
	serdev->dev = dev;
}

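/* Start/stop the TX queue when the interface is brought up or down */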
static int caif_net_open(struct net_device *dev)
{
	netif_wake_queue(dev);
	return 0;
}

static int caif_net_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

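/*
 * Module init/exit: register the N_CAIF line discipline and create the
 * debugfs root; on exit, release any remaining devices before unregistering
 * the line discipline.
 */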
static int __init caif_ser_init(void)
{
	int ret;

	ret = tty_register_ldisc(&caif_ldisc);
	if (ret < 0) {
		pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF, ret);
		return ret;
	}

	debugfsdir = debugfs_create_dir("caif_serial", NULL);
	return 0;
}

static void __exit caif_ser_exit(void)
{
	spin_lock(&ser_lock);
	list_splice(&ser_list, &ser_release_list);
	spin_unlock(&ser_lock);
	ser_release(NULL);
	cancel_work_sync(&ser_release_work);
	tty_unregister_ldisc(&caif_ldisc);
	debugfs_remove_recursive(debugfsdir);
}

module_init(caif_ser_init);
module_exit(caif_ser_exit);