xref: /linux/net/can/bcm.c (revision a0b0f6c7d7f29f1ade9ec59699d02e3b153ee8e4)
1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /*
3  * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
4  *
5  * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of Volkswagen nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * Alternatively, provided that this notice is retained in full, this
21  * software may be distributed under the terms of the GNU General
22  * Public License ("GPL") version 2, in which case the provisions of the
23  * GPL apply INSTEAD OF those given above.
24  *
25  * The provided data structures and external interfaces from this code
26  * are not restricted to be used by modules with a GPL compatible license.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
39  * DAMAGE.
40  *
41  */
42 
43 #include <linux/module.h>
44 #include <linux/init.h>
45 #include <linux/interrupt.h>
46 #include <linux/hrtimer.h>
47 #include <linux/list.h>
48 #include <linux/proc_fs.h>
49 #include <linux/seq_file.h>
50 #include <linux/uio.h>
51 #include <linux/net.h>
52 #include <linux/netdevice.h>
53 #include <linux/socket.h>
54 #include <linux/if_arp.h>
55 #include <linux/skbuff.h>
56 #include <linux/can.h>
57 #include <linux/can/core.h>
58 #include <linux/can/skb.h>
59 #include <linux/can/bcm.h>
60 #include <linux/slab.h>
61 #include <linux/spinlock.h>
62 #include <net/can.h>
63 #include <net/sock.h>
64 #include <net/net_namespace.h>
65 
66 /*
67  * To send multiple CAN frame content within TX_SETUP or to filter
68  * CAN messages with multiplex index within RX_SETUP, the number of
69  * different filters is limited to 256 due to the one byte index value.
70  */
71 #define MAX_NFRAMES 256
72 
73 /* limit timers to 400 days for sending/timeouts */
74 #define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
75 
76 /* use of last_frames[index].flags */
77 #define RX_LOCAL   0x10 /* frame was created on the local host */
78 #define RX_OWN     0x20 /* frame was sent via the socket it was received on */
79 #define RX_RECV    0x40 /* received data for this element */
80 #define RX_THR     0x80 /* element not been sent due to throttle feature */
81 #define BCM_CAN_FLAGS_MASK 0x0F /* to clean private flags after usage */
82 
83 /* get best masking value for can_rx_register() for a given single can_id */
84 #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
85 		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
86 		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
87 
88 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
89 MODULE_LICENSE("Dual BSD/GPL");
90 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
91 MODULE_ALIAS("can-proto-2");
92 
93 #define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
94 
95 /*
96  * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
97  * 64 bit aligned so the offset has to be multiples of 8 which is ensured
98  * by the only callers in bcm_rx_cmp_to_index() bcm_rx_handler().
99  */
static inline u64 get_u64(const struct canfd_frame *cp, int offset)
{
	/* cast is safe: cp->data is 64 bit aligned and the callers only pass
	 * offsets that are multiples of 8 (see comment above)
	 */
	return *(u64 *)(cp->data + offset);
}
104 
/*
 * bcm_op - one broadcast manager operation (created by TX_SETUP/RX_SETUP)
 */
struct bcm_op {
	struct list_head list;		/* anchor in bcm_sock tx_ops/rx_ops */
	struct rcu_head rcu;		/* deferred free via bcm_free_op_rcu() */
	int ifindex;			/* bound CAN interface (0 = any) */
	canid_t can_id;			/* CAN ID to send / to filter for */
	u32 flags;			/* bcm_msg_head flags incl. CAN_FD_FRAME */
	unsigned long frames_abs, frames_filtered; /* statistics counters */
	struct bcm_timeval ival1, ival2; /* user supplied timer values */
	struct hrtimer timer, thrtimer;	/* cyclic tx / rx timeout + throttle timer */
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;			/* ifindex of the last received frame */
	int cfsiz;			/* CAN_MTU or CANFD_MTU, see CFSIZ() */
	u32 count;			/* remaining limited transmissions (TX) */
	u32 nframes;			/* number of elements in frames[] */
	u32 currframe;			/* index of the next frame to send (TX) */
	/* void pointers to arrays of struct can[fd]_frame */
	void *frames;
	void *last_frames;
	struct canfd_frame sframe;	/* storage used when nframes == 1 */
	struct canfd_frame last_sframe;	/* dito for last_frames */
	struct sock *sk;		/* the owning BCM socket */
	struct net_device *rx_reg_dev;	/* device registered via can_rx_register() */
	spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */
};
129 
struct bcm_sock {
	struct sock sk;		/* must stay first: bcm_sk() casts sock pointers */
	int bound;		/* bind state - set/used outside this chunk */
	int ifindex;		/* bound interface, shown in procfs output */
	struct list_head notifier;	/* link in bcm_notifier_list */
	struct list_head rx_ops;	/* active RX_SETUP operations */
	struct list_head tx_ops;	/* active TX_SETUP operations */
	unsigned long dropped_usr_msgs;	/* counted in bcm_send_to_user() */
	struct proc_dir_entry *bcm_proc_read;	/* per-socket procfs entry */
	char procname [32]; /* inode number in decimal with \0 */
};
141 
/* list of bcm_sock entries (linked via bo->notifier) for netdevice events;
 * the spinlock presumably serializes list access and bcm_busy_notifier marks
 * the socket currently being handled - NOTE(review): the notifier code is
 * outside this chunk, confirm against bcm_notifier()
 */
static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;
145 
146 /* Return pointer to store the extra msg flags for bcm_recvmsg().
147  * We use the space of one unsigned int beyond the 'struct sockaddr_can'
148  * in skb->cb.
149  */
bcm_flags(struct sk_buff * skb)150 static inline unsigned int *bcm_flags(struct sk_buff *skb)
151 {
152 	/* return pointer after struct sockaddr_can */
153 	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
154 }
155 
bcm_sk(const struct sock * sk)156 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
157 {
158 	return (struct bcm_sock *)sk;
159 }
160 
bcm_timeval_to_ktime(struct bcm_timeval tv)161 static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
162 {
163 	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
164 }
165 
166 /* check limitations for timeval provided by user */
bcm_is_invalid_tv(struct bcm_msg_head * msg_head)167 static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
168 {
169 	if ((msg_head->ival1.tv_sec < 0) ||
170 	    (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
171 	    (msg_head->ival1.tv_usec < 0) ||
172 	    (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
173 	    (msg_head->ival2.tv_sec < 0) ||
174 	    (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
175 	    (msg_head->ival2.tv_usec < 0) ||
176 	    (msg_head->ival2.tv_usec >= USEC_PER_SEC))
177 		return true;
178 
179 	return false;
180 }
181 
182 #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
183 #define OPSIZ sizeof(struct bcm_op)
184 #define MHSIZ sizeof(struct bcm_msg_head)
185 
186 /*
187  * procfs functions
188  */
189 #if IS_ENABLED(CONFIG_PROC_FS)
bcm_proc_getifname(struct net * net,char * result,int ifindex)190 static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
191 {
192 	struct net_device *dev;
193 
194 	if (!ifindex)
195 		return "any";
196 
197 	rcu_read_lock();
198 	dev = dev_get_by_index_rcu(net, ifindex);
199 	if (dev)
200 		strcpy(result, dev->name);
201 	else
202 		strcpy(result, "???");
203 	rcu_read_unlock();
204 
205 	return result;
206 }
207 
/* dump the state of one BCM socket (header line + one line per rx/tx op) */
static int bcm_proc_show(struct seq_file *m, void *v)
{
	char ifname[IFNAMSIZ];
	struct net *net = m->private;
	struct sock *sk = (struct sock *)pde_data(m->file->f_inode);
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	/* per-socket header line */
	seq_printf(m, ">>> socket %pK", sk->sk_socket);
	seq_printf(m, " / sk %pK", sk);
	seq_printf(m, " / bo %pK", bo);
	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
	seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
	seq_printf(m, " <<<\n");

	/* the op lists are modified with list_del_rcu() - walk under RCU */
	rcu_read_lock();

	list_for_each_entry_rcu(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		/* (nframes) marks CAN FD ops, [nframes] classic CAN ops */
		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u)", op->nframes);
		else
			seq_printf(m, "[%u]", op->nframes);

		seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');

		if (op->kt_ival1)
			seq_printf(m, "timeo=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "thr=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# recv %ld (%ld) => reduction: ",
			   op->frames_filtered, op->frames_abs);

		/* percentage of frames filtered out before the user saw them */
		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		seq_printf(m, "%s%ld%%\n",
			   (reduction == 100) ? "near " : "", reduction);
	}

	list_for_each_entry(op, &bo->tx_ops, list) {

		seq_printf(m, "tx_op: %03X %s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u) ", op->nframes);
		else
			seq_printf(m, "[%u] ", op->nframes);

		if (op->kt_ival1)
			seq_printf(m, "t1=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "t2=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# sent %ld\n", op->frames_abs);
	}
	seq_putc(m, '\n');

	rcu_read_unlock();

	return 0;
}
286 #endif /* CONFIG_PROC_FS */
287 
288 /*
289  * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
290  *              of the given bcm tx op
291  */
/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 *              of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct can_skb_ext *csx;
	struct net_device *dev;
	struct canfd_frame *cf;
	int err;

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	/* read currframe under lock protection */
	spin_lock_bh(&op->bcm_tx_lock);
	/* the frames array is never grown while the op exists (updates
	 * asking for more frames are rejected with -E2BIG in bcm_tx_setup()),
	 * so the computed frame pointer stays valid after dropping the lock
	 */
	cf = op->frames + op->cfsiz * op->currframe;
	spin_unlock_bh(&op->bcm_tx_lock);

	dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	skb = alloc_skb(op->cfsiz, gfp_any());
	if (!skb)
		goto out;

	csx = can_skb_ext_add(skb);
	if (!csx) {
		kfree_skb(skb);
		goto out;
	}

	/* record the originating interface index in the skb extension */
	csx->can_iif = dev->ifindex;

	skb_put_data(skb, cf, op->cfsiz);

	/* send with loopback */
	skb->dev = dev;
	can_skb_set_owner(skb, op->sk);
	err = can_send(skb, 1);

	/* update currframe and count under lock protection */
	spin_lock_bh(&op->bcm_tx_lock);

	/* statistics: count only frames that were actually sent */
	if (!err)
		op->frames_abs++;

	op->currframe++;

	/* reached last frame? => wrap around for multiplex transmission */
	if (op->currframe >= op->nframes)
		op->currframe = 0;

	/* count down the remaining limited transmissions */
	if (op->count > 0)
		op->count--;

	spin_unlock_bh(&op->bcm_tx_lock);
out:
	dev_put(dev);
}
353 
354 /*
355  * bcm_send_to_user - send a BCM message to the userspace
356  *                    (consisting of bcm_msg_head + x CAN frames)
357  */
/*
 * bcm_send_to_user - send a BCM message to the userspace
 *                    (consisting of bcm_msg_head + x CAN frames)
 *
 * On allocation failure or a full receive queue the notification is
 * silently dropped - only bo->dropped_usr_msgs is incremented.
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct canfd_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct canfd_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	unsigned int datalen = head->nframes * op->cfsiz;
	unsigned int *pflags;
	enum skb_drop_reason reason;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	skb_put_data(skb, head, sizeof(*head));

	/* ensure space for sockaddr_can and msg flags */
	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
			       sizeof(unsigned int));

	/* initialize msg flags (stored behind the sockaddr_can in skb->cb) */
	pflags = bcm_flags(skb);
	*pflags = 0;

	if (head->nframes) {
		/* CAN frames starting here */
		firstframe = (struct canfd_frame *)skb_tail_pointer(skb);

		skb_put_data(skb, frames, datalen);

		/*
		 * the BCM uses the flags-element of the canfd_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1) {
			/* map the private traffic flags onto the msg_flags
			 * which bcm_recvmsg() hands to the user
			 */
			if (firstframe->flags & RX_LOCAL)
				*pflags |= MSG_DONTROUTE;
			if (firstframe->flags & RX_OWN)
				*pflags |= MSG_CONFIRM;

			/* strip the private flags before copying to user */
			firstframe->flags &= BCM_CAN_FLAGS_MASK;
		}
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 *  Put the datagram to the queue so that bcm_recvmsg() can
	 *  get it from there.  We need to pass the interface index to
	 *  bcm_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
	 *  containing the interface index.
	 */

	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family  = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	reason = sock_queue_rcv_skb_reason(sk, skb);
	if (reason) {
		struct bcm_sock *bo = bcm_sk(sk);

		sk_skb_reason_drop(sk, skb, reason);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}
431 
bcm_tx_set_expiry(struct bcm_op * op,struct hrtimer * hrt)432 static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
433 {
434 	ktime_t ival;
435 
436 	if (op->kt_ival1 && op->count)
437 		ival = op->kt_ival1;
438 	else if (op->kt_ival2)
439 		ival = op->kt_ival2;
440 	else
441 		return false;
442 
443 	hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
444 	return true;
445 }
446 
bcm_tx_start_timer(struct bcm_op * op)447 static void bcm_tx_start_timer(struct bcm_op *op)
448 {
449 	if (bcm_tx_set_expiry(op, &op->timer))
450 		hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
451 }
452 
453 /* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
/* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	/* first phase: send with interval ival1 while count is not zero */
	if (op->kt_ival1 && (op->count > 0)) {
		bcm_can_tx(op);
		/* bcm_can_tx() decremented op->count - did it just expire? */
		if (!op->count && (op->flags & TX_COUNTEVT)) {

			/* create notification to user */
			/* memset (not an initializer) also clears struct
			 * padding as the head is copied to userspace
			 */
			memset(&msg_head, 0, sizeof(msg_head));
			msg_head.opcode  = TX_EXPIRED;
			msg_head.flags   = op->flags;
			msg_head.count   = op->count;
			msg_head.ival1   = op->ival1;
			msg_head.ival2   = op->ival2;
			msg_head.can_id  = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}

	} else if (op->kt_ival2) {
		/* second phase: continue (unlimited) with interval ival2 */
		bcm_can_tx(op);
	}

	/* rearm when another interval applies (see bcm_tx_set_expiry()) */
	return bcm_tx_set_expiry(op, &op->timer) ?
		HRTIMER_RESTART : HRTIMER_NORESTART;
}
483 
484 /*
485  * bcm_rx_changed - create a RX_CHANGED notification due to changed content
486  */
/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore */
	data->flags &= ~RX_THR;

	/* memset (not an initializer) also clears struct padding as the
	 * head is copied to userspace in bcm_send_to_user()
	 */
	memset(&head, 0, sizeof(head));
	head.opcode  = RX_CHANGED;
	head.flags   = op->flags;
	head.count   = op->count;
	head.ival1   = op->ival1;
	head.ival2   = op->ival2;
	head.can_id  = op->can_id;
	head.nframes = 1;

	/* exactly one frame, with restored rx timestamp */
	bcm_send_to_user(op, &head, data, 1);
}
512 
513 /*
514  * bcm_rx_update_and_send - process a detected relevant receive content change
515  *                          1. update the last received data
516  *                          2. send a notification to the user (if possible)
517  */
/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 *                          1. update the last received data
 *                          2. send a notification to the user (if possible)
 *
 * @traffic_flags: RX_LOCAL/RX_OWN bits describing the frame's origin
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct canfd_frame *lastdata,
				   const struct canfd_frame *rxdata,
				   unsigned char traffic_flags)
{
	memcpy(lastdata, rxdata, op->cfsiz);

	/* mark as used and throttled by default */
	lastdata->flags |= (RX_RECV|RX_THR);

	/* add own/local/remote traffic flags */
	lastdata->flags |= traffic_flags;

	/* throttling mode inactive ? */
	if (!op->kt_ival2) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with enabled throttling mode */
	if (!op->kt_lastmsg)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		/* absolute expiry: time of the last message plus ival2 */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS_SOFT);
		return;
	}

	/* the gap was that big, that throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}
561 
562 /*
563  * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
564  *                       received data stored in op->last_frames[]
565  */
/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 *
 * op->frames[index] acts as the bitmask: only bits set there are relevant
 * for the content change detection.
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
				const struct canfd_frame *rxdata,
				unsigned char traffic_flags)
{
	struct canfd_frame *cf = op->frames + op->cfsiz * index;
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
	int i;

	/*
	 * no one uses the MSBs of flags for comparison,
	 * so we use it here to detect the first time of reception
	 */

	if (!(lcf->flags & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
		return;
	}

	/* do a real check in CAN frame data section */
	/* compare in 64 bit chunks - the 8-byte steps keep the offsets
	 * aligned as required by get_u64()
	 */
	for (i = 0; i < rxdata->len; i += 8) {
		if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
		    (get_u64(cf, i) & get_u64(lcf, i))) {
			bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
			return;
		}
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in CAN frame length */
		if (rxdata->len != lcf->len) {
			bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
			return;
		}
	}
}
602 
603 /*
604  * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
605  */
bcm_rx_starttimer(struct bcm_op * op)606 static void bcm_rx_starttimer(struct bcm_op *op)
607 {
608 	if (op->flags & RX_NO_AUTOTIMER)
609 		return;
610 
611 	if (op->kt_ival1)
612 		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
613 }
614 
615 /* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
/* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	/* if user wants to be informed, when cyclic CAN-Messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received CAN frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * op->cfsiz);
	}

	/* create notification to user */
	/* memset (not an initializer) also clears struct padding as the
	 * head is copied to userspace
	 */
	memset(&msg_head, 0, sizeof(msg_head));
	msg_head.opcode  = RX_TIMEOUT;
	msg_head.flags   = op->flags;
	msg_head.count   = op->count;
	msg_head.ival1   = op->ival1;
	msg_head.ival2   = op->ival2;
	msg_head.can_id  = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);

	/* one-shot: rearmed on reception via bcm_rx_starttimer() */
	return HRTIMER_NORESTART;
}
641 
642 /*
643  * bcm_rx_do_flush - helper for bcm_rx_thr_flush
644  */
bcm_rx_do_flush(struct bcm_op * op,unsigned int index)645 static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
646 {
647 	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
648 
649 	if ((op->last_frames) && (lcf->flags & RX_THR)) {
650 		bcm_rx_changed(op, lcf);
651 		return 1;
652 	}
653 	return 0;
654 }
655 
656 /*
657  * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
658  */
bcm_rx_thr_flush(struct bcm_op * op)659 static int bcm_rx_thr_flush(struct bcm_op *op)
660 {
661 	int updated = 0;
662 
663 	if (op->nframes > 1) {
664 		unsigned int i;
665 
666 		/* for MUX filter we start at index 1 */
667 		for (i = 1; i < op->nframes; i++)
668 			updated += bcm_rx_do_flush(op, i);
669 
670 	} else {
671 		/* for RX_FILTER_ID and simple filter */
672 		updated += bcm_rx_do_flush(op, 0);
673 	}
674 
675 	return updated;
676 }
677 
678 /*
679  * bcm_rx_thr_handler - the time for blocked content updates is over now:
680  *                      Check for throttled data and send it to the userspace
681  */
bcm_rx_thr_handler(struct hrtimer * hrtimer)682 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
683 {
684 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
685 
686 	if (bcm_rx_thr_flush(op)) {
687 		hrtimer_forward_now(hrtimer, op->kt_ival2);
688 		return HRTIMER_RESTART;
689 	} else {
690 		/* rearm throttle handling */
691 		op->kt_lastmsg = 0;
692 		return HRTIMER_NORESTART;
693 	}
694 }
695 
696 /*
697  * bcm_rx_handler - handle a CAN frame reception
698  */
/*
 * bcm_rx_handler - handle a CAN frame reception
 *
 * Registered via can_rx_register() - @data is the bcm_op of the
 * subscription.
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
	unsigned int i;
	unsigned char traffic_flags;

	/* ignore frames with a different CAN ID */
	if (op->can_id != rxframe->can_id)
		return;

	/* make sure to handle the correct frame type (CAN / CAN FD) */
	if (op->flags & CAN_FD_FRAME) {
		if (!can_is_canfd_skb(skb))
			return;
	} else {
		if (!can_is_can_skb(skb))
			return;
	}

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	/* compute flags to distinguish between own/local/remote CAN traffic */
	traffic_flags = 0;
	if (skb->sk) {
		/* the frame was generated by a socket on this host */
		traffic_flags |= RX_LOCAL;
		/* ... and maybe even by this very socket */
		if (skb->sk == op->sk)
			traffic_flags |= RX_OWN;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, op->last_frames, rxframe,
				       traffic_flags);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe, traffic_flags);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0 - but only the
		 * first 64 bits of the frame data[] are relevant (CAN FD)
		 */

		for (i = 1; i < op->nframes; i++) {
			if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
			    (get_u64(op->frames, 0) &
			     get_u64(op->frames + op->cfsiz * i, 0))) {
				bcm_rx_cmp_to_index(op, i, rxframe,
						    traffic_flags);
				break;
			}
		}
	}

rx_starttimer:
	/* rearm the rx timeout monitoring (unless disabled) */
	bcm_rx_starttimer(op);
}
778 
779 /*
780  * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
781  */
bcm_find_op(struct list_head * ops,struct bcm_msg_head * mh,int ifindex)782 static struct bcm_op *bcm_find_op(struct list_head *ops,
783 				  struct bcm_msg_head *mh, int ifindex)
784 {
785 	struct bcm_op *op;
786 
787 	list_for_each_entry(op, ops, list) {
788 		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
789 		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
790 			return op;
791 	}
792 
793 	return NULL;
794 }
795 
bcm_free_op_rcu(struct rcu_head * rcu_head)796 static void bcm_free_op_rcu(struct rcu_head *rcu_head)
797 {
798 	struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu);
799 
800 	if ((op->frames) && (op->frames != &op->sframe))
801 		kfree(op->frames);
802 
803 	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
804 		kfree(op->last_frames);
805 
806 	kfree(op);
807 }
808 
/* detach timers and schedule the op for RCU-deferred freeing */
static void bcm_remove_op(struct bcm_op *op)
{
	/* make sure no timer handler is running or gets restarted */
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	/* free only after all concurrent readers (e.g. bcm_rx_handler()
	 * callers still traversing the op lists) are done
	 */
	call_rcu(&op->rcu, bcm_free_op_rcu);
}
816 
bcm_rx_unreg(struct net_device * dev,struct bcm_op * op)817 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
818 {
819 	if (op->rx_reg_dev == dev) {
820 		can_rx_unregister(dev_net(dev), dev, op->can_id,
821 				  REGMASK(op->can_id), bcm_rx_handler, op);
822 
823 		/* mark as removed subscription */
824 		op->rx_reg_dev = NULL;
825 	} else
826 		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
827 		       "mismatch %p %p\n", op->rx_reg_dev, dev);
828 }
829 
830 /*
831  * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
832  */
/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		/* match on can_id, ifindex and the CAN_FD_FRAME flag */
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {

			/* disable automatic timer on frame reception */
			op->flags |= RX_NO_AUTOTIMER;

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems) can_rx_unregister() is always a save
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(sock_net(op->sk),
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(sock_net(op->sk), NULL,
						  op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			/* RCU-safe removal: freeing is deferred in
			 * bcm_remove_op() until all readers are done
			 */
			list_del_rcu(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}
880 
881 /*
882  * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
883  */
bcm_delete_tx_op(struct list_head * ops,struct bcm_msg_head * mh,int ifindex)884 static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
885 			    int ifindex)
886 {
887 	struct bcm_op *op, *n;
888 
889 	list_for_each_entry_safe(op, n, ops, list) {
890 		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
891 		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
892 			list_del_rcu(&op->list);
893 			bcm_remove_op(op);
894 			return 1; /* done */
895 		}
896 	}
897 
898 	return 0; /* not found */
899 }
900 
901 /*
902  * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
903  */
bcm_read_op(struct list_head * ops,struct bcm_msg_head * msg_head,int ifindex)904 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
905 		       int ifindex)
906 {
907 	struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);
908 
909 	if (!op)
910 		return -EINVAL;
911 
912 	/* put current values into msg_head */
913 	msg_head->flags   = op->flags;
914 	msg_head->count   = op->count;
915 	msg_head->ival1   = op->ival1;
916 	msg_head->ival2   = op->ival2;
917 	msg_head->nframes = op->nframes;
918 
919 	bcm_send_to_user(op, msg_head, op->frames, 0);
920 
921 	return MHSIZ;
922 }
923 
924 /*
925  * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
926  */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	struct canfd_frame *cf;
	unsigned int i;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* check nframes boundaries - we need at least one CAN frame */
	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update CAN frames content */
		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);

			/* reject data lengths exceeding the frame type's maximum */
			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}
		op->flags = msg_head->flags;

		/* only lock for unlikely count/nframes/currframe changes */
		if (op->nframes != msg_head->nframes ||
		    op->flags & TX_RESET_MULTI_IDX ||
		    op->flags & SETTIMER) {

			spin_lock_bh(&op->bcm_tx_lock);

			if (op->nframes != msg_head->nframes ||
			    op->flags & TX_RESET_MULTI_IDX) {
				/* potentially update changed nframes */
				op->nframes = msg_head->nframes;
				/* restart multiple frame transmission */
				op->currframe = 0;
			}

			if (op->flags & SETTIMER)
				op->count = msg_head->count;

			spin_unlock_bh(&op->bcm_tx_lock);
		}

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		spin_lock_init(&op->bcm_tx_lock);
		op->can_id = msg_head->can_id;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;
		op->nframes = msg_head->nframes;

		if (op->flags & SETTIMER)
			op->count = msg_head->count;

		/* create array for CAN frames and copy the data */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc_array(msg_head->nframes,
						   op->cfsiz,
						   GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			/* single frame: use the embedded storage, no extra alloc */
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
			if (err < 0)
				goto free_op;

			/* reject data lengths exceeding the frame type's maximum */
			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0)
				goto free_op;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_setup(&op->timer, bcm_tx_timeout_handler, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_SOFT);

		/* currently unused in tx_ops */
		hrtimer_setup(&op->thrtimer, hrtimer_dummy_timeout, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_SOFT);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1 && !op->kt_ival2)
			hrtimer_cancel(&op->timer);
	}

	if (op->flags & STARTTIMER) {
		hrtimer_cancel(&op->timer);
		/* spec: send CAN frame when starting timer */
		op->flags |= TX_ANNOUNCE;
	}

	if (op->flags & TX_ANNOUNCE)
		bcm_can_tx(op);

	if (op->flags & STARTTIMER)
		bcm_tx_start_timer(op);

	/* return the number of bytes consumed from the user message */
	return msg_head->nframes * op->cfsiz + MHSIZ;

free_op:
	/* only free a separately allocated frame array, not the embedded one */
	if (op->frames != &op->sframe)
		kfree(op->frames);
	kfree(op);
	return err;
}
1110 
/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	/* the first element contains the mux-mask => MAX_NFRAMES + 1  */
	if (msg_head->nframes > MAX_NFRAMES + 1)
		return -EINVAL;

	/* RTR-mode requires exactly one frame and the RTR flag in can_id */
	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update CAN frames content */
			err = memcpy_from_msg(op->frames, msg,
					      msg_head->nframes * op->cfsiz);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
		}

		op->nframes = msg_head->nframes;
		op->flags = msg_head->flags;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		spin_lock_init(&op->bcm_tx_lock);
		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;

		if (msg_head->nframes > 1) {
			/* create array for CAN frames and copy the data */
			op->frames = kmalloc_array(msg_head->nframes,
						   op->cfsiz,
						   GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received CAN frames */
			op->last_frames = kcalloc(msg_head->nframes,
						  op->cfsiz,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			/* single frame: use the embedded storage, no extra allocs */
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_from_msg(op->frames, msg,
					      msg_head->nframes * op->cfsiz);
			if (err < 0) {
				/* roll back all allocations done above */
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* ifindex for timeout events w/o previous frame reception */
		op->rx_ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_setup(&op->timer, bcm_rx_timeout_handler, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_SOFT);
		hrtimer_setup(&op->thrtimer, bcm_rx_thr_handler, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_SOFT);

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */

	if (op->flags & RX_RTR_FRAME) {
		struct canfd_frame *frame0 = op->frames;

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (frame0->can_id == op->can_id))
			frame0->can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {

			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = 0;
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL_SOFT);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ifindex);
			if (dev) {
				err = can_rx_register(sock_net(sk), dev,
						      op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm", sk);

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			/* subscribe on 'any' CAN device (ifindex == 0) */
			err = can_rx_register(sock_net(sk), NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm", sk);
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del_rcu(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	/* return the number of bytes consumed from the user message */
	return msg_head->nframes * op->cfsiz + MHSIZ;
}
1319 
/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
		       int cfsiz)
{
	struct sk_buff *skb;
	struct can_skb_ext *csx;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(cfsiz, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* attach the CAN skb extension carrying the incoming interface */
	csx = can_skb_ext_add(skb);
	if (!csx) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	/* copy exactly one CAN frame from the user message into the skb */
	err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	csx->can_iif = dev->ifindex;
	skb->dev = dev;
	can_skb_set_owner(skb, sk);
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	/* return the number of bytes consumed from the user message */
	return cfsiz + MHSIZ;
}
1368 
/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 */
static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int cfsiz;
	int ret; /* read bytes or error codes as return value */

	/* BCM sockets must be connected before sending commands */
	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ)
		return -EINVAL;

	/* read message head information */
	ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
	if (ret < 0)
		return ret;

	/* the payload must be a whole number of (CAN or CAN FD) frames */
	cfsiz = CFSIZ(msg_head.flags);
	if ((size - MHSIZ) % cfsiz)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < BCM_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			/* only verify the device here - bcm_ops look it up again */
			dev = dev_get_by_index(sock_net(sk), ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode  = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode  = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one CAN frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}
1483 
/*
 * notification handler for netdevice status changes
 */
static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	/* only handle events from the socket's own network namespace */
	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
#if IS_ENABLED(CONFIG_PROC_FS)
			if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) {
				remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
				bo->bcm_proc_read = NULL;
			}
#endif
			bo->bound   = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		/* report the error outside the socket lock */
		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		/* the bound device went down: signal ENETDOWN to the user */
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk_error_report(sk);
		}
	}
}
1537 
/* netdevice notifier: fan the event out to all registered BCM sockets */
static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	spin_lock(&bcm_notifier_lock);
	/*
	 * bcm_busy_notifier marks the entry being processed so the lock
	 * can be dropped around the bcm_notify() callback; bcm_release()
	 * spins on this marker before removing a socket from the list.
	 */
	list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
		spin_unlock(&bcm_notifier_lock);
		bcm_notify(bcm_busy_notifier, msg, dev);
		spin_lock(&bcm_notifier_lock);
	}
	bcm_busy_notifier = NULL;
	spin_unlock(&bcm_notifier_lock);
	return NOTIFY_DONE;
}
1560 
1561 /*
1562  * initial settings for all BCM sockets to be set at socket creation time
1563  */
bcm_init(struct sock * sk)1564 static int bcm_init(struct sock *sk)
1565 {
1566 	struct bcm_sock *bo = bcm_sk(sk);
1567 
1568 	bo->bound            = 0;
1569 	bo->ifindex          = 0;
1570 	bo->dropped_usr_msgs = 0;
1571 	bo->bcm_proc_read    = NULL;
1572 
1573 	INIT_LIST_HEAD(&bo->tx_ops);
1574 	INIT_LIST_HEAD(&bo->rx_ops);
1575 
1576 	/* set notifier */
1577 	spin_lock(&bcm_notifier_lock);
1578 	list_add_tail(&bo->notifier, &bcm_notifier_list);
1579 	spin_unlock(&bcm_notifier_lock);
1580 
1581 	return 0;
1582 }
1583 
/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net;
	struct bcm_sock *bo;
	struct bcm_op *op, *next;

	if (!sk)
		return 0;

	net = sock_net(sk);
	bo = bcm_sk(sk);

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	/* wait until an in-flight bcm_notifier() is done with this socket */
	spin_lock(&bcm_notifier_lock);
	while (bcm_busy_notifier == bo) {
		spin_unlock(&bcm_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&bcm_notifier_lock);
	}
	list_del(&bo->notifier);
	spin_unlock(&bcm_notifier_lock);

	lock_sock(sk);

#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove procfs entry */
	if (net->can.bcmproc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, net->can.bcmproc_dir);
#endif /* CONFIG_PROC_FS */

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems)
		 * can_rx_unregister() is always a save thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(net, NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);

	}

	/* let concurrent bcm_rx_handler() calls finish before freeing ops */
	synchronize_rcu();

	list_for_each_entry_safe(op, next, &bo->rx_ops, list)
		bcm_remove_op(op);

	/* remove device reference */
	if (bo->bound) {
		bo->bound   = 0;
		bo->ifindex = 0;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	sock_put(sk);

	return 0;
}
1669 
/* bind a (single) device to the socket - BCM uses connect() for binding */
static int bcm_connect(struct socket *sock, struct sockaddr_unsized *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	struct net *net = sock_net(sk);
	int ret = 0;

	if (len < BCM_MIN_NAMELEN)
		return -EINVAL;

	lock_sock(sk);

	/* a BCM socket can only be connected once */
	if (bo->bound) {
		ret = -EISCONN;
		goto fail;
	}

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(net, addr->can_ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto fail;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			ret = -ENODEV;
			goto fail;
		}

		/* only the ifindex is stored - no device reference is held */
		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

#if IS_ENABLED(CONFIG_PROC_FS)
	if (net->can.bcmproc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%llu", sock_i_ino(sk));
		bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
						     net->can.bcmproc_dir,
						     bcm_proc_show, sk);
		if (!bo->bcm_proc_read) {
			ret = -ENOMEM;
			goto fail;
		}
	}
#endif /* CONFIG_PROC_FS */

	bo->bound = 1;

fail:
	release_sock(sk);

	return ret;
}
1733 
/* deliver a queued BCM message (msg head + frames) to userspace */
static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int err;

	skb = skb_recv_datagram(sk, flags, &error);
	if (!skb)
		return error;

	/* copy at most the queued message length to userspace */
	if (skb->len < size)
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_cmsgs(msg, sk, skb);

	if (msg->msg_name) {
		/* the sender address was stored in skb->cb on reception */
		__sockaddr_check_size(BCM_MIN_NAMELEN);
		msg->msg_namelen = BCM_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in bcm_send_to_user() */
	msg->msg_flags |= *(bcm_flags(skb));

	skb_free_datagram(sk, skb);

	return size;
}
1770 
static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}
1777 
/* socket-layer operations for CAN_BCM sockets */
static const struct proto_ops bcm_ops = {
	.family        = PF_CAN,
	.release       = bcm_release,
	.bind          = sock_no_bind,
	.connect       = bcm_connect,
	.socketpair    = sock_no_socketpair,
	.accept        = sock_no_accept,
	.getname       = sock_no_getname,
	.poll          = datagram_poll,
	.ioctl         = bcm_sock_no_ioctlcmd,
	.gettstamp     = sock_gettstamp,
	.listen        = sock_no_listen,
	.shutdown      = sock_no_shutdown,
	.sendmsg       = bcm_sendmsg,
	.recvmsg       = bcm_recvmsg,
	.mmap          = sock_no_mmap,
};
1795 
/* protocol definition: bcm_init() runs on every new CAN_BCM socket */
static struct proto bcm_proto __read_mostly = {
	.name       = "CAN_BCM",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct bcm_sock),
	.init       = bcm_init,
};
1802 
/* registration record tying SOCK_DGRAM/CAN_BCM to the ops/proto above */
static const struct can_proto bcm_can_proto = {
	.type       = SOCK_DGRAM,
	.protocol   = CAN_BCM,
	.ops        = &bcm_ops,
	.prot       = &bcm_proto,
};
1809 
/* per-netns setup: a failed proc mkdir is tolerated (dir stays NULL) */
static int canbcm_pernet_init(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* create /proc/net/can-bcm directory */
	net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */

	return 0;
}
1819 
/* per-netns teardown counterpart of canbcm_pernet_init() */
static void canbcm_pernet_exit(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove /proc/net/can-bcm directory */
	if (net->can.bcmproc_dir)
		remove_proc_entry("can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */
}
1828 
/* per-network-namespace init/exit hooks */
static struct pernet_operations canbcm_pernet_ops __read_mostly = {
	.init = canbcm_pernet_init,
	.exit = canbcm_pernet_exit,
};
1833 
/* netdevice event notifier registered in bcm_module_init() */
static struct notifier_block canbcm_notifier = {
	.notifier_call = bcm_notifier
};
1837 
/* module init: register pernet ops, netdev notifier and the BCM protocol */
static int __init bcm_module_init(void)
{
	int err;

	pr_info("can: broadcast manager protocol\n");

	err = register_pernet_subsys(&canbcm_pernet_ops);
	if (err)
		return err;

	err = register_netdevice_notifier(&canbcm_notifier);
	if (err)
		goto register_notifier_failed;

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		goto register_proto_failed;
	}

	return 0;

	/* unwind in reverse order of registration */
register_proto_failed:
	unregister_netdevice_notifier(&canbcm_notifier);
register_notifier_failed:
	unregister_pernet_subsys(&canbcm_pernet_ops);
	return err;
}
1866 
/* module exit: tear down in reverse order of bcm_module_init() */
static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);
	unregister_netdevice_notifier(&canbcm_notifier);
	unregister_pernet_subsys(&canbcm_pernet_ops);
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);
1876