Lines matching references to 'op' in net/can/bcm.c

213 struct bcm_op *op; in bcm_proc_show() local
224 list_for_each_entry_rcu(op, &bo->rx_ops, list) { in bcm_proc_show()
229 if (!op->frames_abs) in bcm_proc_show()
232 seq_printf(m, "rx_op: %03X %-5s ", op->can_id, in bcm_proc_show()
233 bcm_proc_getifname(net, ifname, op->ifindex)); in bcm_proc_show()
235 if (op->flags & CAN_FD_FRAME) in bcm_proc_show()
236 seq_printf(m, "(%u)", op->nframes); in bcm_proc_show()
238 seq_printf(m, "[%u]", op->nframes); in bcm_proc_show()
240 seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' '); in bcm_proc_show()
242 if (op->kt_ival1) in bcm_proc_show()
244 (long long)ktime_to_us(op->kt_ival1)); in bcm_proc_show()
246 if (op->kt_ival2) in bcm_proc_show()
248 (long long)ktime_to_us(op->kt_ival2)); in bcm_proc_show()
251 op->frames_filtered, op->frames_abs); in bcm_proc_show()
253 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs; in bcm_proc_show()
259 list_for_each_entry(op, &bo->tx_ops, list) { in bcm_proc_show()
261 seq_printf(m, "tx_op: %03X %s ", op->can_id, in bcm_proc_show()
262 bcm_proc_getifname(net, ifname, op->ifindex)); in bcm_proc_show()
264 if (op->flags & CAN_FD_FRAME) in bcm_proc_show()
265 seq_printf(m, "(%u) ", op->nframes); in bcm_proc_show()
267 seq_printf(m, "[%u] ", op->nframes); in bcm_proc_show()
269 if (op->kt_ival1) in bcm_proc_show()
271 (long long)ktime_to_us(op->kt_ival1)); in bcm_proc_show()
273 if (op->kt_ival2) in bcm_proc_show()
275 (long long)ktime_to_us(op->kt_ival2)); in bcm_proc_show()
277 seq_printf(m, "# sent %ld\n", op->frames_abs); in bcm_proc_show()
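
The seq_printf() calls above build the per-socket statistics that each BCM socket exposes under /proc/net/can-bcm/ (one file per socket, created by net/can/bcm.c). A minimal sketch of how that output could be inspected from userspace; the directory name is taken from the bcm module, the per-socket file names are treated as opaque, and the whole example is an assumption about usage rather than part of the listing:

#include <dirent.h>
#include <stdio.h>

/* Dump every per-socket BCM statistics file under /proc/net/can-bcm/.
 * The directory is created by the can-bcm module; the individual file
 * names are treated as opaque and simply iterated. */
int main(void)
{
	struct dirent *de;
	char path[512], line[256];
	DIR *dir = opendir("/proc/net/can-bcm");

	if (!dir)
		return 1;

	while ((de = readdir(dir)) != NULL) {
		FILE *f;

		if (de->d_name[0] == '.')
			continue;

		snprintf(path, sizeof(path), "/proc/net/can-bcm/%s", de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;

		printf(">>> %s\n", path);
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}

	closedir(dir);
	return 0;
}
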
291 static void bcm_can_tx(struct bcm_op *op) in bcm_can_tx() argument
299 if (!op->ifindex) in bcm_can_tx()
303 spin_lock_bh(&op->bcm_tx_lock); in bcm_can_tx()
304 cf = op->frames + op->cfsiz * op->currframe; in bcm_can_tx()
305 spin_unlock_bh(&op->bcm_tx_lock); in bcm_can_tx()
307 dev = dev_get_by_index(sock_net(op->sk), op->ifindex); in bcm_can_tx()
313 skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any()); in bcm_can_tx()
321 skb_put_data(skb, cf, op->cfsiz); in bcm_can_tx()
325 can_skb_set_owner(skb, op->sk); in bcm_can_tx()
329 spin_lock_bh(&op->bcm_tx_lock); in bcm_can_tx()
332 op->frames_abs++; in bcm_can_tx()
334 op->currframe++; in bcm_can_tx()
337 if (op->currframe >= op->nframes) in bcm_can_tx()
338 op->currframe = 0; in bcm_can_tx()
340 if (op->count > 0) in bcm_can_tx()
341 op->count--; in bcm_can_tx()
343 spin_unlock_bh(&op->bcm_tx_lock); in bcm_can_tx()
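
bcm_can_tx() transmits op->frames[currframe] under bcm_tx_lock and then advances currframe, wrapping at nframes, so a TX op with nframes > 1 walks through a frame sequence on every expiry. A hedged userspace sketch of creating such a sequence; the helper name, the CAN ID, the payloads and the assumption that 's' is an already connected CAN_BCM socket are all illustrative:

#include <string.h>
#include <unistd.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

/* Cycle through two different payloads on CAN ID 0x123, one every 50 ms.
 * bcm_can_tx() picks frames[currframe] and wraps currframe at nframes.
 * Assumes 's' is a connected CAN_BCM socket. */
static int bcm_tx_setup_sequence(int s)
{
	struct {
		struct bcm_msg_head head;
		struct can_frame frames[2];
	} msg;

	memset(&msg, 0, sizeof(msg));
	msg.head.opcode  = TX_SETUP;
	msg.head.can_id  = 0x123;		/* illustrative CAN ID */
	msg.head.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
	msg.head.ival2.tv_usec = 50000;		/* 50 ms between frames */
	msg.head.nframes = 2;

	msg.frames[0].len = 1;			/* can_dlc in older headers */
	msg.frames[0].data[0] = 0x01;
	msg.frames[1].len = 1;
	msg.frames[1].data[0] = 0x02;

	return write(s, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}
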
352 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, in bcm_send_to_user() argument
358 struct sock *sk = op->sk; in bcm_send_to_user()
359 unsigned int datalen = head->nframes * op->cfsiz; in bcm_send_to_user()
402 skb->tstamp = op->rx_stamp; in bcm_send_to_user()
415 addr->can_ifindex = op->rx_ifindex; in bcm_send_to_user()
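
bcm_send_to_user() queues a bcm_msg_head followed by head->nframes frames on the socket's receive queue; this is how RX_CHANGED, RX_TIMEOUT, TX_EXPIRED and the *_STATUS answers reach userspace. A hedged sketch of the matching read loop; the helper name is made up, 's' is assumed to be a connected CAN_BCM socket with at least one op configured, and classic (non CAN_FD_FRAME) ops are assumed:

#include <stdio.h>
#include <unistd.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

/* Read BCM notifications (RX_CHANGED, RX_TIMEOUT, TX_EXPIRED, ...) from a
 * connected CAN_BCM socket. Classic CAN framing is assumed, i.e. the ops
 * were created without the CAN_FD_FRAME flag. */
static void bcm_read_loop(int s)
{
	struct {
		struct bcm_msg_head head;
		struct can_frame frames[256];	/* MAX_NFRAMES in the kernel */
	} msg;

	for (;;) {
		ssize_t n = read(s, &msg, sizeof(msg));

		if (n < (ssize_t)sizeof(struct bcm_msg_head))
			break;

		printf("opcode %u can_id %03X nframes %u\n",
		       msg.head.opcode, msg.head.can_id, msg.head.nframes);

		for (unsigned int i = 0; i < msg.head.nframes; i++)
			printf("  frame[%u]: len %u data[0] %02X\n", i,
			       (unsigned int)msg.frames[i].len,
			       (unsigned int)msg.frames[i].data[0]);
	}
}
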
427 static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt) in bcm_tx_set_expiry() argument
431 if (op->kt_ival1 && op->count) in bcm_tx_set_expiry()
432 ival = op->kt_ival1; in bcm_tx_set_expiry()
433 else if (op->kt_ival2) in bcm_tx_set_expiry()
434 ival = op->kt_ival2; in bcm_tx_set_expiry()
442 static void bcm_tx_start_timer(struct bcm_op *op) in bcm_tx_start_timer() argument
444 if (bcm_tx_set_expiry(op, &op->timer)) in bcm_tx_start_timer()
445 hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT); in bcm_tx_start_timer()
451 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); in bcm_tx_timeout_handler() local
454 if (op->kt_ival1 && (op->count > 0)) { in bcm_tx_timeout_handler()
455 bcm_can_tx(op); in bcm_tx_timeout_handler()
456 if (!op->count && (op->flags & TX_COUNTEVT)) { in bcm_tx_timeout_handler()
461 msg_head.flags = op->flags; in bcm_tx_timeout_handler()
462 msg_head.count = op->count; in bcm_tx_timeout_handler()
463 msg_head.ival1 = op->ival1; in bcm_tx_timeout_handler()
464 msg_head.ival2 = op->ival2; in bcm_tx_timeout_handler()
465 msg_head.can_id = op->can_id; in bcm_tx_timeout_handler()
468 bcm_send_to_user(op, &msg_head, NULL, 0); in bcm_tx_timeout_handler()
471 } else if (op->kt_ival2) { in bcm_tx_timeout_handler()
472 bcm_can_tx(op); in bcm_tx_timeout_handler()
475 return bcm_tx_set_expiry(op, &op->timer) ? in bcm_tx_timeout_handler()
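
The handler above implements the BCM's two-phase transmit timing: while op->count is non-zero the frame goes out every ival1 (and TX_COUNTEVT makes the kernel queue a TX_EXPIRED notification once the count is used up), afterwards transmission continues at ival2, or stops if ival2 is zero. A complete minimal sketch that sets such an operation up; the interface name can0, the CAN ID and the payload are assumptions:

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct {
		struct bcm_msg_head head;
		struct can_frame frame;
	} msg;
	int s;

	s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
	if (s < 0)
		return 1;

	addr.can_ifindex = if_nametoindex("can0");	/* assumed interface */
	if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	memset(&msg, 0, sizeof(msg));
	msg.head.opcode  = TX_SETUP;
	msg.head.can_id  = 0x123;			/* illustrative CAN ID */
	msg.head.flags   = SETTIMER | STARTTIMER | TX_COUNTEVT | TX_CP_CAN_ID;
	msg.head.count   = 10;				/* 10 frames at ival1 ... */
	msg.head.ival1.tv_usec = 10000;			/* ... every 10 ms ...   */
	msg.head.ival2.tv_usec = 100000;		/* ... then every 100 ms */
	msg.head.nframes = 1;
	msg.frame.len    = 2;				/* can_dlc in older headers */
	msg.frame.data[0] = 0xde;
	msg.frame.data[1] = 0xad;

	if (write(s, &msg, sizeof(msg)) < 0)
		return 1;

	sleep(5);					/* let the kernel cycle */
	close(s);
	return 0;
}

The TX_EXPIRED message produced when the count runs out could be collected with a read loop like the one sketched after bcm_send_to_user() above.
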
482 static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data) in bcm_rx_changed() argument
487 op->frames_filtered++; in bcm_rx_changed()
490 if (op->frames_filtered > ULONG_MAX/100) in bcm_rx_changed()
491 op->frames_filtered = op->frames_abs = 0; in bcm_rx_changed()
498 head.flags = op->flags; in bcm_rx_changed()
499 head.count = op->count; in bcm_rx_changed()
500 head.ival1 = op->ival1; in bcm_rx_changed()
501 head.ival2 = op->ival2; in bcm_rx_changed()
502 head.can_id = op->can_id; in bcm_rx_changed()
505 bcm_send_to_user(op, &head, data, 1); in bcm_rx_changed()
513 static void bcm_rx_update_and_send(struct bcm_op *op, in bcm_rx_update_and_send() argument
518 memcpy(lastdata, rxdata, op->cfsiz); in bcm_rx_update_and_send()
527 if (!op->kt_ival2) { in bcm_rx_update_and_send()
529 bcm_rx_changed(op, lastdata); in bcm_rx_update_and_send()
534 if (hrtimer_active(&op->thrtimer)) in bcm_rx_update_and_send()
538 if (!op->kt_lastmsg) in bcm_rx_update_and_send()
542 if (ktime_us_delta(ktime_get(), op->kt_lastmsg) < in bcm_rx_update_and_send()
543 ktime_to_us(op->kt_ival2)) { in bcm_rx_update_and_send()
545 hrtimer_start(&op->thrtimer, in bcm_rx_update_and_send()
546 ktime_add(op->kt_lastmsg, op->kt_ival2), in bcm_rx_update_and_send()
553 bcm_rx_changed(op, lastdata); in bcm_rx_update_and_send()
554 op->kt_lastmsg = ktime_get(); in bcm_rx_update_and_send()
561 static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index, in bcm_rx_cmp_to_index() argument
565 struct canfd_frame *cf = op->frames + op->cfsiz * index; in bcm_rx_cmp_to_index()
566 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index; in bcm_rx_cmp_to_index()
576 bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags); in bcm_rx_cmp_to_index()
584 bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags); in bcm_rx_cmp_to_index()
589 if (op->flags & RX_CHECK_DLC) { in bcm_rx_cmp_to_index()
592 bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags); in bcm_rx_cmp_to_index()
601 static void bcm_rx_starttimer(struct bcm_op *op) in bcm_rx_starttimer() argument
603 if (op->flags & RX_NO_AUTOTIMER) in bcm_rx_starttimer()
606 if (op->kt_ival1) in bcm_rx_starttimer()
607 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT); in bcm_rx_starttimer()
613 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); in bcm_rx_timeout_handler() local
617 if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) { in bcm_rx_timeout_handler()
619 memset(op->last_frames, 0, op->nframes * op->cfsiz); in bcm_rx_timeout_handler()
625 msg_head.flags = op->flags; in bcm_rx_timeout_handler()
626 msg_head.count = op->count; in bcm_rx_timeout_handler()
627 msg_head.ival1 = op->ival1; in bcm_rx_timeout_handler()
628 msg_head.ival2 = op->ival2; in bcm_rx_timeout_handler()
629 msg_head.can_id = op->can_id; in bcm_rx_timeout_handler()
632 bcm_send_to_user(op, &msg_head, NULL, 0); in bcm_rx_timeout_handler()
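
bcm_rx_timeout_handler() runs when a monitored CAN ID has been silent for ival1: it queues an RX_TIMEOUT message and, with RX_ANNOUNCE_RESUME, clears last_frames so the next reception is reported again. A hedged sketch of the corresponding RX_SETUP; the helper name, the CAN ID and the one second interval are illustrative and 's' is assumed to be a connected CAN_BCM socket:

#include <string.h>
#include <unistd.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

/* Watch CAN ID 0x42A and let the kernel queue RX_TIMEOUT if it stays
 * silent for one second. RX_FILTER_ID additionally reports every matching
 * frame as RX_CHANGED without content filtering. Assumes 's' is a
 * connected CAN_BCM socket. */
static int bcm_monitor_timeout(int s)
{
	struct bcm_msg_head head;

	memset(&head, 0, sizeof(head));
	head.opcode  = RX_SETUP;
	head.can_id  = 0x42A;		/* illustrative CAN ID */
	head.flags   = SETTIMER | STARTTIMER | RX_FILTER_ID;
	head.ival1.tv_sec = 1;		/* timeout after 1 s of silence */
	head.nframes = 0;		/* no content filter frames attached */

	return write(s, &head, sizeof(head)) == sizeof(head) ? 0 : -1;
}
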
640 static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index) in bcm_rx_do_flush() argument
642 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index; in bcm_rx_do_flush()
644 if ((op->last_frames) && (lcf->flags & RX_THR)) { in bcm_rx_do_flush()
645 bcm_rx_changed(op, lcf); in bcm_rx_do_flush()
654 static int bcm_rx_thr_flush(struct bcm_op *op) in bcm_rx_thr_flush() argument
658 if (op->nframes > 1) { in bcm_rx_thr_flush()
662 for (i = 1; i < op->nframes; i++) in bcm_rx_thr_flush()
663 updated += bcm_rx_do_flush(op, i); in bcm_rx_thr_flush()
667 updated += bcm_rx_do_flush(op, 0); in bcm_rx_thr_flush()
679 struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer); in bcm_rx_thr_handler() local
681 if (bcm_rx_thr_flush(op)) { in bcm_rx_thr_handler()
682 hrtimer_forward_now(hrtimer, op->kt_ival2); in bcm_rx_thr_handler()
686 op->kt_lastmsg = 0; in bcm_rx_thr_handler()
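
bcm_rx_update_and_send() and the throttle handler above rate-limit RX_CHANGED traffic: once a notification has gone out, further changes inside ival2 are held back and only the most recent state is flushed when thrtimer fires. A hedged sketch of a content filter that uses this throttling; the helper name, CAN ID and interval are illustrative and 's' is assumed to be a connected CAN_BCM socket:

#include <string.h>
#include <unistd.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

/* Report data changes of CAN ID 0x100, but at most every 250 ms; changes
 * arriving faster are merged and the latest state is flushed by the
 * kernel's throttle timer. Assumes 's' is a connected CAN_BCM socket. */
static int bcm_filter_throttled(int s)
{
	struct {
		struct bcm_msg_head head;
		struct can_frame frame;		/* relevant-bits mask */
	} msg;

	memset(&msg, 0, sizeof(msg));
	msg.head.opcode  = RX_SETUP;
	msg.head.can_id  = 0x100;		/* illustrative CAN ID */
	msg.head.flags   = SETTIMER | STARTTIMER;
	msg.head.ival2.tv_usec = 250000;	/* throttle interval */
	msg.head.nframes = 1;
	msg.frame.len    = 8;
	memset(msg.frame.data, 0xff, 8);	/* watch all 64 data bits */

	return write(s, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}
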
696 struct bcm_op *op = (struct bcm_op *)data; in bcm_rx_handler() local
701 if (op->can_id != rxframe->can_id) in bcm_rx_handler()
705 if (op->flags & CAN_FD_FRAME) { in bcm_rx_handler()
714 hrtimer_cancel(&op->timer); in bcm_rx_handler()
717 op->rx_stamp = skb->tstamp; in bcm_rx_handler()
719 op->rx_ifindex = skb->dev->ifindex; in bcm_rx_handler()
721 op->frames_abs++; in bcm_rx_handler()
723 if (op->flags & RX_RTR_FRAME) { in bcm_rx_handler()
725 bcm_can_tx(op); in bcm_rx_handler()
733 if (skb->sk == op->sk) in bcm_rx_handler()
737 if (op->flags & RX_FILTER_ID) { in bcm_rx_handler()
739 bcm_rx_update_and_send(op, op->last_frames, rxframe, in bcm_rx_handler()
744 if (op->nframes == 1) { in bcm_rx_handler()
746 bcm_rx_cmp_to_index(op, 0, rxframe, traffic_flags); in bcm_rx_handler()
750 if (op->nframes > 1) { in bcm_rx_handler()
759 for (i = 1; i < op->nframes; i++) { in bcm_rx_handler()
760 if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) == in bcm_rx_handler()
761 (get_u64(op->frames, 0) & in bcm_rx_handler()
762 get_u64(op->frames + op->cfsiz * i, 0))) { in bcm_rx_handler()
763 bcm_rx_cmp_to_index(op, i, rxframe, in bcm_rx_handler()
771 bcm_rx_starttimer(op); in bcm_rx_handler()
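
The nframes > 1 branch above is the multiplex filter: frame 0 of op->frames is the MUX mask and frames 1..nframes-1 carry the expected masked values, so only receptions whose masked bits match one of them are change-detected against that slot. A hedged sketch of such a setup; the helper name, CAN ID and record layout are illustrative and 's' is assumed to be a connected CAN_BCM socket:

#include <string.h>
#include <unistd.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

/* Multiplex filter: byte 0 of CAN ID 0x200 selects the record type.
 * frames[0] is the MUX mask, frames[1..2] name the two record types of
 * interest and mark their remaining bytes as relevant for change
 * detection. Assumes 's' is a connected CAN_BCM socket. */
static int bcm_filter_mux(int s)
{
	struct {
		struct bcm_msg_head head;
		struct can_frame frames[3];
	} msg;

	memset(&msg, 0, sizeof(msg));
	msg.head.opcode  = RX_SETUP;
	msg.head.can_id  = 0x200;			/* illustrative CAN ID */
	msg.head.flags   = SETTIMER | STARTTIMER;
	msg.head.nframes = 3;

	msg.frames[0].data[0] = 0xff;			/* MUX mask: byte 0 selects the record  */
	msg.frames[1].data[0] = 0x01;			/* record type 0x01 ...                 */
	memset(&msg.frames[1].data[1], 0xff, 7);	/* ... monitor its other 7 data bytes   */
	msg.frames[2].data[0] = 0x02;			/* record type 0x02, likewise           */
	memset(&msg.frames[2].data[1], 0xff, 7);

	return write(s, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}
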
780 struct bcm_op *op; in bcm_find_op() local
782 list_for_each_entry(op, ops, list) { in bcm_find_op()
783 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && in bcm_find_op()
784 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) in bcm_find_op()
785 return op; in bcm_find_op()
793 struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu); in bcm_free_op_rcu() local
795 if ((op->frames) && (op->frames != &op->sframe)) in bcm_free_op_rcu()
796 kfree(op->frames); in bcm_free_op_rcu()
798 if ((op->last_frames) && (op->last_frames != &op->last_sframe)) in bcm_free_op_rcu()
799 kfree(op->last_frames); in bcm_free_op_rcu()
801 kfree(op); in bcm_free_op_rcu()
804 static void bcm_remove_op(struct bcm_op *op) in bcm_remove_op() argument
806 hrtimer_cancel(&op->timer); in bcm_remove_op()
807 hrtimer_cancel(&op->thrtimer); in bcm_remove_op()
809 call_rcu(&op->rcu, bcm_free_op_rcu); in bcm_remove_op()
812 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op) in bcm_rx_unreg() argument
814 if (op->rx_reg_dev == dev) { in bcm_rx_unreg()
815 can_rx_unregister(dev_net(dev), dev, op->can_id, in bcm_rx_unreg()
816 REGMASK(op->can_id), bcm_rx_handler, op); in bcm_rx_unreg()
819 op->rx_reg_dev = NULL; in bcm_rx_unreg()
822 "mismatch %p %p\n", op->rx_reg_dev, dev); in bcm_rx_unreg()
831 struct bcm_op *op, *n; in bcm_delete_rx_op() local
833 list_for_each_entry_safe(op, n, ops, list) { in bcm_delete_rx_op()
834 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && in bcm_delete_rx_op()
835 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) { in bcm_delete_rx_op()
838 op->flags |= RX_NO_AUTOTIMER; in bcm_delete_rx_op()
845 if (op->ifindex) { in bcm_delete_rx_op()
851 if (op->rx_reg_dev) { in bcm_delete_rx_op()
854 dev = dev_get_by_index(sock_net(op->sk), in bcm_delete_rx_op()
855 op->ifindex); in bcm_delete_rx_op()
857 bcm_rx_unreg(dev, op); in bcm_delete_rx_op()
862 can_rx_unregister(sock_net(op->sk), NULL, in bcm_delete_rx_op()
863 op->can_id, in bcm_delete_rx_op()
864 REGMASK(op->can_id), in bcm_delete_rx_op()
865 bcm_rx_handler, op); in bcm_delete_rx_op()
867 list_del_rcu(&op->list); in bcm_delete_rx_op()
868 bcm_remove_op(op); in bcm_delete_rx_op()
882 struct bcm_op *op, *n; in bcm_delete_tx_op() local
884 list_for_each_entry_safe(op, n, ops, list) { in bcm_delete_tx_op()
885 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && in bcm_delete_tx_op()
886 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) { in bcm_delete_tx_op()
887 list_del_rcu(&op->list); in bcm_delete_tx_op()
888 bcm_remove_op(op); in bcm_delete_tx_op()
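
Both delete paths above match an op by the (can_id, ifindex, CAN_FD_FRAME) triple, so a delete message must be sent on the same socket/interface binding and with the same CAN_FD_FRAME flag as the setup. A hedged sketch of tearing both kinds of op down; the helper name is made up and 's' is assumed to be the connected CAN_BCM socket that created the ops:

#include <string.h>
#include <unistd.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

/* Remove a previously created cyclic TX op and an RX filter op for the
 * given CAN ID. Only the bcm_msg_head is needed; no frames follow
 * TX_DELETE or RX_DELETE. Assumes 's' is the connected CAN_BCM socket
 * that owns the ops. */
static int bcm_delete_ops(int s, canid_t can_id)
{
	struct bcm_msg_head head;

	memset(&head, 0, sizeof(head));
	head.can_id = can_id;

	head.opcode = TX_DELETE;
	if (write(s, &head, sizeof(head)) != sizeof(head))
		return -1;

	head.opcode = RX_DELETE;
	if (write(s, &head, sizeof(head)) != sizeof(head))
		return -1;

	return 0;
}
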
902 struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex); in bcm_read_op() local
904 if (!op) in bcm_read_op()
908 msg_head->flags = op->flags; in bcm_read_op()
909 msg_head->count = op->count; in bcm_read_op()
910 msg_head->ival1 = op->ival1; in bcm_read_op()
911 msg_head->ival2 = op->ival2; in bcm_read_op()
912 msg_head->nframes = op->nframes; in bcm_read_op()
914 bcm_send_to_user(op, msg_head, op->frames, 0); in bcm_read_op()
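
bcm_read_op() answers TX_READ/RX_READ by copying the op's current flags, count, intervals and frames back to userspace via bcm_send_to_user() as a TX_STATUS/RX_STATUS message. A hedged sketch of querying a transmit op; the helper name is made up, classic frames are assumed, and on a busy socket another pending notification could of course be read before the status answer arrives:

#include <string.h>
#include <unistd.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

/* Query the current settings of a TX op: write a TX_READ header, then read
 * back the TX_STATUS answer including the op's frames. Assumes 's' is the
 * connected CAN_BCM socket that owns the op, using classic frames. */
static int bcm_query_tx_op(int s, canid_t can_id)
{
	struct bcm_msg_head req;
	struct {
		struct bcm_msg_head head;
		struct can_frame frames[256];	/* MAX_NFRAMES in the kernel */
	} status;

	memset(&req, 0, sizeof(req));
	req.opcode = TX_READ;
	req.can_id = can_id;

	if (write(s, &req, sizeof(req)) != sizeof(req))
		return -1;

	if (read(s, &status, sizeof(status)) < (ssize_t)sizeof(status.head))
		return -1;

	/* status.head.opcode == TX_STATUS; count/ival1/ival2/nframes filled in */
	return (int)status.head.nframes;
}
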
926 struct bcm_op *op; in bcm_tx_setup() local
944 op = bcm_find_op(&bo->tx_ops, msg_head, ifindex); in bcm_tx_setup()
945 if (op) { in bcm_tx_setup()
953 if (msg_head->nframes > op->nframes) in bcm_tx_setup()
959 cf = op->frames + op->cfsiz * i; in bcm_tx_setup()
960 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz); in bcm_tx_setup()
962 if (op->flags & CAN_FD_FRAME) { in bcm_tx_setup()
978 op->flags = msg_head->flags; in bcm_tx_setup()
981 if (op->nframes != msg_head->nframes || in bcm_tx_setup()
982 op->flags & TX_RESET_MULTI_IDX || in bcm_tx_setup()
983 op->flags & SETTIMER) { in bcm_tx_setup()
985 spin_lock_bh(&op->bcm_tx_lock); in bcm_tx_setup()
987 if (op->nframes != msg_head->nframes || in bcm_tx_setup()
988 op->flags & TX_RESET_MULTI_IDX) { in bcm_tx_setup()
990 op->nframes = msg_head->nframes; in bcm_tx_setup()
992 op->currframe = 0; in bcm_tx_setup()
995 if (op->flags & SETTIMER) in bcm_tx_setup()
996 op->count = msg_head->count; in bcm_tx_setup()
998 spin_unlock_bh(&op->bcm_tx_lock); in bcm_tx_setup()
1004 op = kzalloc(OPSIZ, GFP_KERNEL); in bcm_tx_setup()
1005 if (!op) in bcm_tx_setup()
1008 spin_lock_init(&op->bcm_tx_lock); in bcm_tx_setup()
1009 op->can_id = msg_head->can_id; in bcm_tx_setup()
1010 op->cfsiz = CFSIZ(msg_head->flags); in bcm_tx_setup()
1011 op->flags = msg_head->flags; in bcm_tx_setup()
1012 op->nframes = msg_head->nframes; in bcm_tx_setup()
1014 if (op->flags & SETTIMER) in bcm_tx_setup()
1015 op->count = msg_head->count; in bcm_tx_setup()
1019 op->frames = kmalloc_array(msg_head->nframes, in bcm_tx_setup()
1020 op->cfsiz, in bcm_tx_setup()
1022 if (!op->frames) { in bcm_tx_setup()
1023 kfree(op); in bcm_tx_setup()
1027 op->frames = &op->sframe; in bcm_tx_setup()
1031 cf = op->frames + op->cfsiz * i; in bcm_tx_setup()
1032 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz); in bcm_tx_setup()
1036 if (op->flags & CAN_FD_FRAME) { in bcm_tx_setup()
1054 op->last_frames = NULL; in bcm_tx_setup()
1057 op->sk = sk; in bcm_tx_setup()
1058 op->ifindex = ifindex; in bcm_tx_setup()
1061 hrtimer_setup(&op->timer, bcm_tx_timeout_handler, CLOCK_MONOTONIC, in bcm_tx_setup()
1065 hrtimer_setup(&op->thrtimer, hrtimer_dummy_timeout, CLOCK_MONOTONIC, in bcm_tx_setup()
1069 list_add(&op->list, &bo->tx_ops); in bcm_tx_setup()
1073 if (op->flags & SETTIMER) { in bcm_tx_setup()
1075 op->ival1 = msg_head->ival1; in bcm_tx_setup()
1076 op->ival2 = msg_head->ival2; in bcm_tx_setup()
1077 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); in bcm_tx_setup()
1078 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2); in bcm_tx_setup()
1081 if (!op->kt_ival1 && !op->kt_ival2) in bcm_tx_setup()
1082 hrtimer_cancel(&op->timer); in bcm_tx_setup()
1085 if (op->flags & STARTTIMER) { in bcm_tx_setup()
1086 hrtimer_cancel(&op->timer); in bcm_tx_setup()
1088 op->flags |= TX_ANNOUNCE; in bcm_tx_setup()
1091 if (op->flags & TX_ANNOUNCE) in bcm_tx_setup()
1092 bcm_can_tx(op); in bcm_tx_setup()
1094 if (op->flags & STARTTIMER) in bcm_tx_setup()
1095 bcm_tx_start_timer(op); in bcm_tx_setup()
1097 return msg_head->nframes * op->cfsiz + MHSIZ; in bcm_tx_setup()
1100 if (op->frames != &op->sframe) in bcm_tx_setup()
1101 kfree(op->frames); in bcm_tx_setup()
1102 kfree(op); in bcm_tx_setup()
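
bcm_tx_setup() sizes everything by op->cfsiz = CFSIZ(flags), so with CAN_FD_FRAME set the data following bcm_msg_head must be struct canfd_frame and each frame may carry up to 64 bytes, which is what the CAN_FD_FRAME length checks above enforce. A hedged sketch of a cyclic CAN FD transmission; the helper name, CAN ID, payload and the assumption that 's' is connected to an FD capable interface are illustrative:

#include <string.h>
#include <unistd.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

/* Cyclic transmission of a CAN FD frame: with CAN_FD_FRAME set, the data
 * following bcm_msg_head must be struct canfd_frame (up to 64 data bytes).
 * Assumes 's' is a CAN_BCM socket connected to an FD capable interface. */
static int bcm_tx_setup_fd(int s)
{
	struct {
		struct bcm_msg_head head;
		struct canfd_frame frame;
	} msg;

	memset(&msg, 0, sizeof(msg));
	msg.head.opcode  = TX_SETUP;
	msg.head.can_id  = 0x321;			/* illustrative CAN ID */
	msg.head.flags   = CAN_FD_FRAME | SETTIMER | STARTTIMER | TX_CP_CAN_ID;
	msg.head.ival2.tv_usec = 100000;		/* every 100 ms */
	msg.head.nframes = 1;
	msg.frame.len    = 12;				/* FD payloads may exceed 8 bytes */
	msg.frame.flags  = CANFD_BRS;			/* request bit rate switching */
	memset(msg.frame.data, 0x55, msg.frame.len);

	return write(s, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}
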
1113 struct bcm_op *op; in bcm_rx_setup() local
1138 op = bcm_find_op(&bo->rx_ops, msg_head, ifindex); in bcm_rx_setup()
1139 if (op) { in bcm_rx_setup()
1147 if (msg_head->nframes > op->nframes) in bcm_rx_setup()
1152 err = memcpy_from_msg(op->frames, msg, in bcm_rx_setup()
1153 msg_head->nframes * op->cfsiz); in bcm_rx_setup()
1158 memset(op->last_frames, 0, msg_head->nframes * op->cfsiz); in bcm_rx_setup()
1161 op->nframes = msg_head->nframes; in bcm_rx_setup()
1162 op->flags = msg_head->flags; in bcm_rx_setup()
1169 op = kzalloc(OPSIZ, GFP_KERNEL); in bcm_rx_setup()
1170 if (!op) in bcm_rx_setup()
1173 op->can_id = msg_head->can_id; in bcm_rx_setup()
1174 op->nframes = msg_head->nframes; in bcm_rx_setup()
1175 op->cfsiz = CFSIZ(msg_head->flags); in bcm_rx_setup()
1176 op->flags = msg_head->flags; in bcm_rx_setup()
1180 op->frames = kmalloc_array(msg_head->nframes, in bcm_rx_setup()
1181 op->cfsiz, in bcm_rx_setup()
1183 if (!op->frames) { in bcm_rx_setup()
1184 kfree(op); in bcm_rx_setup()
1189 op->last_frames = kcalloc(msg_head->nframes, in bcm_rx_setup()
1190 op->cfsiz, in bcm_rx_setup()
1192 if (!op->last_frames) { in bcm_rx_setup()
1193 kfree(op->frames); in bcm_rx_setup()
1194 kfree(op); in bcm_rx_setup()
1199 op->frames = &op->sframe; in bcm_rx_setup()
1200 op->last_frames = &op->last_sframe; in bcm_rx_setup()
1204 err = memcpy_from_msg(op->frames, msg, in bcm_rx_setup()
1205 msg_head->nframes * op->cfsiz); in bcm_rx_setup()
1207 if (op->frames != &op->sframe) in bcm_rx_setup()
1208 kfree(op->frames); in bcm_rx_setup()
1209 if (op->last_frames != &op->last_sframe) in bcm_rx_setup()
1210 kfree(op->last_frames); in bcm_rx_setup()
1211 kfree(op); in bcm_rx_setup()
1217 op->sk = sk; in bcm_rx_setup()
1218 op->ifindex = ifindex; in bcm_rx_setup()
1221 op->rx_ifindex = ifindex; in bcm_rx_setup()
1224 hrtimer_setup(&op->timer, bcm_rx_timeout_handler, CLOCK_MONOTONIC, in bcm_rx_setup()
1226 hrtimer_setup(&op->thrtimer, bcm_rx_thr_handler, CLOCK_MONOTONIC, in bcm_rx_setup()
1230 list_add(&op->list, &bo->rx_ops); in bcm_rx_setup()
1239 if (op->flags & RX_RTR_FRAME) { in bcm_rx_setup()
1240 struct canfd_frame *frame0 = op->frames; in bcm_rx_setup()
1243 hrtimer_cancel(&op->thrtimer); in bcm_rx_setup()
1244 hrtimer_cancel(&op->timer); in bcm_rx_setup()
1251 if ((op->flags & TX_CP_CAN_ID) || in bcm_rx_setup()
1252 (frame0->can_id == op->can_id)) in bcm_rx_setup()
1253 frame0->can_id = op->can_id & ~CAN_RTR_FLAG; in bcm_rx_setup()
1256 if (op->flags & SETTIMER) { in bcm_rx_setup()
1259 op->ival1 = msg_head->ival1; in bcm_rx_setup()
1260 op->ival2 = msg_head->ival2; in bcm_rx_setup()
1261 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); in bcm_rx_setup()
1262 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2); in bcm_rx_setup()
1265 if (!op->kt_ival1) in bcm_rx_setup()
1266 hrtimer_cancel(&op->timer); in bcm_rx_setup()
1272 op->kt_lastmsg = 0; in bcm_rx_setup()
1273 hrtimer_cancel(&op->thrtimer); in bcm_rx_setup()
1274 bcm_rx_thr_flush(op); in bcm_rx_setup()
1277 if ((op->flags & STARTTIMER) && op->kt_ival1) in bcm_rx_setup()
1278 hrtimer_start(&op->timer, op->kt_ival1, in bcm_rx_setup()
1290 op->can_id, in bcm_rx_setup()
1291 REGMASK(op->can_id), in bcm_rx_setup()
1292 bcm_rx_handler, op, in bcm_rx_setup()
1295 op->rx_reg_dev = dev; in bcm_rx_setup()
1300 err = can_rx_register(sock_net(sk), NULL, op->can_id, in bcm_rx_setup()
1301 REGMASK(op->can_id), in bcm_rx_setup()
1302 bcm_rx_handler, op, "bcm", sk); in bcm_rx_setup()
1305 list_del_rcu(&op->list); in bcm_rx_setup()
1306 bcm_remove_op(op); in bcm_rx_setup()
1311 return msg_head->nframes * op->cfsiz + MHSIZ; in bcm_rx_setup()
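
The RX_RTR_FRAME handling above cancels both timers and strips CAN_RTR_FLAG from frame 0, so that bcm_rx_handler() can answer an incoming RTR request by transmitting that frame via bcm_can_tx(). A hedged sketch of configuring such an auto-responder; the helper name, the CAN ID and the reply payload are illustrative and 's' is assumed to be a connected CAN_BCM socket:

#include <string.h>
#include <unistd.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

/* Auto-answer remote transmission requests for CAN ID 0x7DF: the kernel
 * replies with the frame below whenever an RTR request for that ID is
 * received. Assumes 's' is a connected CAN_BCM socket. */
static int bcm_rtr_responder(int s)
{
	struct {
		struct bcm_msg_head head;
		struct can_frame frame;		/* the canned answer */
	} msg;

	memset(&msg, 0, sizeof(msg));
	msg.head.opcode  = RX_SETUP;
	msg.head.can_id  = 0x7DF | CAN_RTR_FLAG;	/* subscribe to RTR requests */
	msg.head.flags   = RX_RTR_FRAME | TX_CP_CAN_ID;
	msg.head.nframes = 1;
	msg.frame.len    = 3;
	msg.frame.data[0] = 0x01;
	msg.frame.data[1] = 0x02;
	msg.frame.data[2] = 0x03;

	return write(s, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}
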
1481 struct bcm_op *op; in bcm_notify() local
1493 list_for_each_entry(op, &bo->rx_ops, list) in bcm_notify()
1494 if (op->rx_reg_dev == dev) in bcm_notify()
1495 bcm_rx_unreg(dev, op); in bcm_notify()
1582 struct bcm_op *op, *next; in bcm_release() local
1609 list_for_each_entry_safe(op, next, &bo->tx_ops, list) in bcm_release()
1610 bcm_remove_op(op); in bcm_release()
1612 list_for_each_entry_safe(op, next, &bo->rx_ops, list) { in bcm_release()
1617 if (op->ifindex) { in bcm_release()
1623 if (op->rx_reg_dev) { in bcm_release()
1626 dev = dev_get_by_index(net, op->ifindex); in bcm_release()
1628 bcm_rx_unreg(dev, op); in bcm_release()
1633 can_rx_unregister(net, NULL, op->can_id, in bcm_release()
1634 REGMASK(op->can_id), in bcm_release()
1635 bcm_rx_handler, op); in bcm_release()
1641 list_for_each_entry_safe(op, next, &bo->rx_ops, list) in bcm_release()
1642 bcm_remove_op(op); in bcm_release()