Lines Matching refs:ndlc

42 	print_hex_dump(KERN_DEBUG, "ndlc: ", DUMP_PREFIX_OFFSET, \
46 int ndlc_open(struct llt_ndlc *ndlc)
49 ndlc->ops->enable(ndlc->phy_id);
50 ndlc->powered = 1;
55 void ndlc_close(struct llt_ndlc *ndlc)
63 ndlc->ops->enable(ndlc->phy_id);
65 nci_prop_cmd(ndlc->ndev, ST_NCI_CORE_PROP,
68 ndlc->powered = 0;
69 ndlc->ops->disable(ndlc->phy_id);
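The nci_prop_cmd() call above (line 65) ships a proprietary "NFC mode off" command to the chip before the power-down at lines 68-69; the listing only shows the lines containing "ndlc". A minimal sketch of the close path, assuming the nci_mode_set_cmd layout and ST_NCI_SET_NFC_MODE constant from the st-nci headers:

	struct nci_mode_set_cmd cmd;

	cmd.cmd_type = ST_NCI_SET_NFC_MODE;
	cmd.mode = 0;				/* 0 = NFC off */

	/* toggle the reset pin so the chip accepts the command */
	ndlc->ops->enable(ndlc->phy_id);
	nci_prop_cmd(ndlc->ndev, ST_NCI_CORE_PROP,
		     sizeof(struct nci_mode_set_cmd), (__u8 *)&cmd);

	ndlc->powered = 0;
	ndlc->ops->disable(ndlc->phy_id);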
73 int ndlc_send(struct llt_ndlc *ndlc, struct sk_buff *skb)
75 /* add ndlc header */
80 skb_queue_tail(&ndlc->send_q, skb);
82 schedule_work(&ndlc->sm_work);
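The "add ndlc header" comment at line 75 refers to a one-byte PCB (protocol control byte) pushed in front of the NCI payload before the frame is queued. A sketch of that push, assuming the PCB_* flag definitions from the top of ndlc.c:

	u8 pcb = PCB_TYPE_DATAFRAME | PCB_DATAFRAME_RETRANSMIT_NO |
		 PCB_FRAME_CRC_INFO_NOTPRESENT;

	*(u8 *)skb_push(skb, 1) = pcb;		/* prepend the NDLC header */
	skb_queue_tail(&ndlc->send_q, skb);
	schedule_work(&ndlc->sm_work);		/* kick the state machine */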
88 static void llt_ndlc_send_queue(struct llt_ndlc *ndlc)
94 if (ndlc->send_q.qlen)
96 ndlc->send_q.qlen, ndlc->ack_pending_q.qlen);
98 while (ndlc->send_q.qlen) {
99 skb = skb_dequeue(&ndlc->send_q);
100 NDLC_DUMP_SKB("ndlc frame written", skb);
101 r = ndlc->ops->write(ndlc->phy_id, skb);
103 ndlc->hard_fault = r;
109 skb_queue_tail(&ndlc->ack_pending_q, skb);
111 /* start timer t1 for ndlc acknowledge */
112 ndlc->t1_active = true;
113 mod_timer(&ndlc->t1_timer, time_sent +
116 ndlc->t2_active = true;
117 mod_timer(&ndlc->t2_timer, time_sent +
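Lines 111-117 arm the two NDLC timers after each frame is written: T1 waits for the chip's acknowledge and drives retransmission, T2 bounds overall chip availability. A sketch of the arming pattern inside the send loop, assuming the NDLC_TIMER_T1/NDLC_TIMER_T2 millisecond constants from ndlc.c:

	unsigned long time_sent = jiffies;

	*(unsigned long *)skb->cb = time_sent;	/* stamp the frame */
	skb_queue_tail(&ndlc->ack_pending_q, skb);

	/* T1: per-frame acknowledge timeout */
	ndlc->t1_active = true;
	mod_timer(&ndlc->t1_timer,
		  time_sent + msecs_to_jiffies(NDLC_TIMER_T1));

	/* T2: chip-availability timeout, escalates to a hard fault */
	ndlc->t2_active = true;
	mod_timer(&ndlc->t2_timer,
		  time_sent + msecs_to_jiffies(NDLC_TIMER_T2));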
122 static void llt_ndlc_requeue_data_pending(struct llt_ndlc *ndlc)
127 while ((skb = skb_dequeue_tail(&ndlc->ack_pending_q))) {
143 skb_queue_head(&ndlc->send_q, skb);
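Between the dequeue at line 127 and the requeue at line 143, each unacknowledged frame has its retransmit bit flipped before going back to the head of send_q. A condensed sketch of the loop body, assuming the PCB_*_RETRANSMIT_* constants from ndlc.c:

	while ((skb = skb_dequeue_tail(&ndlc->ack_pending_q))) {
		u8 pcb = skb->data[0];

		/* mark the frame as a retransmission before requeueing */
		if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_SUPERVISOR)
			skb->data[0] = (pcb & ~PCB_SUPERVISOR_RETRANSMIT_MASK) |
				       PCB_SUPERVISOR_RETRANSMIT_YES;
		else
			skb->data[0] = (pcb & ~PCB_DATAFRAME_RETRANSMIT_MASK) |
				       PCB_DATAFRAME_RETRANSMIT_YES;

		/* requeue at the head so original ordering is preserved */
		skb_queue_head(&ndlc->send_q, skb);
	}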
147 static void llt_ndlc_rcv_queue(struct llt_ndlc *ndlc)
153 if (ndlc->rcv_q.qlen)
154 pr_debug("rcvQlen=%d\n", ndlc->rcv_q.qlen);
156 while ((skb = skb_dequeue(&ndlc->rcv_q)) != NULL) {
162 skb = skb_dequeue(&ndlc->ack_pending_q);
164 timer_delete_sync(&ndlc->t1_timer);
165 timer_delete_sync(&ndlc->t2_timer);
166 ndlc->t2_active = false;
167 ndlc->t1_active = false;
170 llt_ndlc_requeue_data_pending(ndlc);
171 llt_ndlc_send_queue(ndlc);
172 /* start timer t1 for ndlc acknowledge */
174 ndlc->t1_active = true;
175 mod_timer(&ndlc->t1_timer, time_sent +
180 ndlc->t1_active = true;
181 mod_timer(&ndlc->t1_timer, time_sent +
189 nci_recv_frame(ndlc->ndev, skb);
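The receive loop at lines 156-189 dispatches on the PCB of each incoming frame: a supervisor ACK clears the pending frame and both timers (lines 162-167), a NACK requeues and resends with T1 re-armed (lines 170-175), a WAIT just re-arms T1 (lines 180-181), and data frames go up to the NCI core (line 189). A condensed sketch of that dispatch, assuming the PCB_* type and sync masks from ndlc.c:

	u8 pcb = skb->data[0];

	if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_SUPERVISOR) {
		switch (pcb & PCB_SYNC_MASK) {
		case PCB_SYNC_ACK:
			/* frame acknowledged: free it, stop both timers */
			kfree_skb(skb_dequeue(&ndlc->ack_pending_q));
			timer_delete_sync(&ndlc->t1_timer);
			timer_delete_sync(&ndlc->t2_timer);
			ndlc->t1_active = false;
			ndlc->t2_active = false;
			break;
		case PCB_SYNC_NACK:
			/* chip rejected the frame: requeue and resend */
			llt_ndlc_requeue_data_pending(ndlc);
			llt_ndlc_send_queue(ndlc);
			ndlc->t1_active = true;
			mod_timer(&ndlc->t1_timer,
				  jiffies + msecs_to_jiffies(NDLC_TIMER_T1));
			break;
		default:
			kfree_skb(skb);
			break;
		}
	} else {
		/* plain data frame: hand it to the NCI core */
		nci_recv_frame(ndlc->ndev, skb);
	}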
198 struct llt_ndlc *ndlc = container_of(work, struct llt_ndlc, sm_work);
200 llt_ndlc_send_queue(ndlc);
201 llt_ndlc_rcv_queue(ndlc);
203 if (ndlc->t1_active && timer_pending(&ndlc->t1_timer) == 0) {
206 ndlc->t1_active = false;
208 llt_ndlc_requeue_data_pending(ndlc);
209 llt_ndlc_send_queue(ndlc);
212 if (ndlc->t2_active && timer_pending(&ndlc->t2_timer) == 0) {
214 ndlc->t2_active = false;
215 ndlc->t1_active = false;
216 timer_delete_sync(&ndlc->t1_timer);
217 timer_delete_sync(&ndlc->t2_timer);
218 ndlc_close(ndlc);
219 ndlc->hard_fault = -EREMOTEIO;
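Reassembled from the matched lines, the state-machine worker services both queues, then polls the timers: an expired T1 means no acknowledge arrived, so pending frames are requeued and resent; an expired T2 is treated as a dead chip, so the link is closed and hard_fault is latched to -EREMOTEIO. Roughly (debug prints omitted):

static void llt_ndlc_sm_work(struct work_struct *work)
{
	struct llt_ndlc *ndlc = container_of(work, struct llt_ndlc, sm_work);

	llt_ndlc_send_queue(ndlc);
	llt_ndlc_rcv_queue(ndlc);

	if (ndlc->t1_active && timer_pending(&ndlc->t1_timer) == 0) {
		/* T1 expired: no ACK seen, retransmit everything pending */
		ndlc->t1_active = false;
		llt_ndlc_requeue_data_pending(ndlc);
		llt_ndlc_send_queue(ndlc);
	}
	if (ndlc->t2_active && timer_pending(&ndlc->t2_timer) == 0) {
		/* T2 expired: chip unresponsive, latch a hard fault */
		ndlc->t1_active = false;
		ndlc->t2_active = false;
		timer_delete_sync(&ndlc->t1_timer);
		timer_delete_sync(&ndlc->t2_timer);
		ndlc_close(ndlc);
		ndlc->hard_fault = -EREMOTEIO;
	}
}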
223 void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb)
227 ndlc->hard_fault = -EREMOTEIO;
228 ndlc_close(ndlc);
231 skb_queue_tail(&ndlc->rcv_q, skb);
234 schedule_work(&ndlc->sm_work);
240 struct llt_ndlc *ndlc = timer_container_of(ndlc, t, t1_timer);
242 schedule_work(&ndlc->sm_work);
247 struct llt_ndlc *ndlc = timer_container_of(ndlc, t, t2_timer);
249 schedule_work(&ndlc->sm_work);
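Both timer callbacks only defer to the workqueue, since the real handling must run in process context; timer_container_of() recovers the enclosing llt_ndlc from the timer_list pointer. A sketch of the T1 handler (the T2 handler is identical apart from the field name):

static void ndlc_t1_timeout(struct timer_list *t)
{
	struct llt_ndlc *ndlc = timer_container_of(ndlc, t, t1_timer);

	/* no ACK in time: let the state machine decide in process context */
	schedule_work(&ndlc->sm_work);
}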
256 struct llt_ndlc *ndlc;
258 ndlc = devm_kzalloc(dev, sizeof(struct llt_ndlc), GFP_KERNEL);
259 if (!ndlc)
262 ndlc->ops = phy_ops;
263 ndlc->phy_id = phy_id;
264 ndlc->dev = dev;
265 ndlc->powered = 0;
267 *ndlc_id = ndlc;
270 timer_setup(&ndlc->t1_timer, ndlc_t1_timeout, 0);
271 timer_setup(&ndlc->t2_timer, ndlc_t2_timeout, 0);
273 skb_queue_head_init(&ndlc->rcv_q);
274 skb_queue_head_init(&ndlc->send_q);
275 skb_queue_head_init(&ndlc->ack_pending_q);
277 INIT_WORK(&ndlc->sm_work, llt_ndlc_sm_work);
279 return st_nci_probe(ndlc, phy_headroom, phy_tailroom, se_status);
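A phy driver (I2C or SPI) binds itself to this layer by handing its ops and buffer requirements to ndlc_probe(), which allocates the llt_ndlc, initializes timers, queues and the work item as shown above, and then chains into st_nci_probe(). A hedged usage sketch, assuming the ndlc_probe() prototype from ndlc.h; my_phy, my_phy_ops and se_status are illustrative placeholders:

	struct llt_ndlc *ndlc;
	int r;

	/* my_phy / my_phy_ops / se_status are placeholders for the
	 * phy driver's own context, nfc_phy_ops and SE status struct */
	r = ndlc_probe(my_phy, &my_phy_ops, &client->dev,
		       phy_headroom, phy_tailroom,
		       &ndlc, &se_status);
	if (r < 0)
		return r;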
283 void ndlc_remove(struct llt_ndlc *ndlc)
286 timer_delete_sync(&ndlc->t1_timer);
287 timer_delete_sync(&ndlc->t2_timer);
288 ndlc->t2_active = false;
289 ndlc->t1_active = false;
291 cancel_work_sync(&ndlc->sm_work);
293 st_nci_remove(ndlc->ndev);
295 skb_queue_purge(&ndlc->rcv_q);
296 skb_queue_purge(&ndlc->send_q);