Lines Matching +full:0 +full:xd

31 #define TBNET_LOCAL_PATH	0xf
45 #define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))
56 * supported then @frame_id is filled, otherwise it stays %0.
88 #define TBIP_HDR_LENGTH_MASK GENMASK(5, 0)
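TBNET_LOCAL_PATH, TBNET_L0_PORT_NUM() and TBIP_HDR_LENGTH_MASK are plain bit masks; GENMASK(5, 0) expands to 0x3f, so TBNET_L0_PORT_NUM() keeps only the low six bits of the XDomain route string. A small standalone C illustration of that extraction follows; the SKETCH_* names and the example route value are made up for this sketch and are not part of the driver.

/* Standalone illustration (not driver code): how the mask above pulls the
 * first-hop port number out of a route value.  GENMASK(5, 0) is written out
 * by hand as 0x3f here; the route value is arbitrary. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_GENMASK_5_0	0x3fULL			/* GENMASK(5, 0) */
#define SKETCH_L0_PORT_NUM(r)	((r) & SKETCH_GENMASK_5_0)

int main(void)
{
	uint64_t route = 0x30301;	/* made-up example route string */

	/* The low six bits select the port at the first hop; the driver
	 * later feeds this value to tb_phy_port_from_link() when it
	 * generates the MAC address. */
	printf("L0 port: %llu\n",
	       (unsigned long long)SKETCH_L0_PORT_NUM(route));
	return 0;
}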
147 * @xd: XDomain the service belongs to
178 struct tb_xdomain *xd; member
201 UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
202 0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
206 UUID_INIT(0x798f589e, 0x3616, 0x8a47,
207 0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);
235 struct tb_xdomain *xd = net->xd; in tbnet_login_response() local
237 memset(&reply, 0, sizeof(reply)); in tbnet_login_response()
238 tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid, in tbnet_login_response()
239 xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply), in tbnet_login_response()
244 return tb_xdomain_response(xd, &reply, sizeof(reply), in tbnet_login_response()
252 struct tb_xdomain *xd = net->xd; in tbnet_login_request() local
254 memset(&request, 0, sizeof(request)); in tbnet_login_request()
255 tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid, in tbnet_login_request()
256 xd->remote_uuid, TBIP_LOGIN, sizeof(request), in tbnet_login_request()
262 return tb_xdomain_request(xd, &request, sizeof(request), in tbnet_login_request()
272 struct tb_xdomain *xd = net->xd; in tbnet_logout_response() local
274 memset(&reply, 0, sizeof(reply)); in tbnet_logout_response()
275 tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid, in tbnet_logout_response()
276 xd->remote_uuid, TBIP_STATUS, sizeof(reply), in tbnet_logout_response()
278 return tb_xdomain_response(xd, &reply, sizeof(reply), in tbnet_logout_response()
286 struct tb_xdomain *xd = net->xd; in tbnet_logout_request() local
288 memset(&request, 0, sizeof(request)); in tbnet_logout_request()
289 tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid, in tbnet_logout_request()
290 xd->remote_uuid, TBIP_LOGOUT, sizeof(request), in tbnet_logout_request()
293 return tb_xdomain_request(xd, &request, sizeof(request), in tbnet_logout_request()
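The login/logout helpers above all follow the same pattern: zero the packet, fill its header from the XDomain connection (route, sequence number, local and remote UUIDs, packet type and size), then either send it with tb_xdomain_request() and wait for the peer's answer, or reply to a received packet with tb_xdomain_response(). A loose userspace sketch of that pattern is below; the struct layout, field names and the numeric type value are invented placeholders, not the driver's thunderbolt_ip_* definitions, and the actual transport call is omitted.

/* Illustrative only: placeholder layout, not the driver's structures. */
#include <stdint.h>
#include <string.h>

typedef uint8_t sketch_uuid_t[16];

struct sketch_tbip_login {
	/* header part, filled the same way for every control packet */
	sketch_uuid_t initiator_uuid;	/* sender:  xd->local_uuid    */
	sketch_uuid_t target_uuid;	/* peer:    xd->remote_uuid   */
	uint64_t route;			/* XDomain route string       */
	uint32_t type;			/* LOGIN, LOGIN_RESPONSE, ... */
	uint32_t length;
	uint8_t  sequence;
	/* payload part */
	uint32_t transmit_path;		/* TBNET_LOCAL_PATH (0xf)     */
};

/* Same sequence as tbnet_login_request(): clear the packet, copy the
 * addressing information from the XDomain connection, then hand it to the
 * transport (tb_xdomain_request() in the driver, not modeled here). */
static void sketch_login_request(struct sketch_tbip_login *request,
				 uint64_t route, uint8_t sequence,
				 const sketch_uuid_t local,
				 const sketch_uuid_t remote)
{
	memset(request, 0, sizeof(*request));
	memcpy(request->initiator_uuid, local, sizeof(sketch_uuid_t));
	memcpy(request->target_uuid, remote, sizeof(sketch_uuid_t));
	request->route = route;
	request->type = 1;		/* stand-in for TBIP_LOGIN    */
	request->length = sizeof(*request);
	request->sequence = sequence;
	request->transmit_path = 0xf;	/* TBNET_LOCAL_PATH           */
}

int main(void)
{
	struct sketch_tbip_login req;
	sketch_uuid_t local = { 0 }, remote = { 0 };

	sketch_login_request(&req, 0x30301, 0, local, remote);
	return 0;
}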
325 for (i = 0; i < TBNET_RING_SIZE; i++) { in tbnet_free_buffers()
337 order = 0; in tbnet_free_buffers()
353 ring->cons = 0; in tbnet_free_buffers()
354 ring->prod = 0; in tbnet_free_buffers()
369 while (send_logout && retries-- > 0) { in tbnet_tear_down()
380 if (tb_xdomain_disable_paths(net->xd)) in tbnet_tear_down()
384 net->login_retries = 0; in tbnet_tear_down()
396 int ret = 0; in tbnet_handle_packet()
402 return 0; in tbnet_handle_packet()
403 if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid)) in tbnet_handle_packet()
404 return 0; in tbnet_handle_packet()
405 if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid)) in tbnet_handle_packet()
406 return 0; in tbnet_handle_packet()
410 if (route != net->xd->route) in tbnet_handle_packet()
411 return 0; in tbnet_handle_packet()
435 net->login_retries = 0; in tbnet_handle_packet()
437 &net->login_work, 0); in tbnet_handle_packet()
452 return 0; in tbnet_handle_packet()
480 /* Allocate page (order > 0) so that it can hold maximum in tbnet_alloc_rx_buffers()
490 dma_addr = dma_map_page(dma_dev, tf->page, 0, in tbnet_alloc_rx_buffers()
505 return 0; in tbnet_alloc_rx_buffers()
525 tf->frame.size = 0; in tbnet_get_tx_buffer()
552 for (i = 0; i < TBNET_RING_SIZE; i++) { in tbnet_alloc_tx_buffers()
562 dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE, in tbnet_alloc_tx_buffers()
578 ring->cons = 0; in tbnet_alloc_tx_buffers()
581 return 0; in tbnet_alloc_tx_buffers()
603 ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH, in tbnet_connected_work()
652 net->login_retries = 0; in tbnet_login_work()
728 if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) { in tbnet_check_frame()
732 if (frame_index != 0) { in tbnet_check_frame()
745 unsigned int rx_packets = 0; in tbnet_poll()
763 cleaned_count = 0; in tbnet_poll()
851 struct tb_xdomain *xd = net->xd; in tbnet_open() local
857 ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE, in tbnet_open()
868 ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, in tbnet_open()
882 return 0; in tbnet_open()
899 return 0; in tbnet_stop()
905 struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page); in tbnet_xmit_csum_and_map()
918 for (i = 0; i < frame_count; i++) { in tbnet_xmit_csum_and_map()
947 *ipcso = 0; in tbnet_xmit_csum_and_map()
959 ip_hdr(skb)->daddr, 0, in tbnet_xmit_csum_and_map()
960 ip_hdr(skb)->protocol, 0); in tbnet_xmit_csum_and_map()
964 &ipv6_hdr(skb)->daddr, 0, in tbnet_xmit_csum_and_map()
965 IPPROTO_TCP, 0); in tbnet_xmit_csum_and_map()
970 &ipv6_hdr(skb)->daddr, 0, in tbnet_xmit_csum_and_map()
971 ipv6_hdr(skb)->nexthdr, 0); in tbnet_xmit_csum_and_map()
979 for (i = 0; i < frame_count; i++) { in tbnet_xmit_csum_and_map()
986 offset = 0; in tbnet_xmit_csum_and_map()
994 for (i = 0; i < frame_count; i++) { in tbnet_xmit_csum_and_map()
1021 unsigned int frag = 0; in tbnet_start_xmit()
1023 u32 frame_index = 0; in tbnet_start_xmit()
1075 } else if (unlikely(size_left > 0)) { in tbnet_start_xmit()
1078 } while (size_left > 0); in tbnet_start_xmit()
1111 } else if (unlikely(data_len > 0)) { in tbnet_start_xmit()
1124 for (i = 0; i < frame_index + 1; i++) in tbnet_start_xmit()
1176 const struct tb_xdomain *xd = net->xd; in tbnet_generate_mac() local
1180 phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route)); in tbnet_generate_mac()
1183 dev->dev_addr[0] = phy_port << 4 | 0x02; in tbnet_generate_mac()
1184 hash = jhash2((u32 *)xd->local_uuid, 4, 0); in tbnet_generate_mac()
1186 hash = jhash2((u32 *)xd->local_uuid, 4, hash); in tbnet_generate_mac()
1187 dev->dev_addr[5] = hash & 0xff; in tbnet_generate_mac()
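Reading the tbnet_generate_mac() fragments together: byte 0 of the MAC packs the physical port into the high nibble and sets the locally administered unicast bit (0x02), the local XDomain UUID is hashed with jhash2() to fill the middle bytes (the copy of those bytes sits on a line that does not match this search and so is not shown), and a second hash seeded with the first supplies the last byte. A rough userspace sketch follows; the hash function is a trivial stand-in for jhash2(), and the UUID words and port number are made up.

/* Userspace sketch of the MAC derivation above.  sketch_hash32() is only a
 * placeholder for the kernel's jhash2(); just the layout of the six address
 * bytes follows the driver fragments. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t sketch_hash32(const uint32_t *words, unsigned int n,
			      uint32_t seed)
{
	uint32_t h = seed ^ 0x9e3779b9u;
	unsigned int i;

	for (i = 0; i < n; i++) {
		h ^= words[i];
		h *= 0x01000193u;	/* FNV-style mix, illustrative only */
	}
	return h;
}

int main(void)
{
	uint32_t uuid[4] = { 0x12345678, 0x9abcdef0, 0x0fedcba9, 0x87654321 };
	uint8_t mac[6];
	uint8_t phy_port = 1;		/* from TBNET_L0_PORT_NUM(xd->route) */
	uint32_t hash;

	/* Locally administered unicast address; port in the high nibble. */
	mac[0] = phy_port << 4 | 0x02;
	hash = sketch_hash32(uuid, 4, 0);
	memcpy(mac + 1, &hash, sizeof(hash));
	hash = sketch_hash32(uuid, 4, hash);
	mac[5] = hash & 0xff;

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}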
1192 struct tb_xdomain *xd = tb_service_parent(svc); in tbnet_probe() local
1208 atomic_set(&net->command_id, 0); in tbnet_probe()
1209 atomic_set(&net->frame_id, 0); in tbnet_probe()
1212 net->xd = xd; in tbnet_probe()
1257 return 0; in tbnet_probe()
1286 return 0; in tbnet_suspend()
1302 return 0; in tbnet_resume()
1338 /* Currently only announce support for match frags ID (bit 1). Bit 0 in tbnet_init()