
Searched full:tx (Results 1 – 25 of 200) sorted by relevance


/qemu/hw/i386/kvm/
trace-events
6 xenstore_error(unsigned int id, unsigned int tx_id, const char *err) "req %u tx %u err %s"
7 xenstore_read(unsigned int tx_id, const char *path) "tx %u path %s"
8 xenstore_write(unsigned int tx_id, const char *path) "tx %u path %s"
9 xenstore_mkdir(unsigned int tx_id, const char *path) "tx %u path %s"
10 xenstore_directory(unsigned int tx_id, const char *path) "tx %u path %s"
11 xenstore_directory_part(unsigned int tx_id, const char *path, unsigned int offset) "tx %u path %s o…
13 xenstore_transaction_end(unsigned int tx_id, bool commit) "tx %u commit %d"
14 xenstore_rm(unsigned int tx_id, const char *path) "tx %u path %s"
15 xenstore_get_perms(unsigned int tx_id, const char *path) "tx %u path %s"
16 xenstore_set_perms(unsigned int tx_id, const char *path) "tx %u path %s"
xenstore_impl.c
79 XsTransaction *tx = value; in nobble_tx() local
81 if (tx->base_tx == *new_tx_id) { in nobble_tx()
83 tx->base_tx = XBT_NULL; in nobble_tx()
91 /* Find the next TX id which isn't either XBT_NULL or in use. */ in next_tx()
99 * is based on the (previous incarnation of the) newly-allocated TX id. in next_tx()
750 * the main tree was changed, bump its tx ID so that outstanding in xs_node_walk()
761 XsTransaction *tx = g_hash_table_lookup(op->s->transactions, in xs_node_walk() local
763 assert(tx); in xs_node_walk()
764 tx->nr_nodes = op->new_nr_nodes; in xs_node_walk()
856 XsTransaction *tx = g_hash_table_lookup(s->transactions, in init_walk_op() local
[all …]
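
The xenstore_impl.c hits above revolve around transaction (TX) ID handling. As a rough illustration of the "find the next TX id which isn't XBT_NULL or in use" idea, here is a minimal glib-based sketch; sketch_next_tx_id is a hypothetical helper, not QEMU's next_tx():

#include <glib.h>

/*
 * Hypothetical sketch of a "next transaction ID" allocator: keep
 * incrementing, but skip the reserved XBT_NULL value (0) and any ID
 * already present in the transactions hash table.
 */
static unsigned int sketch_next_tx_id(GHashTable *transactions,
                                      unsigned int last_id)
{
    unsigned int id = last_id;

    do {
        id++;   /* unsigned wrap-around is fine */
    } while (id == 0 /* XBT_NULL */ ||
             g_hash_table_contains(transactions, GUINT_TO_POINTER(id)));

    return id;
}
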
/qemu/hw/net/
e1000_regs.h
47 #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
49 #define E1000_TBT 0x00448 /* TX Burst Timer - RW */
90 #define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */
92 #define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */
94 #define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */
96 #define E1000_TDH 0x03810 /* TX Descriptor Head - RW */
98 #define E1000_TDT 0x03818 /* TX Descriptor Tail - RW */
100 #define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */
102 #define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
103 #define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
[all …]
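
The TDBAL/TDBAH/TDLEN/TDH/TDT registers listed above describe one TX descriptor ring. The standalone sketch below (local helpers, not QEMU code) shows how they are conventionally combined, assuming legacy 16-byte descriptors:

#include <stdint.h>

/* Ring base is the 64-bit concatenation of TDBAH (high) and TDBAL (low). */
static inline uint64_t tx_ring_base(uint32_t tdbal, uint32_t tdbah)
{
    return ((uint64_t)tdbah << 32) | tdbal;
}

/* TDLEN is the ring size in bytes; legacy TX descriptors are 16 bytes. */
static inline uint32_t tx_ring_entries(uint32_t tdlen)
{
    return tdlen / 16;
}

/* Descriptors between head (TDH) and tail (TDT) are owned by the device. */
static inline uint32_t tx_descs_pending(uint32_t tdh, uint32_t tdt,
                                        uint32_t entries)
{
    return (tdt >= tdh) ? tdt - tdh : tdt + entries - tdh;
}
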
igb_regs.h
121 /* TX/RX descriptor defines */
186 #define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
201 #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
202 #define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
203 #define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
204 #define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
205 #define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
208 #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
210 #define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
287 #define E1000_TCTL_EN 0x00000002 /* enable tx */
[all …]
e1000x_regs.h
141 #define E1000_TCTL 0x00400 /* TX Control - RW */
142 #define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
143 #define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
162 #define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
164 #define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
166 #define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
167 #define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */
168 #define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
180 #define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
183 #define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
[all …]
sungem.c
38 #define GREG_STAT_TXINTME 0x00000001 /* TX INTME frame transferred */
39 #define GREG_STAT_TXALL 0x00000002 /* All TX frames transferred */
40 #define GREG_STAT_TXDONE 0x00000004 /* One TX frame transferred */
44 #define GREG_STAT_TXMAC 0x00004000 /* TX MAC signalled interrupt */
65 #define GREG_SWRST_TXRST 0x00000001 /* TX Software Reset */
69 /* TX DMA Registers */
72 #define TXDMA_KICK 0x0000UL /* TX Kick Register */
74 #define TXDMA_CFG 0x0004UL /* TX Configuration Register */
75 #define TXDMA_CFG_ENABLE 0x00000001 /* Enable TX DMA channel */
76 #define TXDMA_CFG_RINGSZ 0x0000001e /* TX descriptor ring size */
[all …]
e1000.c
110 } tx; member
392 memset(&d->tx, 0, sizeof d->tx); in e1000_reset_hold()
575 unsigned int frames = s->tx.tso_frames, css, sofar; in xmit_seg()
576 struct e1000_tx *tp = &s->tx; in xmit_seg()
632 e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size + 4); in xmit_seg()
634 e1000x_grow_8reg_if_not_full(s->mac_reg, GOTCL, s->tx.size + 4); in xmit_seg()
647 struct e1000_tx *tp = &s->tx; in process_tx_desc()
760 DBGOUT(TX, "tx disabled\n"); in start_xmit()
764 if (s->tx.busy) { in start_xmit()
767 s->tx.busy = true; in start_xmit()
[all …]
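
The s->tx.busy check in start_xmit() above reads as a re-entrancy guard (sending a packet can loop back into the device model). A generic sketch of such a guard, with hypothetical names rather than the e1000 fields:

#include <stdbool.h>

/* Hypothetical re-entrancy guard: refuse to start a second transmit
 * pass while one is already draining the descriptor ring. */
struct tx_path {
    bool busy;
};

static void start_xmit_sketch(struct tx_path *tx, void (*drain_ring)(void))
{
    if (tx->busy) {
        return;             /* already inside a transmit pass */
    }
    tx->busy = true;
    drain_ring();           /* may indirectly re-enter this function */
    tx->busy = false;
}
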
trace-events
7 allwinner_sun8i_emac_transmit(uint32_t desc, uint32_t paddr, uint32_t bytes) "TX packet: desc=0x%" …
44 open_eth_start_xmit(uint32_t addr, unsigned len, unsigned tx_len) "TX: 0x%08x, len: %u, tx_len: %u"
134 …todetect: %d, Speed: %d, Force speed: %d, Force duplex: %d, RX flow control %d, TX flow control %d"
135 …todetect: %d, Speed: %d, Force speed: %d, Force duplex: %d, RX flow control %d, TX flow control %d"
145 e1000e_wrn_no_ts_support(void) "WARNING: Guest requested TX timestamping which is not supported"
146 e1000e_wrn_no_snap_support(void) "WARNING: Guest requested TX SNAP header update which is not suppo…
151 e1000e_tx_disabled(void) "TX Disabled"
322 sungem_tx_checksum(uint16_t start, uint16_t off) "TX checksumming from byte %d, inserting at %d"
323 sungem_tx_checksum_oob(void) "TX checksum out of packet bounds"
324 sungem_tx_unfinished(void) "TX packet started without finishing the previous one"
[all …]
igb_core.c
407 igb_rss_parse_packet(IGBCore *core, struct NetRxPkt *pkt, bool tx, in igb_rss_parse_packet() argument
412 if (tx || !igb_rss_enabled(core)) { in igb_rss_parse_packet()
438 igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx, in igb_tx_insert_vlan() argument
454 net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, vlan, in igb_tx_insert_vlan()
460 igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx) in igb_setup_tx_offloads() argument
462 uint32_t idx = (tx->first_olinfo_status >> 4) & 1; in igb_setup_tx_offloads()
464 if (tx->first_cmd_type_len & E1000_ADVTXD_DCMD_TSE) { in igb_setup_tx_offloads()
465 uint32_t mss = tx->ctx[idx].mss_l4len_idx >> E1000_ADVTXD_MSS_SHIFT; in igb_setup_tx_offloads()
466 if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, mss)) { in igb_setup_tx_offloads()
470 net_tx_pkt_update_ip_checksums(tx->tx_pkt); in igb_setup_tx_offloads()
[all …]
net_tx_pkt.h
2 * QEMU TX packets abstraction
33 * Init function for tx packet functionality
36 * @max_frags: max tx ip fragments
41 * Clean all tx packet resources.
154 * reset tx packet private context (needed to be called between packets)
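
net_tx_pkt.h documents a TX packet abstraction: init with a max fragment count, per-packet build and send, a reset between packets, and a final cleanup. The header-style sketch below mirrors that lifecycle with hypothetical names and signatures; the real net_tx_pkt_* prototypes should be taken from the header itself.

/* Hypothetical mirror of the documented lifecycle; not the QEMU API. */
struct txpkt;                                      /* opaque, like NetTxPkt */

struct txpkt *txpkt_new(unsigned int max_frags);   /* "init", max tx ip fragments */
void txpkt_add_fragment(struct txpkt *p, const void *buf, unsigned int len);
void txpkt_send(struct txpkt *p);
void txpkt_reset(struct txpkt *p);                 /* call between packets */
void txpkt_free(struct txpkt *p);                  /* "clean all resources" */

static inline void txpkt_one_frame(struct txpkt *p,
                                   const void *frame, unsigned int len)
{
    txpkt_add_fragment(p, frame, len);
    txpkt_send(p);
    txpkt_reset(p);        /* private context must be reset between packets */
}
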
opencores_eth.c
485 static void open_eth_start_xmit(OpenEthState *s, desc *tx) argument
489 unsigned len = GET_FIELD(tx->len_flags, TXD_LEN);
492 if ((tx->len_flags & TXD_PAD) &&
501 trace_open_eth_start_xmit(tx->buf_ptr, len, tx_len);
511 cpu_physical_memory_read(tx->buf_ptr, buf, len);
520 if (tx->len_flags & TXD_WR) {
528 tx->len_flags &= ~(TXD_RD | TXD_UR |
530 if (tx->len_flags & TXD_IRQ) {
538 desc *tx = tx_desc(s); local
540 (tx->len_flags & TXD_RD) &&
[all …]
e1000e_core.c
603 e1000e_setup_tx_offloads(E1000ECore *core, struct e1000e_tx *tx) in e1000e_setup_tx_offloads() argument
605 if (tx->props.tse && tx->cptse) { in e1000e_setup_tx_offloads()
606 if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss)) { in e1000e_setup_tx_offloads()
610 net_tx_pkt_update_ip_checksums(tx->tx_pkt); in e1000e_setup_tx_offloads()
615 if (tx->sum_needed & E1000_TXD_POPTS_TXSM) { in e1000e_setup_tx_offloads()
616 if (!net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0)) { in e1000e_setup_tx_offloads()
621 if (tx->sum_needed & E1000_TXD_POPTS_IXSM) { in e1000e_setup_tx_offloads()
622 net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt); in e1000e_setup_tx_offloads()
638 e1000e_tx_pkt_send(E1000ECore *core, struct e1000e_tx *tx, int queue_index) in e1000e_tx_pkt_send() argument
643 if (!e1000e_setup_tx_offloads(core, tx)) { in e1000e_tx_pkt_send()
[all …]
/qemu/tests/qtest/
tpm-tests.c
32 void tpm_test_swtpm_test(const char *src_tpm_path, tx_func *tx, in tpm_test_swtpm_test() argument
59 tpm_util_startup(s, tx); in tpm_test_swtpm_test()
60 tpm_util_pcrextend(s, tx); in tpm_test_swtpm_test()
67 tpm_util_pcrread(s, tx, tpm_pcrread_resp, in tpm_test_swtpm_test()
79 const char *uri, tx_func *tx, in tpm_test_swtpm_migration_test() argument
105 tpm_util_startup(src_qemu, tx); in tpm_test_swtpm_migration_test()
106 tpm_util_pcrextend(src_qemu, tx); in tpm_test_swtpm_migration_test()
113 tpm_util_pcrread(src_qemu, tx, tpm_pcrread_resp, in tpm_test_swtpm_migration_test()
119 tpm_util_pcrread(dst_qemu, tx, tpm_pcrread_resp, in tpm_test_swtpm_migration_test()
tpm-util.h
31 void tpm_util_startup(QTestState *s, tx_func *tx);
32 void tpm_util_pcrextend(QTestState *s, tx_func *tx);
33 void tpm_util_pcrread(QTestState *s, tx_func *tx,
ahci-test.c
858 unsigned char *tx = g_malloc(bufsize); in ahci_test_io_rw_simple() local
873 generate_pattern(tx, bufsize, AHCI_SECTOR_SIZE); in ahci_test_io_rw_simple()
874 qtest_bufwrite(ahci->parent->qts, ptr, tx, bufsize); in ahci_test_io_rw_simple()
883 g_assert_cmphex(memcmp(tx, rx, bufsize), ==, 0); in ahci_test_io_rw_simple()
886 g_free(tx); in ahci_test_io_rw_simple()
1027 unsigned char *tx = g_malloc(bufsize); in test_dma_fragmented() local
1036 generate_pattern(tx, bufsize, AHCI_SECTOR_SIZE); in test_dma_fragmented()
1041 qtest_bufwrite(ahci->parent->qts, ptr, tx, bufsize); in test_dma_fragmented()
1061 g_assert_cmphex(memcmp(tx, rx, bufsize), ==, 0); in test_dma_fragmented()
1066 g_free(tx); in test_dma_fragmented()
[all …]
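
Both AHCI tests above follow the same shape: fill a tx buffer with a deterministic pattern, push it through the device, read it back into rx, and require a byte-exact match. A generic, device-independent sketch of that check, with a hypothetical roundtrip callback standing in for the qtest/AHCI plumbing:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

static void fill_pattern(unsigned char *buf, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        buf[i] = (unsigned char)(i ^ (i >> 8));   /* arbitrary but deterministic */
    }
}

static void verify_roundtrip(size_t bufsize,
                             void (*roundtrip)(const unsigned char *tx,
                                               unsigned char *rx, size_t len))
{
    unsigned char *tx = malloc(bufsize);
    unsigned char *rx = calloc(1, bufsize);

    fill_pattern(tx, bufsize);
    roundtrip(tx, rx, bufsize);                   /* write to device, read back */
    assert(memcmp(tx, rx, bufsize) == 0);

    free(tx);
    free(rx);
}
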
tpm-util.c
54 void tpm_util_startup(QTestState *s, tx_func *tx) in tpm_util_startup() argument
62 tx(s, tpm_startup, sizeof(tpm_startup), buffer, sizeof(buffer)); in tpm_util_startup()
68 void tpm_util_pcrextend(QTestState *s, tx_func *tx) in tpm_util_pcrextend() argument
82 tx(s, tpm_pcrextend, sizeof(tpm_pcrextend), buffer, sizeof(buffer)); in tpm_util_pcrextend()
88 void tpm_util_pcrread(QTestState *s, tx_func *tx, in tpm_util_pcrread() argument
96 tx(s, tpm_pcrread, sizeof(tpm_pcrread), buffer, sizeof(buffer)); in tpm_util_pcrread()
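
The TPM tests are parameterised by a tx_func callback so the same command/response logic can run over different transports. The sketch below assumes a callback shape like the one visible in the calls above (request buffer in, response buffer out); it is not the exact typedef from tpm-util.h, and the command bytes are illustrative:

#include <stddef.h>

/* Assumed callback shape: send req, receive the response into rsp. */
typedef void (sketch_tx_func)(void *state,
                              const unsigned char *req, size_t req_size,
                              unsigned char *rsp, size_t rsp_size);

static void sketch_startup(void *state, sketch_tx_func *tx)
{
    /* TPM2_Startup(SU_CLEAR), to the best of this sketch's reading. */
    static const unsigned char tpm2_startup[] = {
        0x80, 0x01,             /* TPM_ST_NO_SESSIONS */
        0x00, 0x00, 0x00, 0x0c, /* commandSize = 12 */
        0x00, 0x00, 0x01, 0x44, /* TPM_CC_Startup */
        0x00, 0x00              /* TPM_SU_CLEAR */
    };
    unsigned char rsp[64];

    tx(state, tpm2_startup, sizeof(tpm2_startup), rsp, sizeof(rsp));
}
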
/qemu/hw/char/
bcm2835_aux.c
14 * At present only the core UART functions (data path for tx/rx) are
54 * 2. the tx interrupt is enabled (since we instantly drain the tx fifo) in bcm2835_aux_update()
97 /* The spec is unclear on what happens when both tx and rx in bcm2835_aux_read()
100 * the tx fifo is always empty. */ in bcm2835_aux_read()
120 res = 0x60; /* tx idle, empty */ in bcm2835_aux_read()
135 return 0x3; /* tx, rx enabled */ in bcm2835_aux_read()
138 res = 0x30e; /* space in the output buffer, empty tx fifo, idle tx/rx */ in bcm2835_aux_read()
/qemu/net/
af-xdp.c
37 struct xsk_ring_prod tx; member
134 if (!s->outstanding_tx || !xsk_ring_prod__needs_wakeup(&s->tx)) { in af_xdp_writable()
158 if (!s->n_pool || !xsk_ring_prod__reserve(&s->tx, 1, &idx)) { in af_xdp_receive()
160 * Out of buffers or space in tx ring. Poll until we can write. in af_xdp_receive()
161 * This will also kick the Tx, if it was waiting on CQ. in af_xdp_receive()
167 desc = xsk_ring_prod__tx_desc(&s->tx, idx); in af_xdp_receive()
174 xsk_ring_prod__submit(&s->tx, 1); in af_xdp_receive()
177 if (xsk_ring_prod__needs_wakeup(&s->tx)) { in af_xdp_receive()
199 /* Leave one packet for Tx, just in case. */ in af_xdp_fq_refill()
300 /* Number of descriptors if all 4 queues (rx, tx, cq, fq) are full. */ in af_xdp_umem_create()
[all …]
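
The af-xdp.c hits show the standard AF_XDP TX producer-ring flow: reserve a slot, fill the descriptor, submit, and kick the kernel if the ring needs a wakeup. A condensed sketch using the libxdp/libbpf xsk helpers; the header name is hedged in the comment, error handling is glossed over, and frame_addr is assumed to be a umem frame already holding the packet:

/* #include <xdp/xsk.h> with libxdp, or <bpf/xsk.h> with older libbpf */
#include <stdint.h>
#include <sys/socket.h>

static void xsk_tx_one(struct xsk_socket *xsk, struct xsk_ring_prod *tx,
                       uint64_t frame_addr, uint32_t len)
{
    uint32_t idx;

    if (!xsk_ring_prod__reserve(tx, 1, &idx)) {
        return;                                  /* TX ring is full */
    }

    struct xdp_desc *desc = xsk_ring_prod__tx_desc(tx, idx);
    desc->addr = frame_addr;
    desc->len = len;

    xsk_ring_prod__submit(tx, 1);

    if (xsk_ring_prod__needs_wakeup(tx)) {
        /* Kick the kernel so it starts draining the TX ring. */
        sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
    }
}
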
/qemu/ui/
vnc-enc-zrle.c.inc
80 int tx, th;
84 for (tx = x; tx < x + w; tx += VNC_ZRLE_TILE_WIDTH) {
88 tw = MIN(VNC_ZRLE_TILE_WIDTH, x + w - tx);
90 buf = zrle_convert_fb(vs, tx, ty, tw, th, ZRLE_BPP);
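
The ZRLE loop above walks the dirty rectangle in fixed-size tiles and clamps the last tile in each direction. Stripped of the VNC specifics, the traversal looks like the sketch below, with TILE standing in for VNC_ZRLE_TILE_WIDTH/HEIGHT (64 in ZRLE):

#define TILE 64
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void for_each_tile(int x, int y, int w, int h,
                          void (*visit)(int tx, int ty, int tw, int th))
{
    for (int ty = y; ty < y + h; ty += TILE) {
        int th = MIN(TILE, y + h - ty);          /* clamp the bottom row */
        for (int tx = x; tx < x + w; tx += TILE) {
            int tw = MIN(TILE, x + w - tx);      /* clamp the right column */
            visit(tx, ty, tw, th);
        }
    }
}
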
/qemu/hw/ssi/
imx_spi.c
164 uint32_t tx; in imx_spi_flush_txfifo() local
167 DPRINTF("Begin: TX Fifo Size = %d, RX Fifo Size = %d\n", in imx_spi_flush_txfifo()
183 tx = fifo32_pop(&s->tx_fifo); in imx_spi_flush_txfifo()
185 DPRINTF("data tx:0x%08x\n", tx); in imx_spi_flush_txfifo()
192 uint8_t byte = tx >> (tx_burst - 8); in imx_spi_flush_txfifo()
231 DPRINTF("End: TX Fifo Size = %d, RX Fifo Size = %d\n", in imx_spi_flush_txfifo()
307 "[%s]%s: Trying to read from TX FIFO\n", in imx_spi_read()
414 /* SMC bit is set and TX FIFO has some slots filled in */ in imx_spi_write()
pnv_spi.c
196 uint32_t tx, rx, payload_len; in transfer() local
201 tx = 0; in transfer()
204 tx <<= 8; in transfer()
206 tx = (tx << 8) | fifo8_pop(&s->tx_fifo); in transfer()
211 rx = ssi_transfer(s->ssi_bus, tx); in transfer()
248 * If M != 0 the shift count is M bytes and M is the number of tx bytes. in calculate_N1()
250 * M is the shift count but tx and rx is determined by the count control in calculate_N1()
263 /* If tx count control for N1 is set, load the tx value */ in calculate_N1()
285 * indicate transmit then reset the tx count to 0 in calculate_N1()
351 * whether N1 is used for tx, rx or both. Loop over the size to build a in operation_shiftn1()
[all …]
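
Both SPI models above shuffle bytes between a FIFO and a 32-bit shift word MSB-first: pnv_spi builds the word with tx = (tx << 8) | fifo8_pop(...), and imx_spi peels the next byte off the top of the remaining burst with tx >> (tx_burst - 8). A self-contained restatement of that arithmetic:

#include <stdint.h>

/* Pack up to 4 bytes MSB-first into a 32-bit transmit word. */
static uint32_t pack_msb_first(const uint8_t *bytes, int n)
{
    uint32_t tx = 0;

    for (int i = 0; i < n; i++) {
        tx = (tx << 8) | bytes[i];
    }
    return tx;
}

/* With 8 <= burst_bits_left <= 32, the next byte to shift out sits just
 * below the top of the remaining burst. */
static uint8_t next_tx_byte(uint32_t tx, int burst_bits_left)
{
    return (uint8_t)(tx >> (burst_bits_left - 8));
}
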
/qemu/include/hw/audio/
virtio-snd.h
87 * In the case of TX (i.e. playback) buffers, we defer reading the raw PCM data
110 * In TX / Playback, `offset` represents the first unused position inside
114 /* Used for the TX queue for lazy I/O copy from `elem` */
226 * Convenience queue to keep track of invalid tx/rx queue messages inside
227 * the tx/rx callbacks.
236 * This queue must be empty at all times except for inside the tx/rx
/qemu/tests/bench/
atomic64-bench.c
121 double tx; in pr_stats() local
127 tx = val / duration / 1e6; in pr_stats()
131 printf(" Throughput: %.2f Mops/s\n", tx); in pr_stats()
132 printf(" Throughput/thread: %.2f Mops/s/thread\n", tx / n_threads); in pr_stats()
atomic_add-bench.c
128 double tx; in pr_stats() local
133 tx = val / duration / 1e6; in pr_stats()
137 printf(" Throughput: %.2f Mops/s\n", tx); in pr_stats()
138 printf(" Throughput/thread: %.2f Mops/s/thread\n", tx / n_threads); in pr_stats()
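
Both benchmarks report the same derived figure: tx is operations divided by wall-clock seconds, scaled to millions (Mops/s), optionally divided again by the thread count. Restated on its own as a small standalone helper:

#include <stdio.h>

static void pr_throughput(double ops, double duration_s, unsigned int n_threads)
{
    double mops = ops / duration_s / 1e6;

    printf(" Throughput: %.2f Mops/s\n", mops);
    printf(" Throughput/thread: %.2f Mops/s/thread\n", mops / n_threads);
}
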
/qemu/include/hw/char/
imx_serial.h
39 #define USR1_TRDY (1<<13) /* Tx ready */
67 #define UCR1_TRDYEN (1<<13) /* Tx Ready Interrupt Enable */
69 #define UCR1_TXMPTYEN (1<<6) /* Tx Empty Interrupt Enable */
79 #define UCR4_TCEN BIT(3) /* TX complete interrupt enable */
