Lines Matching refs:hw

12  *  @hw: pointer to the hardware structure
14 static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
18 ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
20 (hw->aq.num_asq_entries *
26 ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
27 (hw->aq.num_asq_entries *
30 iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
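
For context, these matches come from a two-step allocation with the usual unwind idiom: the descriptor ring is one DMA-coherent block, the per-command details array is plain virtual memory, and the ring is released again if the second allocation fails. A minimal sketch of how the fragments fit together (the iavf_mem_atq_ring type argument, the struct libie_aq_desc element size, and the details struct are filled in from the surrounding driver, not from the matches above):

    static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
    {
    	enum iavf_status ret_code;

    	/* one DMA-coherent block holds every send-queue descriptor */
    	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
    					 iavf_mem_atq_ring,
    					 (hw->aq.num_asq_entries *
    					 sizeof(struct libie_aq_desc)),
    					 IAVF_ADMINQ_DESC_ALIGNMENT);
    	if (ret_code)
    		return ret_code;

    	/* per-command bookkeeping needs no DMA, plain memory is enough */
    	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
    					  (hw->aq.num_asq_entries *
    					  sizeof(struct iavf_asq_cmd_details)));
    	if (ret_code)
    		/* unwind so the descriptor ring cannot leak */
    		iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

    	return ret_code;
    }
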
39 * @hw: pointer to the hardware structure
41 static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
45 ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
47 (hw->aq.num_arq_entries *
56 * @hw: pointer to the hardware structure
61 static void iavf_free_adminq_asq(struct iavf_hw *hw)
63 iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
68 * @hw: pointer to the hardware structure
73 static void iavf_free_adminq_arq(struct iavf_hw *hw)
75 iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
80 * @hw: pointer to the hardware structure
82 static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
94 ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
95 (hw->aq.num_arq_entries *
99 hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;
102 for (i = 0; i < hw->aq.num_arq_entries; i++) {
103 bi = &hw->aq.arq.r.arq_bi[i];
104 ret_code = iavf_allocate_dma_mem(hw, bi,
106 hw->aq.arq_buf_size,
112 desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
115 if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
140 iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
141 iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
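
The receive side pre-allocates one DMA buffer per descriptor and writes each buffer's address into its descriptor up front (flagging buffers larger than IAVF_AQ_LARGE_BUF), so firmware can post events immediately; dma_head holds the array of iavf_dma_mem headers so every buffer can be found again at free time. The failure path at lines 140-141 walks backwards over whatever was already allocated. A sketch of that unwind, with the label and loop filled in from the surrounding driver (illustrative):

    unwind_alloc_arq_bufs:
    	/* i is the index whose allocation failed; free [0, i) in reverse */
    	i--;
    	for (; i >= 0; i--)
    		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
    	/* then drop the header array itself */
    	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
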
148 * @hw: pointer to the hardware structure
150 static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
157 ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
158 (hw->aq.num_asq_entries *
162 hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;
165 for (i = 0; i < hw->aq.num_asq_entries; i++) {
166 bi = &hw->aq.asq.r.asq_bi[i];
167 ret_code = iavf_allocate_dma_mem(hw, bi,
169 hw->aq.asq_buf_size,
181 iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
182 iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
189 * @hw: pointer to the hardware structure
191 static void iavf_free_arq_bufs(struct iavf_hw *hw)
196 for (i = 0; i < hw->aq.num_arq_entries; i++)
197 iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
200 iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
203 iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
208 * @hw: pointer to the hardware structure
210 static void iavf_free_asq_bufs(struct iavf_hw *hw)
215 for (i = 0; i < hw->aq.num_asq_entries; i++)
216 if (hw->aq.asq.r.asq_bi[i].pa)
217 iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
220 iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
223 iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
226 iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
231 * @hw: pointer to the hardware structure
235 static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
241 wr32(hw, IAVF_VF_ATQH1, 0);
242 wr32(hw, IAVF_VF_ATQT1, 0);
245 wr32(hw, IAVF_VF_ATQLEN1, (hw->aq.num_asq_entries |
247 wr32(hw, IAVF_VF_ATQBAL1, lower_32_bits(hw->aq.asq.desc_buf.pa));
248 wr32(hw, IAVF_VF_ATQBAH1, upper_32_bits(hw->aq.asq.desc_buf.pa));
251 reg = rd32(hw, IAVF_VF_ATQBAL1);
252 if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
260 * @hw: pointer to the hardware structure
264 static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
270 wr32(hw, IAVF_VF_ARQH1, 0);
271 wr32(hw, IAVF_VF_ARQT1, 0);
274 wr32(hw, IAVF_VF_ARQLEN1, (hw->aq.num_arq_entries |
276 wr32(hw, IAVF_VF_ARQBAL1, lower_32_bits(hw->aq.arq.desc_buf.pa));
277 wr32(hw, IAVF_VF_ARQBAH1, upper_32_bits(hw->aq.arq.desc_buf.pa));
280 wr32(hw, IAVF_VF_ARQT1, hw->aq.num_arq_entries - 1);
283 reg = rd32(hw, IAVF_VF_ARQBAL1);
284 if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
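
One asymmetry worth noting between lines 241-252 and 270-284: the send queue leaves its tail at zero (nothing to transmit yet), while the receive queue immediately bumps IAVF_VF_ARQT1 to num_arq_entries - 1, handing every pre-posted buffer to the device; both paths then read the base-address register back to verify the writes landed. A sketch of the ARQ sequence with the non-matching lines filled in (the enable-mask and error-code names follow this tree's register and status definitions):

    	/* reset head and tail: an idle queue starts at 0/0 */
    	wr32(hw, IAVF_VF_ARQH1, 0);
    	wr32(hw, IAVF_VF_ARQT1, 0);

    	/* the length register also carries the queue-enable bit */
    	wr32(hw, IAVF_VF_ARQLEN1, (hw->aq.num_arq_entries |
    				   IAVF_VF_ARQLEN1_ARQENABLE_MASK));
    	wr32(hw, IAVF_VF_ARQBAL1, lower_32_bits(hw->aq.arq.desc_buf.pa));
    	wr32(hw, IAVF_VF_ARQBAH1, upper_32_bits(hw->aq.arq.desc_buf.pa));

    	/* hand all pre-posted receive buffers to the hardware */
    	wr32(hw, IAVF_VF_ARQT1, hw->aq.num_arq_entries - 1);

    	/* read one register back to confirm configuration stuck */
    	reg = rd32(hw, IAVF_VF_ARQBAL1);
    	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
    		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
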
292 * @hw: pointer to the hardware structure
296 * in the hw->aq structure:
297 * - hw->aq.num_asq_entries
298 * - hw->aq.asq_buf_size
303 static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
308 if (hw->aq.asq.count > 0) {
315 if ((hw->aq.num_asq_entries == 0) ||
316 (hw->aq.asq_buf_size == 0)) {
321 hw->aq.asq.next_to_use = 0;
322 hw->aq.asq.next_to_clean = 0;
325 ret_code = iavf_alloc_adminq_asq_ring(hw);
330 ret_code = iavf_alloc_asq_bufs(hw);
335 ret_code = iavf_config_asq_regs(hw);
340 hw->aq.asq.count = hw->aq.num_asq_entries;
344 for (i = 0; i < hw->aq.num_asq_entries; i++)
345 iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
346 iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
349 iavf_free_adminq_asq(hw);
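
These matches trace iavf_init_asq()'s ladder: refuse to re-initialize a live queue (count > 0), validate the sizing fields, reset next_to_use/next_to_clean, then allocate the ring, allocate the buffers, program the registers, and only on full success publish the queue by setting asq.count; each failure jumps to a label that unwinds exactly the steps already completed. A sketch of that control flow (label names are illustrative):

    	ret_code = iavf_alloc_adminq_asq_ring(hw);
    	if (ret_code)
    		goto init_adminq_exit;

    	ret_code = iavf_alloc_asq_bufs(hw);
    	if (ret_code)
    		goto init_adminq_free_rings;

    	ret_code = iavf_config_asq_regs(hw);
    	if (ret_code)
    		goto init_free_asq_bufs;

    	/* success: a nonzero count marks the queue as initialized */
    	hw->aq.asq.count = hw->aq.num_asq_entries;
    	goto init_adminq_exit;

    init_free_asq_bufs:
    	for (i = 0; i < hw->aq.num_asq_entries; i++)
    		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
    	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

    init_adminq_free_rings:
    	iavf_free_adminq_asq(hw);

    init_adminq_exit:
    	return ret_code;
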
357 * @hw: pointer to the hardware structure
361 * in the hw->aq structure:
362 * - hw->aq.num_arq_entries
363 * - hw->aq.arq_buf_size
368 static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
373 if (hw->aq.arq.count > 0) {
380 if ((hw->aq.num_arq_entries == 0) ||
381 (hw->aq.arq_buf_size == 0)) {
386 hw->aq.arq.next_to_use = 0;
387 hw->aq.arq.next_to_clean = 0;
390 ret_code = iavf_alloc_adminq_arq_ring(hw);
395 ret_code = iavf_alloc_arq_bufs(hw);
400 ret_code = iavf_config_arq_regs(hw);
405 hw->aq.arq.count = hw->aq.num_arq_entries;
409 for (i = 0; i < hw->aq.num_arq_entries; i++)
410 iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
411 iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
413 iavf_free_adminq_arq(hw);
421 * @hw: pointer to the hardware structure
425 static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
429 mutex_lock(&hw->aq.asq_mutex);
431 if (hw->aq.asq.count == 0) {
437 wr32(hw, IAVF_VF_ATQH1, 0);
438 wr32(hw, IAVF_VF_ATQT1, 0);
439 wr32(hw, IAVF_VF_ATQLEN1, 0);
440 wr32(hw, IAVF_VF_ATQBAL1, 0);
441 wr32(hw, IAVF_VF_ATQBAH1, 0);
443 hw->aq.asq.count = 0; /* to indicate uninitialized queue */
446 iavf_free_asq_bufs(hw);
449 mutex_unlock(&hw->aq.asq_mutex);
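
Shutdown is the mirror image, done under asq_mutex: zero the queue registers (a zero write to IAVF_VF_ATQLEN1 clears the enable bit along with the length, which is what actually stops the queue), reset count so later calls see an uninitialized queue, then free the buffers. The same register sequence, with comments added:

    	wr32(hw, IAVF_VF_ATQH1, 0);	/* reset head */
    	wr32(hw, IAVF_VF_ATQT1, 0);	/* reset tail */
    	wr32(hw, IAVF_VF_ATQLEN1, 0);	/* length 0 also clears the enable bit */
    	wr32(hw, IAVF_VF_ATQBAL1, 0);	/* clear descriptor base, low half */
    	wr32(hw, IAVF_VF_ATQBAH1, 0);	/* clear descriptor base, high half */
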
455 * @hw: pointer to the hardware structure
459 static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
463 mutex_lock(&hw->aq.arq_mutex);
465 if (hw->aq.arq.count == 0) {
471 wr32(hw, IAVF_VF_ARQH1, 0);
472 wr32(hw, IAVF_VF_ARQT1, 0);
473 wr32(hw, IAVF_VF_ARQLEN1, 0);
474 wr32(hw, IAVF_VF_ARQBAL1, 0);
475 wr32(hw, IAVF_VF_ARQBAH1, 0);
477 hw->aq.arq.count = 0; /* to indicate uninitialized queue */
480 iavf_free_arq_bufs(hw);
483 mutex_unlock(&hw->aq.arq_mutex);
489 * @hw: pointer to the hardware structure
492 * in the hw->aq structure:
493 * - hw->aq.num_asq_entries
494 * - hw->aq.num_arq_entries
495 * - hw->aq.arq_buf_size
496 * - hw->aq.asq_buf_size
498 enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
503 if ((hw->aq.num_arq_entries == 0) ||
504 (hw->aq.num_asq_entries == 0) ||
505 (hw->aq.arq_buf_size == 0) ||
506 (hw->aq.asq_buf_size == 0)) {
512 hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;
515 ret_code = iavf_init_asq(hw);
520 ret_code = iavf_init_arq(hw);
528 iavf_shutdown_asq(hw);
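
Because iavf_init_adminq() bails out if any of the four sizing fields is zero, callers must populate all of them first; the ASQ is brought up before the ARQ, and an ARQ failure tears the ASQ back down (line 528). A hedged caller-side sketch, modeled on the driver's startup path (IAVF_AQ_LEN and IAVF_MAX_AQ_BUF_SIZE are the constants the driver normally uses; the error handling is illustrative):

    	hw->aq.num_arq_entries = IAVF_AQ_LEN;
    	hw->aq.num_asq_entries = IAVF_AQ_LEN;
    	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
    	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

    	status = iavf_init_adminq(hw);
    	if (status)
    		dev_err(&adapter->pdev->dev,
    			"Failed to init Admin Queue (%d)\n", status);
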
537 * @hw: pointer to the hardware structure
539 enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
541 if (iavf_check_asq_alive(hw))
542 iavf_aq_queue_shutdown(hw, true);
544 iavf_shutdown_asq(hw);
545 iavf_shutdown_arq(hw);
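
Teardown order matters here: while the ASQ is still alive, the driver first sends a queue-shutdown request so the PF side knows this VF is going away, and only then disables both local queues. The same sequence with comments added:

    	if (iavf_check_asq_alive(hw))
    		/* tell the PF this VF's admin queue is going down */
    		iavf_aq_queue_shutdown(hw, true);

    	iavf_shutdown_asq(hw);
    	iavf_shutdown_arq(hw);
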
552 * @hw: pointer to the hardware structure
556 static u16 iavf_clean_asq(struct iavf_hw *hw)
558 struct iavf_adminq_ring *asq = &hw->aq.asq;
566 while (rd32(hw, IAVF_VF_ATQH1) != ntc) {
567 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
568 "ntc %d head %d.\n", ntc, rd32(hw, IAVF_VF_ATQH1));
574 cb_func(hw, &desc_cb);
593 * @hw: pointer to the hw struct
598 bool iavf_asq_done(struct iavf_hw *hw)
603 return rd32(hw, IAVF_VF_ATQH1) == hw->aq.asq.next_to_use;
608 * @hw: pointer to the hw struct
617 enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
631 mutex_lock(&hw->aq.asq_mutex);
633 if (hw->aq.asq.count == 0) {
634 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
640 hw->aq.asq_last_status = LIBIE_AQ_RC_OK;
642 val = rd32(hw, IAVF_VF_ATQH1);
643 if (val >= hw->aq.num_asq_entries) {
644 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
650 details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
672 if (buff_size > hw->aq.asq_buf_size) {
673 iavf_debug(hw,
682 iavf_debug(hw,
696 if (iavf_clean_asq(hw) == 0) {
697 iavf_debug(hw,
705 desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
712 dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
727 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
728 iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
730 (hw->aq.asq.next_to_use)++;
731 if (hw->aq.asq.next_to_use == hw->aq.asq.count)
732 hw->aq.asq.next_to_use = 0;
734 wr32(hw, IAVF_VF_ATQT1, hw->aq.asq.next_to_use);
746 if (iavf_asq_done(hw))
750 } while (total_delay < hw->aq.asq_cmd_timeout);
754 if (iavf_asq_done(hw)) {
760 iavf_debug(hw,
775 hw->aq.asq_last_status = (enum libie_aq_err)retval;
778 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
780 iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
789 if (rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
790 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
794 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
801 mutex_unlock(&hw->aq.asq_mutex);
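
End to end, the send path validates the queue (count != 0, head register in range), reclaims completed slots via iavf_clean_asq() and treats zero free slots as "queue full", copies the caller's descriptor onto the ring, attaches the per-slot DMA buffer for any payload, bumps IAVF_VF_ATQT1, and then polls iavf_asq_done() until the hardware head catches up or asq_cmd_timeout expires; the firmware's return code lands in hw->aq.asq_last_status, and the ATQCRIT check at line 789 distinguishes dead firmware from a plain timeout. A hedged caller sketch (the libie descriptor's field layout and the 0x0801 send-msg-to-pf opcode follow the i40e-family definitions; treat them as illustrative):

    	struct libie_aq_desc desc = {};
    	enum iavf_status status;
    	u8 msg[64] = {};

    	/* real callers use the fill-descriptor helpers for their command */
    	desc.opcode = cpu_to_le16(0x0801);	/* send_msg_to_pf, illustrative */

    	status = iavf_asq_send_command(hw, &desc, msg, sizeof(msg), NULL);
    	if (status)
    		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
    			   "AQTX failed: %d, fw status %d\n",
    			   status, hw->aq.asq_last_status);
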
822 * @hw: pointer to the hw struct
830 enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
834 u16 ntc = hw->aq.arq.next_to_clean;
847 mutex_lock(&hw->aq.arq_mutex);
849 if (hw->aq.arq.count == 0) {
850 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
857 ntu = rd32(hw, IAVF_VF_ARQH1) & IAVF_VF_ARQH1_ARQH_MASK;
865 desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
868 hw->aq.arq_last_status =
873 iavf_debug(hw,
876 hw->aq.arq_last_status);
883 memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
886 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
887 iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
888 hw->aq.arq_buf_size);
894 bi = &hw->aq.arq.r.arq_bi[ntc];
898 if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
905 wr32(hw, IAVF_VF_ARQT1, ntc);
908 if (ntc == hw->aq.num_arq_entries)
910 hw->aq.arq.next_to_clean = ntc;
911 hw->aq.arq.next_to_use = ntu;
916 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
919 mutex_unlock(&hw->aq.arq_mutex);
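
On the consumer side, the driver drains events in a loop until *pending reaches zero, reusing one buffer sized to the ARQ buffer size; iavf_clean_arq_element() copies each event out, records the firmware status in arq_last_status, scrubs and re-arms the descriptor with its DMA buffer, and returns ownership to firmware through the ARQ tail write at line 905. A hedged sketch of that consumer loop (modeled on the driver's adminq task, with dispatch and error handling trimmed):

    	struct iavf_arq_event_info event = {};
    	enum iavf_status ret;
    	u16 pending;

    	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
    	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
    	if (!event.msg_buf)
    		return;

    	do {
    		ret = iavf_clean_arq_element(hw, &event, &pending);
    		if (ret)
    			break;
    		/* ... dispatch on the received event here ... */
    	} while (pending);

    	kfree(event.msg_buf);
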