Lines Matching full:queue
12 Abstract: rt2x00 queue specific routines.
25 struct data_queue *queue = entry->queue; in rt2x00queue_alloc_rxskb() local
26 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2x00queue_alloc_rxskb()
37 frame_size = queue->data_size + queue->desc_size + queue->winfo_size; in rt2x00queue_alloc_rxskb()
95 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_map_txskb()
112 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_unmap_skb()
490 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00queue_write_tx_data()
495 * a queue corruption! in rt2x00queue_write_tx_data()
500 "Corrupt queue %d, accessing entry which is not ours\n" in rt2x00queue_write_tx_data()
502 entry->queue->qid, DRV_PROJECT); in rt2x00queue_write_tx_data()
531 struct data_queue *queue = entry->queue; in rt2x00queue_write_tx_descriptor() local
533 queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc); in rt2x00queue_write_tx_descriptor()
539 rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry); in rt2x00queue_write_tx_descriptor()
542 static void rt2x00queue_kick_tx_queue(struct data_queue *queue, in rt2x00queue_kick_tx_queue() argument
546 * Check if we need to kick the queue, there are however a few rules in rt2x00queue_kick_tx_queue()
552 in the queue are less than a certain threshold. in rt2x00queue_kick_tx_queue()
554 if (rt2x00queue_threshold(queue) || in rt2x00queue_kick_tx_queue()
556 queue->rt2x00dev->ops->lib->kick_queue(queue); in rt2x00queue_kick_tx_queue()
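The second half of the kick condition is not among the matched lines; as a rough user-space model of the rule the comment describes (the burst flag below is an illustrative assumption, not the driver's field name), it amounts to:

    #include <stdbool.h>

    struct model_queue { int length, limit, threshold; };

    /* Free entries have dropped below the pause/kick threshold. */
    static bool model_threshold(const struct model_queue *q)
    {
            return q->limit - q->length < q->threshold;
    }

    /* Kick the hardware when the frame is not part of a burst, or when
     * free space is getting tight despite an ongoing burst. */
    static bool model_should_kick(const struct model_queue *q, bool frame_in_burst)
    {
            return model_threshold(q) || !frame_in_burst;
    }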
561 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00queue_bar_check()
601 int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, in rt2x00queue_write_tx_frame() argument
616 rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta); in rt2x00queue_write_tx_frame()
641 if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV)) in rt2x00queue_write_tx_frame()
655 if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD)) in rt2x00queue_write_tx_frame()
657 else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA)) in rt2x00queue_write_tx_frame()
663 spin_lock(&queue->tx_lock); in rt2x00queue_write_tx_frame()
665 if (unlikely(rt2x00queue_full(queue))) { in rt2x00queue_write_tx_frame()
666 rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n", in rt2x00queue_write_tx_frame()
667 queue->qid); in rt2x00queue_write_tx_frame()
672 entry = rt2x00queue_get_entry(queue, Q_INDEX); in rt2x00queue_write_tx_frame()
676 rt2x00_err(queue->rt2x00dev, in rt2x00queue_write_tx_frame()
677 "Arrived at non-free entry in the non-full queue %d\n" in rt2x00queue_write_tx_frame()
679 queue->qid, DRV_PROJECT); in rt2x00queue_write_tx_frame()
687 * It could be possible that the queue was corrupted and this in rt2x00queue_write_tx_frame()
707 rt2x00queue_kick_tx_queue(queue, &txdesc); in rt2x00queue_write_tx_frame()
711 * Pausing queue has to be serialized with rt2x00lib_txdone(), so we in rt2x00queue_write_tx_frame()
712 do this under queue->tx_lock. Bottom half was already disabled in rt2x00queue_write_tx_frame()
715 if (rt2x00queue_threshold(queue)) in rt2x00queue_write_tx_frame()
716 rt2x00queue_pause_queue(queue); in rt2x00queue_write_tx_frame()
718 spin_unlock(&queue->tx_lock); in rt2x00queue_write_tx_frame()
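Only the matched lines of the submit path appear above; a toy user-space model of its queue accounting (drop when full, advance the write index, pause once free space falls below the threshold; names and structure are illustrative assumptions, not the driver's code) might look like:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_queue { int index, length, limit, threshold; bool paused; };

    static bool toy_write_frame(struct toy_queue *q)
    {
            if (q->length == q->limit)
                    return false;                  /* full queue: drop the frame */

            q->index = (q->index + 1) % q->limit;  /* advance the write index */
            q->length++;

            if (q->limit - q->length < q->threshold)
                    q->paused = true;              /* stop feeding this queue */
            return true;
    }

    int main(void)
    {
            struct toy_queue q = { .limit = 8, .threshold = 2 };

            while (toy_write_frame(&q))
                    ;
            printf("length=%d paused=%d\n", q.length, q.paused);
            return 0;
    }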
737 * since the beacon queue will get stopped anyway). in rt2x00queue_clear_beacon()
786 bool rt2x00queue_for_each_entry(struct data_queue *queue, in rt2x00queue_for_each_entry() argument
799 rt2x00_err(queue->rt2x00dev, in rt2x00queue_for_each_entry()
811 spin_lock_irqsave(&queue->index_lock, irqflags); in rt2x00queue_for_each_entry()
812 index_start = queue->index[start]; in rt2x00queue_for_each_entry()
813 index_end = queue->index[end]; in rt2x00queue_for_each_entry()
814 spin_unlock_irqrestore(&queue->index_lock, irqflags); in rt2x00queue_for_each_entry()
822 if (fn(&queue->entries[i], data)) in rt2x00queue_for_each_entry()
826 for (i = index_start; i < queue->limit; i++) { in rt2x00queue_for_each_entry()
827 if (fn(&queue->entries[i], data)) in rt2x00queue_for_each_entry()
832 if (fn(&queue->entries[i], data)) in rt2x00queue_for_each_entry()
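The loop above walks a ring segment that may wrap past the end of the entries array; a small stand-alone model of that traversal (simplified signature, for illustration only):

    #include <stdbool.h>

    /* Visit ring slots from start (inclusive) to end (exclusive), in ring
     * order, stopping early once the callback returns true. When start is
     * not below end, the range wraps around the end of the ring. */
    static bool toy_for_each(int limit, int start, int end,
                             bool (*fn)(int index, void *data), void *data)
    {
            int i;

            if (start < end) {
                    for (i = start; i < end; i++)
                            if (fn(i, data))
                                    return true;
            } else {
                    for (i = start; i < limit; i++)
                            if (fn(i, data))
                                    return true;
                    for (i = 0; i < end; i++)
                            if (fn(i, data))
                                    return true;
            }
            return false;
    }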
841 struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, in rt2x00queue_get_entry() argument
848 rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n", in rt2x00queue_get_entry()
853 spin_lock_irqsave(&queue->index_lock, irqflags); in rt2x00queue_get_entry()
855 entry = &queue->entries[queue->index[index]]; in rt2x00queue_get_entry()
857 spin_unlock_irqrestore(&queue->index_lock, irqflags); in rt2x00queue_get_entry()
865 struct data_queue *queue = entry->queue; in rt2x00queue_index_inc() local
869 rt2x00_err(queue->rt2x00dev, in rt2x00queue_index_inc()
874 spin_lock_irqsave(&queue->index_lock, irqflags); in rt2x00queue_index_inc()
876 queue->index[index]++; in rt2x00queue_index_inc()
877 if (queue->index[index] >= queue->limit) in rt2x00queue_index_inc()
878 queue->index[index] = 0; in rt2x00queue_index_inc()
883 queue->length++; in rt2x00queue_index_inc()
885 queue->length--; in rt2x00queue_index_inc()
886 queue->count++; in rt2x00queue_index_inc()
889 spin_unlock_irqrestore(&queue->index_lock, irqflags); in rt2x00queue_index_inc()
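A compact model of the bookkeeping done above (index names trimmed to the two that affect the counters; illustrative only, locking omitted):

    enum toy_index { TOY_INDEX, TOY_INDEX_DONE, TOY_INDEX_MAX };

    struct toy_ring {
            unsigned short index[TOY_INDEX_MAX];
            unsigned short length, limit;
            unsigned long count;
    };

    static void toy_index_inc(struct toy_ring *r, enum toy_index which)
    {
            r->index[which]++;
            if (r->index[which] >= r->limit)
                    r->index[which] = 0;            /* wrap around the ring */

            if (which == TOY_INDEX) {
                    r->length++;                    /* one more frame pending */
            } else if (which == TOY_INDEX_DONE) {
                    r->length--;                    /* a frame completed */
                    r->count++;                     /* lifetime frame counter */
            }
    }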
892 static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue) in rt2x00queue_pause_queue_nocheck() argument
894 switch (queue->qid) { in rt2x00queue_pause_queue_nocheck()
900 * For TX queues, we have to disable the queue in rt2x00queue_pause_queue_nocheck()
903 ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid); in rt2x00queue_pause_queue_nocheck()
909 void rt2x00queue_pause_queue(struct data_queue *queue) in rt2x00queue_pause_queue() argument
911 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || in rt2x00queue_pause_queue()
912 !test_bit(QUEUE_STARTED, &queue->flags) || in rt2x00queue_pause_queue()
913 test_and_set_bit(QUEUE_PAUSED, &queue->flags)) in rt2x00queue_pause_queue()
916 rt2x00queue_pause_queue_nocheck(queue); in rt2x00queue_pause_queue()
920 void rt2x00queue_unpause_queue(struct data_queue *queue) in rt2x00queue_unpause_queue() argument
922 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || in rt2x00queue_unpause_queue()
923 !test_bit(QUEUE_STARTED, &queue->flags) || in rt2x00queue_unpause_queue()
924 !test_and_clear_bit(QUEUE_PAUSED, &queue->flags)) in rt2x00queue_unpause_queue()
927 switch (queue->qid) { in rt2x00queue_unpause_queue()
933 * For TX queues, we have to enable the queue in rt2x00queue_unpause_queue()
936 ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid); in rt2x00queue_unpause_queue()
940 * For RX we need to kick the queue now in order to in rt2x00queue_unpause_queue()
943 queue->rt2x00dev->ops->lib->kick_queue(queue); in rt2x00queue_unpause_queue()
950 void rt2x00queue_start_queue(struct data_queue *queue) in rt2x00queue_start_queue() argument
952 mutex_lock(&queue->status_lock); in rt2x00queue_start_queue()
954 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || in rt2x00queue_start_queue()
955 test_and_set_bit(QUEUE_STARTED, &queue->flags)) { in rt2x00queue_start_queue()
956 mutex_unlock(&queue->status_lock); in rt2x00queue_start_queue()
960 set_bit(QUEUE_PAUSED, &queue->flags); in rt2x00queue_start_queue()
962 queue->rt2x00dev->ops->lib->start_queue(queue); in rt2x00queue_start_queue()
964 rt2x00queue_unpause_queue(queue); in rt2x00queue_start_queue()
966 mutex_unlock(&queue->status_lock); in rt2x00queue_start_queue()
970 void rt2x00queue_stop_queue(struct data_queue *queue) in rt2x00queue_stop_queue() argument
972 mutex_lock(&queue->status_lock); in rt2x00queue_stop_queue()
974 if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) { in rt2x00queue_stop_queue()
975 mutex_unlock(&queue->status_lock); in rt2x00queue_stop_queue()
979 rt2x00queue_pause_queue_nocheck(queue); in rt2x00queue_stop_queue()
981 queue->rt2x00dev->ops->lib->stop_queue(queue); in rt2x00queue_stop_queue()
983 mutex_unlock(&queue->status_lock); in rt2x00queue_stop_queue()
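The start/stop/pause/unpause helpers above coordinate through two flag bits; a lock-free toy model of that state machine (illustrative only, with the driver callbacks left out):

    #include <stdbool.h>

    struct toy_q { bool started, paused; };

    static void toy_pause(struct toy_q *q)
    {
            if (q->started)
                    q->paused = true;       /* e.g. ieee80211_stop_queue() */
    }

    static void toy_unpause(struct toy_q *q)
    {
            if (q->started)
                    q->paused = false;      /* e.g. ieee80211_wake_queue() */
    }

    static void toy_start(struct toy_q *q)
    {
            if (q->started)
                    return;
            q->started = true;
            q->paused = true;               /* start in the paused state... */
            toy_unpause(q);                 /* ...then wake the queue */
    }

    static void toy_stop(struct toy_q *q)
    {
            if (!q->started)
                    return;
            toy_pause(q);                   /* quiesce first */
            q->started = false;
    }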
987 void rt2x00queue_flush_queue(struct data_queue *queue, bool drop) in rt2x00queue_flush_queue() argument
990 (queue->qid == QID_AC_VO) || in rt2x00queue_flush_queue()
991 (queue->qid == QID_AC_VI) || in rt2x00queue_flush_queue()
992 (queue->qid == QID_AC_BE) || in rt2x00queue_flush_queue()
993 (queue->qid == QID_AC_BK); in rt2x00queue_flush_queue()
995 if (rt2x00queue_empty(queue)) in rt2x00queue_flush_queue()
1001 * to the queue to make sure the hardware will in rt2x00queue_flush_queue()
1005 queue->rt2x00dev->ops->lib->kick_queue(queue); in rt2x00queue_flush_queue()
1010 * alternative which just waits for the queue to become empty. in rt2x00queue_flush_queue()
1012 if (likely(queue->rt2x00dev->ops->lib->flush_queue)) in rt2x00queue_flush_queue()
1013 queue->rt2x00dev->ops->lib->flush_queue(queue, drop); in rt2x00queue_flush_queue()
1016 * The queue flush has failed... in rt2x00queue_flush_queue()
1018 if (unlikely(!rt2x00queue_empty(queue))) in rt2x00queue_flush_queue()
1019 rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n", in rt2x00queue_flush_queue()
1020 queue->qid); in rt2x00queue_flush_queue()
1026 struct data_queue *queue; in rt2x00queue_start_queues() local
1030 for each queue after it has been properly initialized. in rt2x00queue_start_queues()
1032 tx_queue_for_each(rt2x00dev, queue) in rt2x00queue_start_queues()
1033 rt2x00queue_start_queue(queue); in rt2x00queue_start_queues()
1041 struct data_queue *queue; in rt2x00queue_stop_queues() local
1051 tx_queue_for_each(rt2x00dev, queue) in rt2x00queue_stop_queues()
1052 rt2x00queue_stop_queue(queue); in rt2x00queue_stop_queues()
1060 struct data_queue *queue; in rt2x00queue_flush_queues() local
1062 tx_queue_for_each(rt2x00dev, queue) in rt2x00queue_flush_queues()
1063 rt2x00queue_flush_queue(queue, drop); in rt2x00queue_flush_queues()
1069 static void rt2x00queue_reset(struct data_queue *queue) in rt2x00queue_reset() argument
1074 spin_lock_irqsave(&queue->index_lock, irqflags); in rt2x00queue_reset()
1076 queue->count = 0; in rt2x00queue_reset()
1077 queue->length = 0; in rt2x00queue_reset()
1080 queue->index[i] = 0; in rt2x00queue_reset()
1082 spin_unlock_irqrestore(&queue->index_lock, irqflags); in rt2x00queue_reset()
1087 struct data_queue *queue; in rt2x00queue_init_queues() local
1090 queue_for_each(rt2x00dev, queue) { in rt2x00queue_init_queues()
1091 rt2x00queue_reset(queue); in rt2x00queue_init_queues()
1093 for (i = 0; i < queue->limit; i++) in rt2x00queue_init_queues()
1094 rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); in rt2x00queue_init_queues()
1098 static int rt2x00queue_alloc_entries(struct data_queue *queue) in rt2x00queue_alloc_entries() argument
1104 rt2x00queue_reset(queue); in rt2x00queue_alloc_entries()
1107 * Allocate all queue entries. in rt2x00queue_alloc_entries()
1109 entry_size = sizeof(*entries) + queue->priv_size; in rt2x00queue_alloc_entries()
1110 entries = kcalloc(queue->limit, entry_size, GFP_KERNEL); in rt2x00queue_alloc_entries()
1118 for (i = 0; i < queue->limit; i++) { in rt2x00queue_alloc_entries()
1120 entries[i].queue = queue; in rt2x00queue_alloc_entries()
1124 QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit, in rt2x00queue_alloc_entries()
1125 sizeof(*entries), queue->priv_size); in rt2x00queue_alloc_entries()
1130 queue->entries = entries; in rt2x00queue_alloc_entries()
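The allocation above packs the entry structs and their per-entry driver-private areas into one buffer; a user-space illustration of the layout the offset calculation implies (the exact arithmetic is an assumption here, so check the QUEUE_ENTRY_PRIV_OFFSET definition in the queue header):

    #include <stdlib.h>

    struct toy_entry { void *priv_data; };

    /* One calloc of limit * (entry + priv) bytes: the entry structs sit at
     * the front, followed by limit private areas of priv_size bytes each. */
    static struct toy_entry *toy_alloc_entries(size_t limit, size_t priv_size)
    {
            struct toy_entry *entries;
            char *priv_base;
            size_t i;

            entries = calloc(limit, sizeof(*entries) + priv_size);
            if (!entries)
                    return NULL;

            priv_base = (char *)entries + limit * sizeof(*entries);
            for (i = 0; i < limit; i++)
                    entries[i].priv_data = priv_base + i * priv_size;

            return entries;
    }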
1135 static void rt2x00queue_free_skbs(struct data_queue *queue) in rt2x00queue_free_skbs() argument
1139 if (!queue->entries) in rt2x00queue_free_skbs()
1142 for (i = 0; i < queue->limit; i++) { in rt2x00queue_free_skbs()
1143 rt2x00queue_free_skb(&queue->entries[i]); in rt2x00queue_free_skbs()
1147 static int rt2x00queue_alloc_rxskbs(struct data_queue *queue) in rt2x00queue_alloc_rxskbs() argument
1152 for (i = 0; i < queue->limit; i++) { in rt2x00queue_alloc_rxskbs()
1153 skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL); in rt2x00queue_alloc_rxskbs()
1156 queue->entries[i].skb = skb; in rt2x00queue_alloc_rxskbs()
1164 struct data_queue *queue; in rt2x00queue_initialize() local
1171 tx_queue_for_each(rt2x00dev, queue) { in rt2x00queue_initialize()
1172 status = rt2x00queue_alloc_entries(queue); in rt2x00queue_initialize()
1194 rt2x00_err(rt2x00dev, "Queue entries allocation failed\n"); in rt2x00queue_initialize()
1203 struct data_queue *queue; in rt2x00queue_uninitialize() local
1207 queue_for_each(rt2x00dev, queue) { in rt2x00queue_uninitialize()
1208 kfree(queue->entries); in rt2x00queue_uninitialize()
1209 queue->entries = NULL; in rt2x00queue_uninitialize()
1214 struct data_queue *queue, enum data_queue_qid qid) in rt2x00queue_init() argument
1216 mutex_init(&queue->status_lock); in rt2x00queue_init()
1217 spin_lock_init(&queue->tx_lock); in rt2x00queue_init()
1218 spin_lock_init(&queue->index_lock); in rt2x00queue_init()
1220 queue->rt2x00dev = rt2x00dev; in rt2x00queue_init()
1221 queue->qid = qid; in rt2x00queue_init()
1222 queue->txop = 0; in rt2x00queue_init()
1223 queue->aifs = 2; in rt2x00queue_init()
1224 queue->cw_min = 5; in rt2x00queue_init()
1225 queue->cw_max = 10; in rt2x00queue_init()
1227 rt2x00dev->ops->queue_init(queue); in rt2x00queue_init()
1229 queue->threshold = DIV_ROUND_UP(queue->limit, 10); in rt2x00queue_init()
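The defaults set above appear to follow the usual 802.11 EDCA convention where cw_min and cw_max are stored as exponents (contention window = 2^n - 1), and the pause threshold is a tenth of the queue size rounded up; assuming that interpretation (the queue size below is an example value, not taken from the listing), the numbers work out as:

    #include <stdio.h>

    int main(void)
    {
            unsigned int cw_min = 5, cw_max = 10;  /* defaults from the listing */
            unsigned int limit = 64;               /* example queue size only   */

            printf("CWmin     = %u\n", (1u << cw_min) - 1);  /* 31   slots */
            printf("CWmax     = %u\n", (1u << cw_max) - 1);  /* 1023 slots */
            printf("threshold = %u\n", (limit + 9) / 10);    /* 7 entries  */
            return 0;
    }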
1234 struct data_queue *queue; in rt2x00queue_allocate() local
1248 queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL); in rt2x00queue_allocate()
1249 if (!queue) in rt2x00queue_allocate()
1255 rt2x00dev->rx = queue; in rt2x00queue_allocate()
1256 rt2x00dev->tx = &queue[1]; in rt2x00queue_allocate()
1257 rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues]; in rt2x00queue_allocate()
1258 rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL; in rt2x00queue_allocate()
1261 * Initialize queue parameters. in rt2x00queue_allocate()
1272 tx_queue_for_each(rt2x00dev, queue) in rt2x00queue_allocate()
1273 rt2x00queue_init(rt2x00dev, queue, qid++); in rt2x00queue_allocate()
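To summarize the allocation above: all data_queue structures live in one contiguous array, with RX at index 0, the TX queues next, the beacon queue after them, and an optional ATIM queue last. A small illustration of that index arithmetic (toy types, not the driver's):

    #include <stddef.h>

    struct toy_data_queue { int qid; };

    struct toy_dev {
            struct toy_data_queue *rx, *tx, *bcn, *atim;
    };

    static void toy_assign_queues(struct toy_dev *dev, struct toy_data_queue *array,
                                  unsigned int tx_queues, int has_atim)
    {
            dev->rx   = &array[0];                  /* single RX queue     */
            dev->tx   = &array[1];                  /* tx_queues TX queues */
            dev->bcn  = &array[1 + tx_queues];      /* beacon queue        */
            dev->atim = has_atim ? &array[2 + tx_queues] : NULL; /* optional */
    }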