/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/netdevice.h>

#include "cxgb4.h"
#include "sched.h"

static int t4_sched_class_fw_cmd(struct port_info *pi,
				 struct ch_sched_params *p,
				 enum sched_fw_ops op)
{
	struct adapter *adap = pi->adapter;
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;
	int err = 0;

	e = &s->tab[p->u.params.class];
	switch (op) {
	case SCHED_FW_OP_ADD:
	case SCHED_FW_OP_DEL:
		err = t4_sched_params(adap, p->type,
				      p->u.params.level, p->u.params.mode,
				      p->u.params.rateunit,
				      p->u.params.ratemode,
				      p->u.params.channel, e->idx,
				      p->u.params.minrate, p->u.params.maxrate,
				      p->u.params.weight, p->u.params.pktsize,
				      p->u.params.burstsize);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}
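/* Illustrative sketch (not part of the driver): the values that
 * t4_sched_class_fw_cmd() forwards to t4_sched_params() come from a
 * caller-filled struct ch_sched_params. The enum names below are assumed
 * from sched.h, and this particular combination is only an example of a
 * per-class bit-rate limiter, not the one fixed way the API is used.
 *
 *	struct ch_sched_params prm = { 0 };
 *
 *	prm.type              = SCHED_CLASS_TYPE_PACKET;
 *	prm.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;	// per-class rate limit
 *	prm.u.params.mode     = SCHED_CLASS_MODE_CLASS;		// aggregate over bound queues
 *	prm.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;	// bit rate, not packet rate
 *	prm.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;	// absolute rate in Kbps
 *	prm.u.params.channel  = pi->tx_chan;
 *	prm.u.params.class    = SCHED_CLS_NONE;			// let the allocator pick
 *	prm.u.params.maxrate  = 100000;				// 100 Mbps cap
 */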
static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
				   enum sched_bind_type type, bool bind)
{
	struct adapter *adap = pi->adapter;
	u32 fw_mnem, fw_class, fw_param;
	unsigned int pf = adap->pf;
	unsigned int vf = 0;
	int err = 0;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		qe = (struct sched_queue_entry *)arg;

		/* Create a template for the FW_PARAMS_CMD mnemonic and
		 * value (TX Scheduling Class in this case).
		 */
		fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			   FW_PARAMS_PARAM_X_V(
				   FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
		fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
		fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));

		pf = adap->pf;
		vf = 0;

		err = t4_set_params(adap, adap->mbox, pf, vf, 1,
				    &fw_param, &fw_class);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe;

		fe = (struct sched_flowc_entry *)arg;

		fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
		err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
					       fe->param.tid, fw_class);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

static void *t4_sched_entry_lookup(struct port_info *pi,
				   enum sched_bind_type type,
				   const u32 val)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e, *end;
	void *found = NULL;

	/* Look for an entry with matching @val */
	end = &s->tab[s->sched_size];
	for (e = &s->tab[0]; e != end; ++e) {
		if (e->state == SCHED_STATE_UNUSED ||
		    e->bind_type != type)
			continue;

		switch (type) {
		case SCHED_QUEUE: {
			struct sched_queue_entry *qe;

			list_for_each_entry(qe, &e->entry_list, list) {
				if (qe->cntxt_id == val) {
					found = qe;
					break;
				}
			}
			break;
		}
		case SCHED_FLOWC: {
			struct sched_flowc_entry *fe;

			list_for_each_entry(fe, &e->entry_list, list) {
				if (fe->param.tid == val) {
					found = fe;
					break;
				}
			}
			break;
		}
		default:
			return NULL;
		}

		if (found)
			break;
	}

	return found;
}
struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
					     struct ch_sched_queue *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return NULL;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
	return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
}
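/* Illustrative sketch (not part of the driver): a caller can use
 * cxgb4_sched_queue_lookup() to find out whether a Tx queue already has a
 * scheduling class before deciding to rebind or reuse it. As in the bind
 * path, the queue index is relative to the port's first qset.
 *
 *	struct ch_sched_queue qe = { .queue = 0, .class = SCHED_CLS_NONE };
 *	struct sched_class *e;
 *
 *	e = cxgb4_sched_queue_lookup(dev, &qe);
 *	if (e)
 *		pr_debug("txq 0 already bound to class %u\n", e->idx);
 */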
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];

	/* Find the existing entry that the queue is bound to */
	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
	if (qe) {
		err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[qe->param.class];
		list_del(&qe->list);
		kvfree(qe);
		if (atomic_dec_and_test(&e->refcnt))
			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
	}
	return err;
}

static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	unsigned int qid;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL);
	if (!qe)
		return -ENOMEM;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qid = txq->q.cntxt_id;

	/* Unbind queue from any existing class */
	err = t4_sched_queue_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind queue to specified class */
	qe->cntxt_id = qid;
	memcpy(&qe->param, p, sizeof(qe->param));

	e = &s->tab[qe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
	if (err)
		goto out_err;

	list_add_tail(&qe->list, &e->entry_list);
	e->bind_type = SCHED_QUEUE;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(qe);
	return err;
}

static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	/* Find the existing entry that the flowc is bound to */
	fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
	if (fe) {
		err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[fe->param.class];
		list_del(&fe->list);
		kvfree(fe);
		if (atomic_dec_and_test(&e->refcnt))
			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
	}
	return err;
}
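/* Illustrative sketch (not part of the driver): FLOWC-type bindings tie an
 * ETHOFLD TID (rather than a NIC Tx queue) to a class and go through the
 * same public entry points with SCHED_FLOWC. Only the field names used by
 * this file are assumed here; "eotid" is a placeholder for a valid EO TID.
 *
 *	struct ch_sched_flowc fe = { 0 };
 *
 *	fe.tid   = eotid;		// EO TID carrying the traffic
 *	fe.class = e->idx;		// class returned by cxgb4_sched_class_alloc()
 *	err = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC);
 */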
static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
	if (!fe)
		return -ENOMEM;

	/* Unbind flowc from any existing class */
	err = t4_sched_flowc_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind flowc to specified class */
	memcpy(&fe->param, p, sizeof(fe->param));

	e = &s->tab[fe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
	if (err)
		goto out_err;

	list_add_tail(&fe->list, &e->entry_list);
	e->bind_type = SCHED_FLOWC;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(fe);
	return err;
}

static void t4_sched_class_unbind_all(struct port_info *pi,
				      struct sched_class *e,
				      enum sched_bind_type type)
{
	if (!e)
		return;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		list_for_each_entry(qe, &e->entry_list, list)
			t4_sched_queue_unbind(pi, &qe->param);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe;

		list_for_each_entry(fe, &e->entry_list, list)
			t4_sched_flowc_unbind(pi, &fe->param);
		break;
	}
	default:
		break;
	}
}

static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
					 enum sched_bind_type type, bool bind)
{
	int err = 0;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		if (bind)
			err = t4_sched_queue_bind(pi, qe);
		else
			err = t4_sched_queue_unbind(pi, qe);
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		if (bind)
			err = t4_sched_flowc_bind(pi, fe);
		else
			err = t4_sched_flowc_unbind(pi, fe);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

/**
 * cxgb4_sched_class_bind - Bind an entity to a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or flowc)
 *
 * Binds an entity (queue or flowc) to a scheduling class. If the entity
 * is bound to another class, it will be unbound from the other class
 * and bound to the class specified in @arg.
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
			   enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	if (class_id == SCHED_CLS_NONE)
		return -ENOTSUPP;

	return t4_sched_class_bind_unbind_op(pi, arg, type, true);
}
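/* Illustrative sketch (not part of the driver): the usual flow for rate
 * limiting a Tx queue is to allocate (or reuse) a class and then bind the
 * queue to it. "prm" is assumed to be a filled-in struct ch_sched_params
 * as in the sketch near t4_sched_class_fw_cmd() above.
 *
 *	struct sched_class *e;
 *	struct ch_sched_queue qe;
 *	int err;
 *
 *	e = cxgb4_sched_class_alloc(dev, &prm);
 *	if (!e)
 *		return -ENOMEM;
 *
 *	qe.queue = 0;			// first Tx queue of this port
 *	qe.class = e->idx;		// class index chosen by the allocator
 *	err = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
 */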
/**
 * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or flowc)
 *
 * Unbinds an entity (queue or flowc) from a scheduling class.
 */
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
			     enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	return t4_sched_class_bind_unbind_op(pi, arg, type, false);
}
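/* Illustrative sketch (not part of the driver): unbinding mirrors the bind
 * call. Internally the queue is found by its hardware context id, so the
 * caller passes the same queue index it bound earlier; the class itself is
 * released automatically once its reference count drops to zero.
 *
 *	struct ch_sched_queue qe;
 *
 *	qe.queue = 0;			// queue index used at bind time
 *	qe.class = e->idx;		// class it was bound to
 *	cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
 */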
/* If @p is NULL, fetch any available unused class */
static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
						 const struct ch_sched_params *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *found = NULL;
	struct sched_class *e, *end;

	if (!p) {
		/* Get any available unused class */
		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED) {
				found = e;
				break;
			}
		}
	} else {
		/* Look for a class with matching scheduling parameters */
		struct ch_sched_params info;
		struct ch_sched_params tp;

		memcpy(&tp, p, sizeof(tp));
		/* Don't try to match class parameter */
		tp.u.params.class = SCHED_CLS_NONE;

		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED)
				continue;

			memcpy(&info, &e->info, sizeof(info));
			/* Don't try to match class parameter */
			info.u.params.class = SCHED_CLS_NONE;

			if ((info.type == tp.type) &&
			    (!memcmp(&info.u.params, &tp.u.params,
				     sizeof(info.u.params)))) {
				found = e;
				break;
			}
		}
	}

	return found;
}

static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
						struct ch_sched_params *p)
{
	struct sched_class *e = NULL;
	u8 class_id;
	int err;

	if (!p)
		return NULL;

	class_id = p->u.params.class;

	/* Only accept search for existing class with matching params
	 * or allocation of new class with specified params
	 */
	if (class_id != SCHED_CLS_NONE)
		return NULL;

	/* See if there's an existing class with same requested sched
	 * params. Classes can only be shared among FLOWC types. For
	 * other types, always request a new class.
	 */
	if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
		e = t4_sched_class_lookup(pi, p);

	if (!e) {
		struct ch_sched_params np;

		/* Fetch any available unused class */
		e = t4_sched_class_lookup(pi, NULL);
		if (!e)
			return NULL;

		memcpy(&np, p, sizeof(np));
		np.u.params.class = e->idx;
		/* New class */
		err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
		if (err)
			return NULL;
		memcpy(&e->info, &np, sizeof(e->info));
		atomic_set(&e->refcnt, 0);
		e->state = SCHED_STATE_ACTIVE;
	}

	return e;
}

/**
 * cxgb4_sched_class_alloc - allocate a scheduling class
 * @dev: net_device pointer
 * @p: new scheduling class to create.
 *
 * Returns pointer to the scheduling class created. If a scheduling class
 * with matching @p is already active, the matching class is returned;
 * otherwise an unused class is claimed and programmed with the parameters
 * in @p. The class id in @p must be SCHED_CLS_NONE.
 */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
					    struct ch_sched_params *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return NULL;

	class_id = p->u.params.class;
	if (!valid_class_id(dev, class_id))
		return NULL;

	return t4_sched_class_alloc(pi, p);
}
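/* Illustrative sketch (not part of the driver): allocation only succeeds
 * when the caller leaves the class id unset (SCHED_CLS_NONE); the allocator
 * then claims an unused entry, or, for flow-mode parameters, reuses a
 * matching active class. "prm" is filled as in the sketch near
 * t4_sched_class_fw_cmd() above.
 *
 *	prm.u.params.class = SCHED_CLS_NONE;
 *	e = cxgb4_sched_class_alloc(dev, &prm);
 *	if (e)
 *		netdev_dbg(dev, "using scheduling class %u\n", e->idx);
 */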
/**
 * cxgb4_sched_class_free - free a scheduling class
 * @dev: net_device pointer
 * @classid: scheduling class id to free
 *
 * Frees a scheduling class if there are no users.
 */
void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_table *s = pi->sched_tbl;
	struct ch_sched_params p;
	struct sched_class *e;
	u32 speed;
	int ret;

	e = &s->tab[classid];
	if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
		/* Port based rate limiting needs explicit reset back
		 * to max rate. But, we'll do explicit reset for all
		 * types, instead of just port based type, to be on
		 * the safer side.
		 */
		memcpy(&p, &e->info, sizeof(p));
		/* Always reset mode to 0. Otherwise, FLOWC mode will
		 * still be enabled even after resetting the traffic
		 * class.
		 */
		p.u.params.mode = 0;
		p.u.params.minrate = 0;
		p.u.params.pktsize = 0;

		ret = t4_get_link_params(pi, NULL, &speed, NULL);
		if (!ret)
			p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
		else
			p.u.params.maxrate = SCHED_MAX_RATE_KBPS;

		t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);

		e->state = SCHED_STATE_UNUSED;
		memset(&e->info, 0, sizeof(e->info));
	}
}

static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
{
	struct port_info *pi = netdev2pinfo(dev);

	t4_sched_class_unbind_all(pi, e, e->bind_type);
	cxgb4_sched_class_free(dev, e->idx);
}

struct sched_table *t4_init_sched(unsigned int sched_size)
{
	struct sched_table *s;
	unsigned int i;

	s = kvzalloc(struct_size(s, tab, sched_size), GFP_KERNEL);
	if (!s)
		return NULL;

	s->sched_size = sched_size;

	for (i = 0; i < s->sched_size; i++) {
		memset(&s->tab[i], 0, sizeof(struct sched_class));
		s->tab[i].idx = i;
		s->tab[i].state = SCHED_STATE_UNUSED;
		INIT_LIST_HEAD(&s->tab[i].entry_list);
		atomic_set(&s->tab[i].refcnt, 0);
	}
	return s;
}

void t4_cleanup_sched(struct adapter *adap)
{
	struct sched_table *s;
	unsigned int j, i;

	for_each_port(adap, j) {
		struct port_info *pi = netdev2pinfo(adap->port[j]);

		s = pi->sched_tbl;
		if (!s)
			continue;

		for (i = 0; i < s->sched_size; i++) {
			struct sched_class *e;

			e = &s->tab[i];
			if (e->state == SCHED_STATE_ACTIVE)
				t4_sched_class_free(adap->port[j], e);
		}
		kvfree(s);
	}
}
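/* Illustrative sketch (not part of the driver): table lifecycle. Each port
 * gets its own table, created at port init and torn down with the adapter;
 * the sizing argument shown here is an assumption about how the caller
 * might size it, not a value taken from this file.
 *
 *	pi->sched_tbl = t4_init_sched(nsched_cls);	// nsched_cls: FW-reported class count
 *	...
 *	t4_cleanup_sched(adap);				// unbinds and frees any active classes
 */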