// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022, Intel Corporation. */

#include "ice_virtchnl.h"
#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"
#include "ice_flex_pipe.h"
#include "ice_dcb_lib.h"

#define FIELD_SELECTOR(proto_hdr_field) \
	BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
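
/* Illustrative note (editorial, assuming the virtchnl.h layout where each
 * header's field enums start at PROTO_HDR_FIELD_START(hdr) and
 * PROTO_HDR_FIELD_MASK covers the low PROTO_HDR_SHIFT bits):
 * FIELD_SELECTOR() collapses an absolute virtchnl field enum into a
 * per-header selector bit, e.g.
 *
 *	u32 sel = FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC);
 *	// sel == BIT(VIRTCHNL_PROTO_HDR_IPV4_SRC & PROTO_HDR_FIELD_MASK)
 *
 * so selectors from different headers may share bit positions and are only
 * meaningful together with the proto_hdr type they belong to.
 */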

struct ice_vc_hdr_match_type {
	u32 vc_hdr;	/* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
	u32 ice_hdr;	/* ice headers (ICE_FLOW_SEG_HDR_XXX) */
};

static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
	{VIRTCHNL_PROTO_HDR_ETH,	ICE_FLOW_SEG_HDR_ETH},
	{VIRTCHNL_PROTO_HDR_S_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_C_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
	{VIRTCHNL_PROTO_HDR_PPPOE,	ICE_FLOW_SEG_HDR_PPPOE},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,	ICE_FLOW_SEG_HDR_GTPU_IP},
	{VIRTCHNL_PROTO_HDR_GTPU_EH,	ICE_FLOW_SEG_HDR_GTPU_EH},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
					ICE_FLOW_SEG_HDR_GTPU_DWN},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
					ICE_FLOW_SEG_HDR_GTPU_UP},
	{VIRTCHNL_PROTO_HDR_L2TPV3,	ICE_FLOW_SEG_HDR_L2TPV3},
	{VIRTCHNL_PROTO_HDR_ESP,	ICE_FLOW_SEG_HDR_ESP},
	{VIRTCHNL_PROTO_HDR_AH,		ICE_FLOW_SEG_HDR_AH},
	{VIRTCHNL_PROTO_HDR_PFCP,	ICE_FLOW_SEG_HDR_PFCP_SESSION},
};

struct ice_vc_hash_field_match_type {
	u32 vc_hdr;		/* virtchnl headers
				 * (VIRTCHNL_PROTO_HDR_XXX)
				 */
	u32 vc_hash_field;	/* virtchnl hash fields selector
				 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
				 */
	u64 ice_hash_field;	/* ice hash fields
				 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
				 */
};

static const struct
ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		ICE_FLOW_HASH_ETH},
	{VIRTCHNL_PROTO_HDR_ETH,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
	{VIRTCHNL_PROTO_HDR_S_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
	{VIRTCHNL_PROTO_HDR_C_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		ICE_FLOW_HASH_IPV4},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		ICE_FLOW_HASH_IPV6},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		ICE_FLOW_HASH_TCP_PORT},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		ICE_FLOW_HASH_UDP_PORT},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		ICE_FLOW_HASH_SCTP_PORT},
	{VIRTCHNL_PROTO_HDR_PPPOE,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
	{VIRTCHNL_PROTO_HDR_L2TPV3,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
	{VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
	{VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
};
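
/* Illustrative note (editorial): the lookup over ice_vc_hash_field_list in
 * ice_vc_parse_rss_cfg() below requires the VF's field_selector to equal
 * vc_hash_field exactly, so every supported combination needs its own row.
 * That is why, for example, IPV4 SRC and DST each get a row, and the
 * SRC | DST combination gets a third row mapping to ICE_FLOW_HASH_IPV4.
 */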

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_status = link_up;
		/* Speed in Mbps */
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
		/* Legacy method for virtchnl link speeds */
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}
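
/* Illustrative note (editorial): the two event layouts report speed
 * differently. For a 40 Gbps link the ADV_LINK_SPEED path above carries a
 * plain numeric rate (40000, in Mbps per the comment), while the legacy path
 * carries a VIRTCHNL_LINK_SPEED_* enum value. Which of the two a VF decodes
 * is decided purely by the VIRTCHNL_VF_CAP_ADV_LINK_SPEED capability it
 * negotiated, so both sides must agree on that bit before link events are
 * interpreted.
 */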

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_hw *hw = &vf->pf->hw;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	if (ice_is_vf_link_up(vf))
		ice_set_pfe_link(vf, &pfe,
				 hw->port_info->phy.link_info.link_speed, true);
	else
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

/**
 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
 * @pf: pointer to the PF structure
 */
void ice_vc_notify_link_state(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_vc_notify_vf_link_state(vf);
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_vc_notify_reset - Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
	struct virtchnl_pf_event pfe;

	if (!ice_has_vfs(pf))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * ice_vc_send_msg_to_vf - Send message to VF
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 */
int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct device *dev;
	struct ice_pf *pf;
	int aq_ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);

	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
		dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
			 vf->vf_id, aq_ret,
			 ice_aq_str(pf->hw.mailboxq.sq_last_status));
		return -EIO;
	}

	return 0;
}

/**
 * ice_vc_get_ver_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 */
static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;

	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
				     sizeof(struct virtchnl_version_info));
}

/**
 * ice_vc_get_max_frame_size - get max frame size allowed for VF
 * @vf: VF used to determine max frame size
 *
 * Max frame size is determined based on the current port's max frame size and
 * whether a port VLAN is configured on this VF. The VF is not aware whether
 * it's in a port VLAN so the PF needs to account for this in max frame size
 * checks and sending the max frame size to the VF.
 */
static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);
	u16 max_frame_size;

	max_frame_size = pi->phy.link_info.max_frame_size;

	if (ice_vf_is_port_vlan_ena(vf))
		max_frame_size -= VLAN_HLEN;

	return max_frame_size;
}
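
/* Illustrative example (editorial): if the port reports max_frame_size =
 * 9728 and a port VLAN is configured on the VF, the VF is told
 * 9728 - VLAN_HLEN = 9724 (VLAN_HLEN being the 4-byte 802.1Q tag), keeping
 * the tagged frame within the port limit without the VF ever knowing about
 * the tag.
 */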

/**
 * ice_vc_get_vlan_caps
 * @hw: pointer to the hw
 * @vf: pointer to the VF info
 * @vsi: pointer to the VSI
 * @driver_caps: current driver caps
 *
 * Return 0 if no VLAN caps are supported, otherwise return the VLAN caps value
 */
static u32
ice_vc_get_vlan_caps(struct ice_hw *hw, struct ice_vf *vf, struct ice_vsi *vsi,
		     u32 driver_caps)
{
	if (ice_is_eswitch_mode_switchdev(vf->pf))
		/* In switchdev setting VLAN from VF isn't supported */
		return 0;

	if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
		/* VLAN offloads based on current device configuration */
		return VIRTCHNL_VF_OFFLOAD_VLAN_V2;
	} else if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
		/* allow the VF to negotiate VIRTCHNL_VF_OFFLOAD_VLAN
		 * explicitly for these two conditions, which amounts to guest
		 * VLAN filtering and offloads being based on the inner VLAN
		 * or the inner/single VLAN respectively; don't allow the VF
		 * to negotiate VIRTCHNL_VF_OFFLOAD_VLAN in any other case
		 */
		if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
			return VIRTCHNL_VF_OFFLOAD_VLAN;
		} else if (!ice_is_dvm_ena(hw) &&
			   !ice_vf_is_port_vlan_ena(vf)) {
			/* configure backward compatible support for VFs that
			 * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
			 * configured in SVM, and no port VLAN is configured
			 */
			ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
			return VIRTCHNL_VF_OFFLOAD_VLAN;
		} else if (ice_is_dvm_ena(hw)) {
			/* configure software offloaded VLAN support when DVM
			 * is enabled, but no port VLAN is enabled
			 */
			ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
		}
	}

	return 0;
}
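
/* Summary of the negotiation above (editorial; DVM = double VLAN mode,
 * SVM = single VLAN mode, port VLAN as configured by the PF admin):
 *
 *	VLAN_V2 requested		-> VIRTCHNL_VF_OFFLOAD_VLAN_V2
 *	VLAN + DVM + port VLAN		-> VIRTCHNL_VF_OFFLOAD_VLAN
 *	VLAN + SVM + no port VLAN	-> VIRTCHNL_VF_OFFLOAD_VLAN (legacy cfg)
 *	VLAN + DVM + no port VLAN	-> 0 (software-offloaded VLAN cfg only)
 *	anything else / switchdev	-> 0
 */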

/**
 * ice_vc_get_vf_res_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 */
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_resource *vfres = NULL;
	struct ice_hw *hw = &vf->pf->hw;
	struct ice_vsi *vsi;
	int len = 0;
	int ret;

	if (ice_check_vf_init(vf)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	len = virtchnl_struct_size(vfres, vsi_res, 0);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
						    vf->driver_caps);

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_CRC;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;

	vfres->num_vsis = 1;
	/* Tx and Rx queues are equal for VF */
	vfres->num_queue_pairs = vsi->num_txq;
	vfres->max_vectors = vf->num_msix;
	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
	vfres->max_mtu = ice_vc_get_max_frame_size(vf);

	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
			vf->hw_lan_addr);

	/* match guest capabilities */
	vf->driver_caps = vfres->vf_cap_flags;

	ice_vc_set_caps_allowlist(vf);
	ice_vc_set_working_allowlist(vf);

	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

err:
	/* send the response back to the VF */
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
				    (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * ice_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself; unlike other virtchnl messages, the PF
 * driver doesn't send a response back to the VF
 */
static void ice_vc_reset_vf_msg(struct ice_vf *vf)
{
	if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
		ice_reset_vf(vf, 0);
}

/**
 * ice_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI ID
 *
 * check for the valid VSI ID
 */
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = ice_find_vsi(pf, vsi_id);

	return (vsi && (vsi->vf == vf));
}

/**
 * ice_vc_isvalid_q_id
 * @vf: pointer to the VF info
 * @vsi_id: VSI ID
 * @qid: VSI relative queue ID
 *
 * check for the valid queue ID
 */
static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
{
	struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
	/* allocated Tx and Rx queues should always be equal for VF VSI */
	return (vsi && (qid < vsi->alloc_txq));
}

/**
 * ice_vc_isvalid_ring_len
 * @ring_len: length of ring
 *
 * check for a valid ring count; should be a multiple of
 * ICE_REQ_DESC_MULTIPLE or zero
 */
static bool ice_vc_isvalid_ring_len(u16 ring_len)
{
	return ring_len == 0 ||
	       (ring_len >= ICE_MIN_NUM_DESC &&
		ring_len <= ICE_MAX_NUM_DESC &&
		!(ring_len % ICE_REQ_DESC_MULTIPLE));
}
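
/* Illustrative examples (editorial, assuming the driver's usual descriptor
 * limits of ICE_MIN_NUM_DESC = 64, ICE_MAX_NUM_DESC = 8160 and
 * ICE_REQ_DESC_MULTIPLE = 32 at the time of writing): ring_len 0 and 512
 * pass; 100 fails (not a multiple of 32); 32 fails (below the minimum);
 * 8192 fails (above the maximum). Zero is allowed so a VF can leave a
 * queue unconfigured.
 */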

/**
 * ice_vc_validate_pattern
 * @vf: pointer to the VF info
 * @proto: virtchnl protocol headers
 *
 * validate whether the pattern is supported
 *
 * Return: true on success, false on error.
 */
bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
{
	bool is_ipv4 = false;
	bool is_ipv6 = false;
	bool is_udp = false;
	u16 ptype = -1;
	int i = 0;

	while (i < proto->count &&
	       proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
		switch (proto->proto_hdr[i].type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			ptype = ICE_PTYPE_MAC_PAY;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			ptype = ICE_PTYPE_IPV4_PAY;
			is_ipv4 = true;
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ptype = ICE_PTYPE_IPV6_PAY;
			is_ipv6 = true;
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			if (is_ipv4)
				ptype = ICE_PTYPE_IPV4_UDP_PAY;
			else if (is_ipv6)
				ptype = ICE_PTYPE_IPV6_UDP_PAY;
			is_udp = true;
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			if (is_ipv4)
				ptype = ICE_PTYPE_IPV4_TCP_PAY;
			else if (is_ipv6)
				ptype = ICE_PTYPE_IPV6_TCP_PAY;
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			if (is_ipv4)
				ptype = ICE_PTYPE_IPV4_SCTP_PAY;
			else if (is_ipv6)
				ptype = ICE_PTYPE_IPV6_SCTP_PAY;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			if (is_ipv4)
				ptype = ICE_MAC_IPV4_GTPU;
			else if (is_ipv6)
				ptype = ICE_MAC_IPV6_GTPU;
			goto out;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (is_ipv4)
				ptype = ICE_MAC_IPV4_L2TPV3;
			else if (is_ipv6)
				ptype = ICE_MAC_IPV6_L2TPV3;
			goto out;
		case VIRTCHNL_PROTO_HDR_ESP:
			if (is_ipv4)
				ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
						ICE_MAC_IPV4_ESP;
			else if (is_ipv6)
				ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
						ICE_MAC_IPV6_ESP;
			goto out;
		case VIRTCHNL_PROTO_HDR_AH:
			if (is_ipv4)
				ptype = ICE_MAC_IPV4_AH;
			else if (is_ipv6)
				ptype = ICE_MAC_IPV6_AH;
			goto out;
		case VIRTCHNL_PROTO_HDR_PFCP:
			if (is_ipv4)
				ptype = ICE_MAC_IPV4_PFCP_SESSION;
			else if (is_ipv6)
				ptype = ICE_MAC_IPV6_PFCP_SESSION;
			goto out;
		default:
			break;
		}
		i++;
	}

out:
	return ice_hw_ptype_ena(&vf->pf->hw, ptype);
}
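
/* Illustrative walk-through (editorial): for a pattern of ETH, IPV4, UDP
 * the loop above keeps overwriting ptype with the innermost header seen so
 * far (MAC_PAY -> IPV4_PAY -> IPV4_UDP_PAY), while terminal headers such as
 * GTPU, L2TPV3, ESP, AH or PFCP jump straight to "out". The final
 * ice_hw_ptype_ena() call then asks whether that resolved packet type is
 * actually enabled in the hardware parser.
 */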

/**
 * ice_vc_parse_rss_cfg - parses hash fields and headers from
 * a specific virtchnl RSS cfg
 * @hw: pointer to the hardware
 * @rss_cfg: pointer to the virtchnl RSS cfg
 * @hash_cfg: pointer to the HW hash configuration
 *
 * Return true if all the protocol header and hash fields in the RSS cfg could
 * be parsed, else return false
 *
 * This function parses the virtchnl RSS cfg into the intended hash fields and
 * headers for the RSS configuration
 */
static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
				 struct virtchnl_rss_cfg *rss_cfg,
				 struct ice_rss_hash_cfg *hash_cfg)
{
	const struct ice_vc_hash_field_match_type *hf_list;
	const struct ice_vc_hdr_match_type *hdr_list;
	int i, hf_list_len, hdr_list_len;
	u32 *addl_hdrs = &hash_cfg->addl_hdrs;
	u64 *hash_flds = &hash_cfg->hash_flds;

	/* set outer layer RSS as default */
	hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;

	if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
		hash_cfg->symm = true;
	else
		hash_cfg->symm = false;

	hf_list = ice_vc_hash_field_list;
	hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
	hdr_list = ice_vc_hdr_list;
	hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);

	for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
		struct virtchnl_proto_hdr *proto_hdr =
					&rss_cfg->proto_hdrs.proto_hdr[i];
		bool hdr_found = false;
		int j;

		/* Find matched ice headers according to virtchnl headers. */
		for (j = 0; j < hdr_list_len; j++) {
			struct ice_vc_hdr_match_type hdr_map = hdr_list[j];

			if (proto_hdr->type == hdr_map.vc_hdr) {
				*addl_hdrs |= hdr_map.ice_hdr;
				hdr_found = true;
			}
		}

		if (!hdr_found)
			return false;

		/* Find matched ice hash fields according to
		 * virtchnl hash fields.
		 */
		for (j = 0; j < hf_list_len; j++) {
			struct ice_vc_hash_field_match_type hf_map = hf_list[j];

			if (proto_hdr->type == hf_map.vc_hdr &&
			    proto_hdr->field_selector == hf_map.vc_hash_field) {
				*hash_flds |= hf_map.ice_hash_field;
				break;
			}
		}
	}

	return true;
}
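
/* Illustrative example (editorial): a virtchnl cfg with a single IPV4
 * proto_hdr whose field_selector is
 * FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
 * FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) ends up with
 * addl_hdrs |= ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER and
 * hash_flds |= ICE_FLOW_HASH_IPV4. Note the asymmetry: an unknown header
 * type fails the parse, but a field_selector with no match in
 * ice_vc_hash_field_list is silently skipped.
 */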

/**
 * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
 * RSS offloads
 * @caps: VF driver negotiated capabilities
 *
 * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
 * else return false
 */
static bool ice_vf_adv_rss_offload_ena(u32 caps)
{
	return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
}

/**
 * ice_vc_handle_rss_cfg
 * @vf: pointer to the VF info
 * @msg: pointer to the message buffer
 * @add: add an RSS config if true, otherwise delete an RSS config
 *
 * This function adds/deletes an RSS config
 */
static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
{
	u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
	struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_hw *hw = &vf->pf->hw;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
			vf->vf_id);
		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
		goto error_param;
	}

	if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
		dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
			vf->vf_id);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
	    rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
	    rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
			vf->vf_id);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
		struct ice_vsi_ctx *ctx;
		u8 lut_type, hash_type;
		int status;

		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
				ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx) {
			v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
			goto error_param;
		}

		ctx->info.q_opt_rss =
			FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
			FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);

		/* Preserve existing queueing option setting */
		ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
					ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
		ctx->info.q_opt_tc = vsi->info.q_opt_tc;
		ctx->info.q_opt_flags = vsi->info.q_opt_rss;

		ctx->info.valid_sections =
				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);

		status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
		if (status) {
			dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
				status, ice_aq_str(hw->adminq.sq_last_status));
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		} else {
			vsi->info.q_opt_rss = ctx->info.q_opt_rss;
		}

		kfree(ctx);
	} else {
		struct ice_rss_hash_cfg cfg;

		/* Only check for the non-raw pattern case */
		if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}
		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
		cfg.hash_flds = ICE_HASH_INVALID;
		cfg.hdr_type = ICE_RSS_ANY_HEADERS;

		if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		if (add) {
			if (ice_add_rss_cfg(hw, vsi, &cfg)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
					vsi->vsi_num, v_ret);
			}
		} else {
			int status;

			status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
			/* We just ignore -ENOENT, because if two
			 * configurations share the same profile, removing one
			 * of them actually removes both, since the profile is
			 * deleted.
			 */
			if (status && status != -ENOENT) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
					vf->vf_id, status);
			}
		}
	}

error_param:
	return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
}
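
/* Illustrative note (editorial): the VIRTCHNL_RSS_ALG_R_ASYMMETRIC branch
 * above does not program a flow profile at all - it only rewrites the
 * VSI's q_opt hash type between XOR and Toeplitz through ice_update_vsi(),
 * which is why it round-trips a temporary ice_vsi_ctx instead of building
 * an ice_rss_hash_cfg the way the Toeplitz path does.
 */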

/**
 * ice_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS key
 */
static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_rss_key *vrk =
		(struct virtchnl_rss_key *)msg;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_set_rss_key(vsi, vrk->key))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
				     NULL, 0);
}

/**
 * ice_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS LUT
 */
static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
				     NULL, 0);
}

/**
 * ice_vc_config_rss_hfunc
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS Hash function
 */
static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
		hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;

	if (ice_set_rss_hfunc(vsi, hfunc))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
				     NULL, 0);
}

/**
 * ice_vc_cfg_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the VF VSI's promiscuous mode
 */
static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	bool rm_promisc, alluni = false, allmulti = false;
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct ice_vsi_vlan_ops *vlan_ops;
	int mcast_err = 0, ucast_err = 0;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	u8 mcast_m, ucast_m;
	struct device *dev;
	int ret = 0;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	dev = ice_pf_to_dev(pf);
	if (!ice_is_vf_trusted(vf)) {
		dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Leave v_ret alone, lie to the VF on purpose. */
		goto error_param;
	}

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;

	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	rm_promisc = !allmulti && !alluni;

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	if (rm_promisc)
		ret = vlan_ops->ena_rx_filtering(vsi);
	else
		ret = vlan_ops->dis_rx_filtering(vsi);
	if (ret) {
		dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);

	if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
		if (alluni) {
			/* in this case we're turning on promiscuous mode */
			ret = ice_set_dflt_vsi(vsi);
		} else {
			/* in this case we're turning off promiscuous mode */
			if (ice_is_dflt_vsi_in_use(vsi->port_info))
				ret = ice_clear_dflt_vsi(vsi);
		}

		/* in this case we're turning on/off only
		 * allmulticast
		 */
		if (allmulti)
			mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
		else
			mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);

		if (ret) {
			dev_err(dev, "Turning on/off promiscuous mode for VF %d failed, error: %d\n",
				vf->vf_id, ret);
			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
			goto error_param;
		}
	} else {
		if (alluni)
			ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
		else
			ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);

		if (allmulti)
			mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
		else
			mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);

		if (ucast_err || mcast_err)
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	}

	if (!mcast_err) {
		if (allmulti &&
		    !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
			dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
				 vf->vf_id);
		else if (!allmulti &&
			 test_and_clear_bit(ICE_VF_STATE_MC_PROMISC,
					    vf->vf_states))
			dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
				 vf->vf_id);
	} else {
		dev_err(dev, "Error while modifying multicast promiscuous mode for VF %u, error: %d\n",
			vf->vf_id, mcast_err);
	}

	if (!ucast_err) {
		if (alluni &&
		    !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
			dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
				 vf->vf_id);
		else if (!alluni &&
			 test_and_clear_bit(ICE_VF_STATE_UC_PROMISC,
					    vf->vf_states))
			dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
				 vf->vf_id);
	} else {
		dev_err(dev, "Error while modifying unicast promiscuous mode for VF %u, error: %d\n",
			vf->vf_id, ucast_err);
	}

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				     v_ret, NULL, 0);
}

/**
 * ice_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to get VSI stats
 */
static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	struct ice_eth_stats stats = { 0 };
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	ice_update_eth_stats(vsi);

	stats = vsi->eth_stats;

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
				     (u8 *)&stats, sizeof(stats));
}

/**
 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
 *
 * Return true on successful validation, else false
 */
static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
{
	if ((!vqs->rx_queues && !vqs->tx_queues) ||
	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
		return false;

	return true;
}
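
/* Illustrative example (editorial, assuming ICE_MAX_RSS_QS_PER_VF = 16):
 * rx_queues = 0x0003 with tx_queues = 0 is accepted (Rx queues 0 and 1),
 * while any bitmap with bit 16 or above set - e.g. 0x10000 - is rejected
 * because it names a queue the VF can never own. At least one of the two
 * bitmaps must be non-zero.
 */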

/**
 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
 * @vsi: VSI of the VF to configure
 * @q_idx: VF queue index used to determine the queue in the PF's space
 */
static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
{
	struct ice_hw *hw = &vsi->back->hw;
	u32 pfq = vsi->txq_map[q_idx];
	u32 reg;

	reg = rd32(hw, QINT_TQCTL(pfq));

	/* MSI-X index 0 in the VF's space is always for the OICR, which means
	 * this is most likely a poll mode VF driver, so don't enable an
	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
	 */
	if (!(reg & QINT_TQCTL_MSIX_INDX_M))
		return;

	wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
}

/**
 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
 * @vsi: VSI of the VF to configure
 * @q_idx: VF queue index used to determine the queue in the PF's space
 */
static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
{
	struct ice_hw *hw = &vsi->back->hw;
	u32 pfq = vsi->rxq_map[q_idx];
	u32 reg;

	reg = rd32(hw, QINT_RQCTL(pfq));

	/* MSI-X index 0 in the VF's space is always for the OICR, which means
	 * this is most likely a poll mode VF driver, so don't enable an
	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
	 */
	if (!(reg & QINT_RQCTL_MSIX_INDX_M))
		return;

	wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
}

/**
 * ice_vc_ena_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to enable all or specific queue(s)
 */
static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct ice_vsi *vsi;
	unsigned long q_map;
	u16 vf_q_id;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */
	q_map = vqs->rx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* Skip queue if already enabled */
		if (test_bit(vf_q_id, vf->rxq_ena))
			continue;

		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
				vf_q_id, vsi->vsi_num);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
		set_bit(vf_q_id, vf->rxq_ena);
	}

	q_map = vqs->tx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* Skip queue if already enabled */
		if (test_bit(vf_q_id, vf->txq_ena))
			continue;

		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
		set_bit(vf_q_id, vf->txq_ena);
	}

	/* Set flag to indicate that queues are enabled */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
				     NULL, 0);
}

/**
 * ice_vf_vsi_dis_single_txq - disable a single Tx queue
 * @vf: VF to disable queue for
 * @vsi: VSI for the VF
 * @q_id: VF relative (0-based) queue ID
 *
 * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
 * disabled then clear q_id bit in the enabled queues bitmap and return
 * success. Otherwise return error.
 */
static int
ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
{
	struct ice_txq_meta txq_meta = { 0 };
	struct ice_tx_ring *ring;
	int err;

	if (!test_bit(q_id, vf->txq_ena))
		dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
			q_id, vsi->vsi_num);

	ring = vsi->tx_rings[q_id];
	if (!ring)
		return -EINVAL;

	ice_fill_txq_meta(vsi, ring, &txq_meta);

	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
	if (err) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
			q_id, vsi->vsi_num);
		return err;
	}

	/* Clear enabled queues flag */
	clear_bit(q_id, vf->txq_ena);

	return 0;
}
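
/* Illustrative note (editorial): the function above deliberately stops the
 * ring even when its txq_ena bit is clear (logging only at debug level);
 * after a VF reset the VF's idea of which queues are live can diverge from
 * the PF's bookkeeping, and stopping an already-stopped queue is harmless
 * while leaving a stale one running is not.
 */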

/**
 * ice_vc_dis_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to disable all or specific queue(s)
 */
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct ice_vsi *vsi;
	unsigned long q_map;
	u16 vf_q_id;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vqs->tx_queues) {
		q_map = vqs->tx_queues;

		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}
		}
	}

	q_map = vqs->rx_queues;
	/* speed up Rx queue disable by batching them if possible */
	if (q_map &&
	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
		if (ice_vsi_stop_all_rx_rings(vsi)) {
			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
				vsi->vsi_num);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	} else if (q_map) {
		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Skip queue if not enabled */
			if (!test_bit(vf_q_id, vf->rxq_ena))
				continue;

			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
						     true)) {
				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
					vf_q_id, vsi->vsi_num);
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Clear enabled queues flag */
			clear_bit(vf_q_id, vf->rxq_ena);
		}
	}

	/* Clear enabled queues flag */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
				     NULL, 0);
}

/**
 * ice_cfg_interrupt
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @vector_id: vector ID
 * @map: vector map for mapping vectors to queues
 * @q_vector: structure for interrupt vector
 *
 * configure the IRQ to queue map
 */
static int
ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
		  struct virtchnl_vector_map *map,
		  struct ice_q_vector *q_vector)
{
	u16 vsi_q_id, vsi_q_id_idx;
	unsigned long qmap;

	q_vector->num_ring_rx = 0;
	q_vector->num_ring_tx = 0;

	qmap = map->rxq_map;
	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
		vsi_q_id = vsi_q_id_idx;

		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
			return VIRTCHNL_STATUS_ERR_PARAM;

		q_vector->num_ring_rx++;
		q_vector->rx.itr_idx = map->rxitr_idx;
		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
		ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
				      q_vector->rx.itr_idx);
	}

	qmap = map->txq_map;
	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
		vsi_q_id = vsi_q_id_idx;

		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
			return VIRTCHNL_STATUS_ERR_PARAM;

		q_vector->num_ring_tx++;
		q_vector->tx.itr_idx = map->txitr_idx;
		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
		ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
				      q_vector->tx.itr_idx);
	}

	return VIRTCHNL_STATUS_SUCCESS;
}
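
/* Illustrative example (editorial): a virtchnl_vector_map with
 * rxq_map = 0x3 and txq_map = 0x1 ties VSI-relative Rx queues 0 and 1 plus
 * Tx queue 0 to this vector, with ITR indices taken from
 * rxitr_idx/txitr_idx. Note that the helper returns virtchnl status codes
 * through an int, which the caller casts back to
 * enum virtchnl_status_code.
 */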

/**
 * ice_vc_cfg_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the IRQ to queue map
 */
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	u16 num_q_vectors_mapped, vsi_id, vector_id;
	struct virtchnl_irq_map_info *irqmap_info;
	struct virtchnl_vector_map *map;
	struct ice_vsi *vsi;
	int i;

	irqmap_info = (struct virtchnl_irq_map_info *)msg;
	num_q_vectors_mapped = irqmap_info->num_vectors;

	/* Check to make sure number of VF vectors mapped is not greater than
	 * number of VF vectors originally allocated, and check that
	 * there is actually at least a single VF queue vector mapped
	 */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    vf->num_msix < num_q_vectors_mapped ||
	    !num_q_vectors_mapped) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < num_q_vectors_mapped; i++) {
		struct ice_q_vector *q_vector;

		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* vector_id is always 0-based for each VF, and can never be
		 * larger than or equal to the max allowed interrupts per VF
		 */
		if (!(vector_id < vf->num_msix) ||
		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
		    (!vector_id && (map->rxq_map || map->txq_map))) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* No need to map VF miscellaneous or rogue vector */
		if (!vector_id)
			continue;

		/* Subtract non queue vector from vector_id passed by VF
		 * to get actual number of VSI queue vector array index
		 */
		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
		if (!q_vector) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* look out for an invalid queue index */
		v_ret = (enum virtchnl_status_code)
			ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
		if (v_ret)
			goto error_param;
	}

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
				     NULL, 0);
}
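
/* Illustrative example (editorial): with vf->num_msix = 5, valid
 * vector_ids are 0-4; vector_id 0 is the OICR/mailbox vector and must not
 * carry any queues, so the loop skips it, and queue vectors start after
 * ICE_NONQ_VECS_VF non-queue vectors, so VF vector_id N lands on
 * vsi->q_vectors[N - ICE_NONQ_VECS_VF].
 */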

/**
 * ice_vc_cfg_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the Rx/Tx queues
 */
static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	struct ice_pf *pf = vf->pf;
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	u8 act_prt, pri_prt;
	int i = -1, q_idx;

	lag = pf->lag;
	mutex_lock(&pf->lag_mutex);
	act_prt = ICE_LAG_INVALID_PORT;
	pri_prt = pf->hw.port_info->lport;
	if (lag && lag->bonded && lag->primary) {
		act_prt = lag->active_port;
		if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
		    lag->upper_netdev)
			ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
		else
			act_prt = ICE_LAG_INVALID_PORT;
	}

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		goto error_param;

	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
		goto error_param;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		goto error_param;

	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
		goto error_param;
	}

	for (i = 0; i < qci->num_queue_pairs; i++) {
		if (!qci->qpair[i].rxq.crc_disable)
			continue;

		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
		    vf->vlan_strip_ena)
			goto error_param;
	}

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		if (qpi->txq.vsi_id != qci->vsi_id ||
		    qpi->rxq.vsi_id != qci->vsi_id ||
		    qpi->rxq.queue_id != qpi->txq.queue_id ||
		    qpi->txq.headwb_enabled ||
		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
			goto error_param;
		}

		q_idx = qpi->rxq.queue_id;

		/* make sure selected "q_idx" is in valid range of queues
		 * for selected "vsi"
		 */
		if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
			goto error_param;
		}

		/* copy Tx queue info from VF into VSI */
		if (qpi->txq.ring_len > 0) {
			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
			vsi->tx_rings[i]->count = qpi->txq.ring_len;

			/* Disable any existing queue first */
			if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
				goto error_param;

			/* Configure a queue with the requested settings */
			if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
					 vf->vf_id, i);
				goto error_param;
			}
		}

		/* copy Rx queue info from VF into VSI */
		if (qpi->rxq.ring_len > 0) {
			u16 max_frame_size = ice_vc_get_max_frame_size(vf);
			u32 rxdid;

			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
			vsi->rx_rings[i]->count = qpi->rxq.ring_len;

			if (qpi->rxq.crc_disable)
				vsi->rx_rings[q_idx]->flags |=
					ICE_RX_FLAGS_CRC_STRIP_DIS;
			else
				vsi->rx_rings[q_idx]->flags &=
					~ICE_RX_FLAGS_CRC_STRIP_DIS;

			if (qpi->rxq.databuffer_size != 0 &&
			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
			     qpi->rxq.databuffer_size < 1024))
				goto error_param;
			vsi->rx_buf_len = qpi->rxq.databuffer_size;
			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
			if (qpi->rxq.max_pkt_size > max_frame_size ||
			    qpi->rxq.max_pkt_size < 64)
				goto error_param;

			vsi->max_frame = qpi->rxq.max_pkt_size;
			/* add space for the port VLAN since the VF driver is
			 * not expected to account for it in the MTU
			 * calculation
			 */
			if (ice_vf_is_port_vlan_ena(vf))
				vsi->max_frame += VLAN_HLEN;

			if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
					 vf->vf_id, i);
				goto error_param;
			}

			/* If Rx flex desc is supported, select RXDID for Rx
			 * queues. Otherwise, use legacy 32byte descriptor
			 * format. Legacy 16byte descriptor is not supported.
			 * If this RXDID is selected, return error.
			 */
			if (vf->driver_caps &
			    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
				rxdid = qpi->rxq.rxdid;
				if (!(BIT(rxdid) & pf->supported_rxdids))
					goto error_param;
			} else {
				rxdid = ICE_RXDID_LEGACY_1;
			}

			ice_write_qrxflxp_cntxt(&vsi->back->hw,
						vsi->rxq_map[q_idx],
						rxdid, 0x03, false);
		}
	}

	if (lag && lag->bonded && lag->primary &&
	    act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
	mutex_unlock(&pf->lag_mutex);

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
error_param:
	/* disable whatever we can */
	for (; i >= 0; i--) {
		if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
				vf->vf_id, i);
		if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
				vf->vf_id, i);
	}

	if (lag && lag->bonded && lag->primary &&
	    act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
	mutex_unlock(&pf->lag_mutex);

	ice_lag_move_new_vf_nodes(vf);

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				     VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
}
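
/*
 * Illustrative sketch of a single queue pair a VF driver could submit to the
 * handler above and have pass validation: matching Tx/Rx queue IDs, a ring
 * length the PF-side ring-length check accepts, an Rx buffer size within
 * [1024, 16K - 128] and a max packet size of at least 64 bytes. The helper
 * name and the constants chosen are assumptions for demonstration only.
 */
static void __maybe_unused
ice_vc_example_fill_qpair(struct virtchnl_queue_pair_info *qpi, u16 vsi_id)
{
	qpi->txq.vsi_id = vsi_id;
	qpi->txq.queue_id = 0;
	qpi->txq.ring_len = 512;		/* multiple of 32 descriptors */
	qpi->rxq.vsi_id = vsi_id;
	qpi->rxq.queue_id = 0;			/* must match txq.queue_id */
	qpi->rxq.ring_len = 512;
	qpi->rxq.databuffer_size = 2048;	/* within [1024, 16K - 128] */
	qpi->rxq.max_pkt_size = 1522;		/* >= 64, <= VF max frame */
}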

/**
 * ice_can_vf_change_mac
 * @vf: pointer to the VF info
 *
 * Return true if the VF is allowed to change its MAC filters, false otherwise
 */
static bool ice_can_vf_change_mac(struct ice_vf *vf)
{
	/* If the VF MAC address has been set administratively (via the
	 * ndo_set_vf_mac command), then deny permission to the VF to
	 * add/delete unicast MAC addresses, unless the VF is trusted
	 */
	if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
		return false;

	return true;
}

/**
 * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
 * @vc_ether_addr: used to extract the type
 */
static u8
ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
{
	return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
}

/**
 * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
 */
static bool
ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
{
	u8 type = ice_vc_ether_addr_type(vc_ether_addr);

	return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
}

/**
 * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
 *
 * This function should only be called when the MAC address in
 * virtchnl_ether_addr is a valid unicast MAC
 */
static bool
ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
{
	u8 type = ice_vc_ether_addr_type(vc_ether_addr);

	return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
}

/**
 * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
 * @vf: VF to update
 * @vc_ether_addr: structure from VIRTCHNL with MAC to add
 */
static void
ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
{
	u8 *mac_addr = vc_ether_addr->addr;

	if (!is_valid_ether_addr(mac_addr))
		return;

	/* only allow legacy VF drivers to set the device and hardware MAC if it
	 * is zero and allow new VF drivers to set the hardware MAC if the type
	 * was correctly specified over VIRTCHNL
	 */
	if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
	     is_zero_ether_addr(vf->hw_lan_addr)) ||
	    ice_is_vc_addr_primary(vc_ether_addr)) {
		ether_addr_copy(vf->dev_lan_addr, mac_addr);
		ether_addr_copy(vf->hw_lan_addr, mac_addr);
	}

	/* hardware and device MACs are already set, but it's possible that the
	 * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
	 * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
	 * away for the legacy VF driver case as it will be updated in the
	 * delete flow for this case
	 */
	if (ice_is_vc_addr_legacy(vc_ether_addr)) {
		ether_addr_copy(vf->legacy_last_added_umac.addr,
				mac_addr);
		vf->legacy_last_added_umac.time_modified = jiffies;
	}
}
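
/*
 * Illustrative sketch: how a non-legacy VF driver would tag its primary MAC
 * so the branch above updates both dev_lan_addr and hw_lan_addr. The helper
 * name is an assumption for demonstration only.
 */
static void __maybe_unused
ice_vc_example_mark_primary_mac(struct virtchnl_ether_addr *vc_ether_addr,
				const u8 *mac)
{
	ether_addr_copy(vc_ether_addr->addr, mac);
	vc_ether_addr->type = VIRTCHNL_ETHER_ADDR_PRIMARY;
}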

/**
 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
 * @vf: pointer to the VF info
 * @vsi: pointer to the VF's VSI
 * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
 */
static int
ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
		    struct virtchnl_ether_addr *vc_ether_addr)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u8 *mac_addr = vc_ether_addr->addr;
	int ret;

	/* device MAC already added */
	if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
		return 0;

	if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
		dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
		return -EPERM;
	}

	ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
	if (ret == -EEXIST) {
		dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
			vf->vf_id);
		/* don't return since we might need to update
		 * the primary MAC in ice_vfhw_mac_add() below
		 */
	} else if (ret) {
		dev_err(dev, "Failed to add MAC %pM for VF %d, error %d\n",
			mac_addr, vf->vf_id, ret);
		return ret;
	} else {
		vf->num_mac++;
	}

	ice_vfhw_mac_add(vf, vc_ether_addr);

	return ret;
}

/**
 * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
 * @last_added_umac: structure used to check expiration
 */
static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
{
#define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME	msecs_to_jiffies(3000)
	return time_is_before_jiffies(last_added_umac->time_modified +
				      ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
}
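
/*
 * Worked example for the window above (illustrative): a MAC cached at
 * time_modified = j is treated as expired once jiffies passes
 * j + msecs_to_jiffies(3000), i.e. a legacy VF driver has roughly three
 * seconds to finish its delete-then-add MAC update before the cached
 * address stops being honored.
 */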

/**
 * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
 * @vf: VF to update
 * @vc_ether_addr: structure from VIRTCHNL with MAC to check
 *
 * only update cached hardware MAC for legacy VF drivers on delete
 * because we cannot guarantee order/type of MAC from the VF driver
 */
static void
ice_update_legacy_cached_mac(struct ice_vf *vf,
			     struct virtchnl_ether_addr *vc_ether_addr)
{
	if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
	    ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
		return;

	ether_addr_copy(vf->dev_lan_addr, vf->legacy_last_added_umac.addr);
	ether_addr_copy(vf->hw_lan_addr, vf->legacy_last_added_umac.addr);
}

/**
 * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
 * @vf: VF to update
 * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
 */
static void
ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
{
	u8 *mac_addr = vc_ether_addr->addr;

	if (!is_valid_ether_addr(mac_addr) ||
	    !ether_addr_equal(vf->dev_lan_addr, mac_addr))
		return;

	/* allow the device MAC to be repopulated in the add flow and don't
	 * clear the hardware MAC (i.e. hw_lan_addr) here as that is meant
	 * to be persistent on VM reboot and across driver unload/load, which
	 * won't work if we clear the hardware MAC here
	 */
	eth_zero_addr(vf->dev_lan_addr);

	ice_update_legacy_cached_mac(vf, vc_ether_addr);
}

/**
 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
 * @vf: pointer to the VF info
 * @vsi: pointer to the VF's VSI
 * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
 */
static int
ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
		    struct virtchnl_ether_addr *vc_ether_addr)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u8 *mac_addr = vc_ether_addr->addr;
	int status;

	if (!ice_can_vf_change_mac(vf) &&
	    ether_addr_equal(vf->dev_lan_addr, mac_addr))
		return 0;

	status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
	if (status == -ENOENT) {
		dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
			vf->vf_id);
		return -ENOENT;
	} else if (status) {
		dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
			mac_addr, vf->vf_id, status);
		return -EIO;
	}

	ice_vfhw_mac_del(vf, vc_ether_addr);

	vf->num_mac--;

	return 0;
}

/**
 * ice_vc_handle_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @set: true if MAC filters are being set, false otherwise
 *
 * add or remove guest MAC address filters
 */
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
	int (*ice_vc_cfg_mac)
		(struct ice_vf *vf, struct ice_vsi *vsi,
		 struct virtchnl_ether_addr *virtchnl_ether_addr);
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct ice_pf *pf = vf->pf;
	enum virtchnl_ops vc_op;
	struct ice_vsi *vsi;
	int i;

	if (set) {
		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
		ice_vc_cfg_mac = ice_vc_add_mac_addr;
	} else {
		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
		ice_vc_cfg_mac = ice_vc_del_mac_addr;
	}

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	/* If this VF is not privileged, then we can't add more than a
	 * limited number of addresses. Check to make sure that the
	 * additions do not push us over the limit.
	 */
	if (set && !ice_is_vf_trusted(vf) &&
	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
			vf->vf_id);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	for (i = 0; i < al->num_elements; i++) {
		u8 *mac_addr = al->list[i].addr;
		int result;

		if (is_broadcast_ether_addr(mac_addr) ||
		    is_zero_ether_addr(mac_addr))
			continue;

		result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
		if (result == -EEXIST || result == -ENOENT) {
			continue;
		} else if (result) {
			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
			goto handle_mac_exit;
		}
	}

handle_mac_exit:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}

/**
 * ice_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * add guest MAC address filter
 */
static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_handle_mac_addr_msg(vf, msg, true);
}

/**
 * ice_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove guest MAC address filter
 */
static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_handle_mac_addr_msg(vf, msg, false);
}

/**
 * ice_vc_request_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * VFs get a default number of queues but can use this message to request a
 * different number. If the request is successful, the PF will reset the VF
 * and return 0. If unsuccessful, the PF will inform the VF of the number of
 * available queue pairs via a virtchnl message response.
 */
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	u16 req_queues = vfres->num_queue_pairs;
	struct ice_pf *pf = vf->pf;
	u16 max_allowed_vf_queues;
	u16 tx_rx_queue_left;
	struct device *dev;
	u16 cur_queues;

	dev = ice_pf_to_dev(pf);
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	cur_queues = vf->num_vf_qs;
	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
				 ice_get_avail_rxq_count(pf));
	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
	if (!req_queues) {
		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
			vf->vf_id);
	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
		dev_err(dev, "VF %d tried to request more than %d queues.\n",
			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
	} else if (req_queues > cur_queues &&
		   req_queues - cur_queues > tx_rx_queue_left) {
		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
					       ICE_MAX_RSS_QS_PER_VF);
	} else {
		/* request is successful, then reset VF */
		vf->num_req_qs = req_queues;
		ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
		dev_info(dev, "VF %d granted request of %u queues.\n",
			 vf->vf_id, req_queues);
		return 0;
	}

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
				     v_ret, (u8 *)vfres, sizeof(*vfres));
}
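
/*
 * Worked example for the clamping above (illustrative numbers): a VF with
 * cur_queues = 4 asking for req_queues = 16 when only tx_rx_queue_left = 8
 * remain takes the third branch and gets the offer
 * min(4 + 8, ICE_MAX_RSS_QS_PER_VF) = min(12, 16) = 12 echoed back in
 * num_queue_pairs instead of a reset.
 */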

/**
 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
 * @caps: VF driver negotiated capabilities
 *
 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
 */
static bool ice_vf_vlan_offload_ena(u32 caps)
{
	return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
}

/**
 * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
 * @vf: VF used to determine if VLAN promiscuous config is allowed
 */
static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
{
	if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	     test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
	    test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
		return true;

	return false;
}

/**
 * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
 * @vsi: VF's VSI used to enable VLAN promiscuous mode
 * @vlan: VLAN used to enable VLAN promiscuous
 *
 * This function should only be called if VLAN promiscuous mode is allowed,
 * which can be determined via ice_is_vlan_promisc_allowed().
 */
static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
	u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
	int status;

	status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
					  vlan->vid);
	if (status && status != -EEXIST)
		return status;

	return 0;
}

/**
 * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
 * @vsi: VF's VSI used to disable VLAN promiscuous mode for
 * @vlan: VLAN used to disable VLAN promiscuous
 *
 * This function should only be called if VLAN promiscuous mode is allowed,
 * which can be determined via ice_is_vlan_promisc_allowed().
 */
static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
	u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
	int status;

	status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
					    vlan->vid);
	if (status && status != -ENOENT)
		return status;

	return 0;
}

/**
 * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
 * @vf: VF to check against
 * @vsi: VF's VSI
 *
 * If the VF is trusted then the VF is allowed to add as many VLANs as it
 * wants to, so return false.
 *
 * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
 * allowed VLANs for an untrusted VF. Return the result of this comparison.
 */
static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
{
	if (ice_is_vf_trusted(vf))
		return false;

#define ICE_VF_ADDED_VLAN_ZERO_FLTRS	1
	return ((ice_vsi_num_non_zero_vlans(vsi) +
		ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
}
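
/*
 * Worked example (illustrative, assuming ICE_MAX_VLAN_PER_VF = 8): an
 * untrusted VF that already has 7 non-zero VLANs is at the limit, since the
 * default VLAN 0 filter (ICE_VF_ADDED_VLAN_ZERO_FLTRS) counts toward the
 * total: 7 + 1 >= 8.
 */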

/**
 * ice_vc_process_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @add_v: Add VLAN if true, otherwise delete VLAN
 *
 * Process virtchnl op to add or remove programmed guest VLAN ID
 */
static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct ice_pf *pf = vf->pf;
	bool vlan_promisc = false;
	struct ice_vsi *vsi;
	struct device *dev;
	int status = 0;
	int i;

	dev = ice_pf_to_dev(pf);
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] >= VLAN_N_VID) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			dev_err(dev, "invalid VF VLAN id %d\n",
				vfl->vlan_id[i]);
			goto error_param;
		}
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
			 vf->vf_id);
		/* There is no need to let VF know about being not trusted,
		 * so we can just return success message here
		 */
		goto error_param;
	}

	/* in DVM a VF can add/delete inner VLAN filters when
	 * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
	 */
	if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* in DVM VLAN promiscuous is based on the outer VLAN, which would be
	 * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
	 * allow vlan_promisc = true in SVM and if no port VLAN is configured
	 */
	vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
		!ice_is_dvm_ena(&pf->hw) &&
		!ice_vf_is_port_vlan_ena(vf);

	if (add_v) {
		for (i = 0; i < vfl->num_elements; i++) {
			u16 vid = vfl->vlan_id[i];
			struct ice_vlan vlan;

			if (ice_vf_has_max_vlans(vf, vsi)) {
				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
					 vf->vf_id);
				/* There is no need to let VF know about being
				 * not trusted, so we can just return success
				 * message here as well.
				 */
				goto error_param;
			}

			/* we add VLAN 0 by default for each VF so we can enable
			 * Tx VLAN anti-spoof without triggering MDD events so
			 * we don't need to add it again here
			 */
			if (!vid)
				continue;

			vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
			status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
			if (status) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Enable VLAN filtering on first non-zero VLAN */
			if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
				if (vf->spoofchk) {
					status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
					if (status) {
						v_ret = VIRTCHNL_STATUS_ERR_PARAM;
						dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
							vid, status);
						goto error_param;
					}
				}
				if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
						vid, status);
					goto error_param;
				}
			} else if (vlan_promisc) {
				status = ice_vf_ena_vlan_promisc(vsi, &vlan);
				if (status) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
						vid, status);
				}
			}
		}
	} else {
		/* For an untrusted VF, the number of VLAN elements passed to
		 * the PF for removal might be greater than the number of VLAN
		 * filters programmed for that VF, so use the actual number of
		 * VLANs added earlier with the add VLAN opcode. This avoids
		 * trying to remove a VLAN that doesn't exist, which would
		 * result in sending an erroneous failure message back to the
		 * VF.
		 */
		int num_vf_vlan;

		num_vf_vlan = vsi->num_vlan;
		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
			u16 vid = vfl->vlan_id[i];
			struct ice_vlan vlan;

			/* we add VLAN 0 by default for each VF so we can enable
			 * Tx VLAN anti-spoof without triggering MDD events so
			 * we don't want a VIRTCHNL request to remove it
			 */
			if (!vid)
				continue;

			vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
			status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
			if (status) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Disable VLAN filtering when only VLAN 0 is left */
			if (!ice_vsi_has_non_zero_vlans(vsi)) {
				vsi->inner_vlan_ops.dis_tx_filtering(vsi);
				vsi->inner_vlan_ops.dis_rx_filtering(vsi);
			}

			if (vlan_promisc)
				ice_vf_dis_vlan_promisc(vsi, &vlan);
		}
	}

error_param:
	/* send the response to the VF */
	if (add_v)
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
					     NULL, 0);
	else
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
					     NULL, 0);
}

/**
 * ice_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Add and program guest VLAN ID
 */
static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, true);
}

/**
 * ice_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove programmed guest VLAN ID
 */
static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, false);
}

/**
 * ice_vsi_is_rxq_crc_strip_dis - check if Rx queue CRC strip is disabled or not
 * @vsi: pointer to the VF VSI info
 */
static bool ice_vsi_is_rxq_crc_strip_dis(struct ice_vsi *vsi)
{
	unsigned int i;

	ice_for_each_alloc_rxq(vsi, i)
		if (vsi->rx_rings[i]->flags & ICE_RX_FLAGS_CRC_STRIP_DIS)
			return true;

	return false;
}

/**
 * ice_vc_ena_vlan_stripping
 * @vf: pointer to the VF info
 *
 * Enable VLAN header stripping for a given VF
 */
static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	else
		vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				     v_ret, NULL, 0);
}

/**
 * ice_vc_dis_vlan_stripping
 * @vf: pointer to the VF info
 *
 * Disable VLAN header stripping for a given VF
 */
static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vsi->inner_vlan_ops.dis_stripping(vsi))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	else
		vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				     v_ret, NULL, 0);
}

/**
 * ice_vc_get_rss_hena - return the RSS HENA bits allowed by the hardware
 * @vf: pointer to the VF info
 */
static int ice_vc_get_rss_hena(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_rss_hena *vrh = NULL;
	int len = 0, ret;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	len = sizeof(struct virtchnl_rss_hena);
	vrh = kzalloc(len, GFP_KERNEL);
	if (!vrh) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}

	vrh->hena = ICE_DEFAULT_RSS_HENA;
err:
	/* send the response back to the VF */
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, v_ret,
				    (u8 *)vrh, len);
	kfree(vrh);
	return ret;
}

/**
 * ice_vc_set_rss_hena - set RSS HENA bits for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 */
static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int status;

	dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		dev_err(dev, "RSS not supported by PF\n");
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	/* clear all previously programmed RSS configuration to allow VF drivers
	 * the ability to customize the RSS configuration and/or completely
	 * disable RSS
	 */
	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
	if (status && !vrh->hena) {
		/* only report failure to clear the current RSS configuration if
		 * that was clearly the VF's intention (i.e. vrh->hena = 0)
		 */
		v_ret = ice_err_to_virt_err(status);
		goto err;
	} else if (status) {
		/* allow the VF to update the RSS configuration even on failure
		 * to clear the current RSS configuration in an attempt to keep
		 * RSS in a working state
		 */
		dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
			 vf->vf_id);
	}

	if (vrh->hena) {
		status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena);
		v_ret = ice_err_to_virt_err(status);
	}

	/* send the response to the VF */
err:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, v_ret,
				     NULL, 0);
}

/**
 * ice_vc_query_rxdid - query RXDID supported by DDP package
 * @vf: pointer to VF info
 *
 * Called from VF to query a bitmap of supported flexible
 * descriptor RXDIDs of a DDP package.
 */
static int ice_vc_query_rxdid(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_supported_rxdids *rxdid = NULL;
	struct ice_hw *hw = &vf->pf->hw;
	struct ice_pf *pf = vf->pf;
	int len = 0;
	int ret, i;
	u32 regval;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	len = sizeof(struct virtchnl_supported_rxdids);
	rxdid = kzalloc(len, GFP_KERNEL);
	if (!rxdid) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}

	/* RXDIDs supported by DDP package can be read from the register
	 * to get the supported RXDID bitmap. But the legacy 32byte RXDID
	 * is not listed in DDP package, add it in the bitmap manually.
	 * Legacy 16byte descriptor is not supported.
	 */
	rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1);

	for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
		regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
			rxdid->supported_rxdids |= BIT(i);
	}

	pf->supported_rxdids = rxdid->supported_rxdids;

err:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
				    v_ret, (u8 *)rxdid, len);
	kfree(rxdid);
	return ret;
}
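
/*
 * Illustrative sketch: once pf->supported_rxdids is cached by the handler
 * above, a later VIRTCHNL_OP_CONFIG_VSI_QUEUES request can be checked with a
 * single bit test, mirroring the check in ice_vc_cfg_qs_msg(). The helper
 * name is an assumption for demonstration only.
 */
static bool __maybe_unused
ice_vc_example_rxdid_supported(struct ice_pf *pf, u32 rxdid)
{
	if (rxdid >= ICE_FLEX_DESC_RXDID_MAX_NUM)
		return false;

	return !!(pf->supported_rxdids & BIT(rxdid));
}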

/**
 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
 * @vf: VF to enable/disable VLAN stripping for on initialization
 *
 * Set the default for VLAN stripping based on whether a port VLAN is configured
 * and the current VLAN mode of the device.
 */
static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	vf->vlan_strip_ena = 0;

	if (!vsi)
		return -EINVAL;

	/* don't modify stripping if port VLAN is configured in SVM since the
	 * port VLAN is based on the inner/single VLAN in SVM
	 */
	if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
		return 0;

	if (ice_vf_vlan_offload_ena(vf->driver_caps)) {
		int err;

		err = vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
		if (!err)
			vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
		return err;
	}

	return vsi->inner_vlan_ops.dis_stripping(vsi);
}

static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
{
	if (vf->trusted)
		return VLAN_N_VID;
	else
		return ICE_MAX_VLAN_PER_VF;
}

/**
 * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
 * @vf: VF being checked
 *
 * When the device is in double VLAN mode, check whether or not the outer VLAN
 * is allowed.
 */
static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
{
	if (ice_vf_is_port_vlan_ena(vf))
		return true;

	return false;
}

/**
 * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
 * @vf: VF that capabilities are being set for
 * @caps: VLAN capabilities to populate
 *
 * Determine VLAN capabilities support based on whether a port VLAN is
 * configured. If a port VLAN is configured then the VF should use the inner
 * filtering/offload capabilities since the port VLAN is using the outer VLAN
 * capabilities.
 */
static void
ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
{
	struct virtchnl_vlan_supported_caps *supported_caps;

	if (ice_vf_outer_vlan_not_allowed(vf)) {
		/* until support for inner VLAN filtering is added when a port
		 * VLAN is configured, only support software offloaded inner
		 * VLANs when a port VLAN is configured in DVM
		 */
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
		caps->offloads.ethertype_match =
			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
	} else {
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
					VIRTCHNL_VLAN_ETHERTYPE_9100 |
					VIRTCHNL_VLAN_ETHERTYPE_AND;
		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
						 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
						 VIRTCHNL_VLAN_ETHERTYPE_9100;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
					VIRTCHNL_VLAN_ETHERTYPE_9100 |
					VIRTCHNL_VLAN_ETHERTYPE_XOR |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
					VIRTCHNL_VLAN_ETHERTYPE_9100 |
					VIRTCHNL_VLAN_ETHERTYPE_XOR |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;

		caps->offloads.ethertype_match =
			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
	}

	caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
}

/**
 * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
 * @vf: VF that capabilities are being set for
 * @caps: VLAN capabilities to populate
 *
 * Determine VLAN capabilities support based on whether a port VLAN is
 * configured. If a port VLAN is configured then the VF does not have any VLAN
 * filtering or offload capabilities since the port VLAN is using the inner VLAN
 * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
 * VLAN filtering and offload capabilities.
 */
static void
ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
{
	struct virtchnl_vlan_supported_caps *supported_caps;

	if (ice_vf_is_port_vlan_ena(vf)) {
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
		caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
		caps->filtering.max_filters = 0;
	} else {
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
		caps->offloads.ethertype_match =
			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
		caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
	}
}

/**
 * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
 * @vf: VF to determine VLAN capabilities for
 *
 * This will only be called if the VF and PF successfully negotiated
 * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
 *
 * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
 * is configured or not.
 */
static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_caps *caps = NULL;
	int err, len = 0;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		goto out;
	}
	len = sizeof(*caps);

	if (ice_is_dvm_ena(&vf->pf->hw))
		ice_vc_set_dvm_caps(vf, caps);
	else
		ice_vc_set_svm_caps(vf, caps);

	/* store negotiated caps to prevent invalid VF messages */
	memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));

out:
	err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
				    v_ret, (u8 *)caps, len);
	kfree(caps);
	return err;
}

/**
 * ice_vc_validate_vlan_tpid - validate VLAN TPID
 * @filtering_caps: negotiated/supported VLAN filtering capabilities
 * @tpid: VLAN TPID used for validation
 *
 * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
 * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
 */
static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
{
	enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;

	switch (tpid) {
	case ETH_P_8021Q:
		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
		break;
	case ETH_P_8021AD:
		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
		break;
	case ETH_P_QINQ1:
		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
		break;
	}

	if (!(filtering_caps & vlan_ethertype))
		return false;

	return true;
}
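
/*
 * Worked example for the mapping above (illustrative): with negotiated caps
 * of VIRTCHNL_VLAN_ETHERTYPE_8100 | VIRTCHNL_VLAN_ETHERTYPE_88A8, a TPID of
 * ETH_P_8021Q (0x8100) or ETH_P_8021AD (0x88A8) validates, while ETH_P_QINQ1
 * (0x9100) maps to VIRTCHNL_VLAN_ETHERTYPE_9100 and is rejected. Any other
 * TPID leaves vlan_ethertype at VIRTCHNL_VLAN_UNSUPPORTED (0), so the AND
 * with the caps is always 0 and the TPID is rejected as well.
 */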

/**
 * ice_vc_is_valid_vlan - validate the virtchnl_vlan
 * @vc_vlan: virtchnl_vlan to validate
 *
 * If the VLAN TCI and VLAN TPID are 0, then this filter is invalid, so return
 * false. Otherwise return true.
 */
static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
{
	if (!vc_vlan->tci || !vc_vlan->tpid)
		return false;

	return true;
}

/**
 * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
 * @vfc: negotiated/supported VLAN filtering capabilities
 * @vfl: VLAN filter list from VF to validate
 *
 * Validate all of the filters in the VLAN filter list from the VF. If any of
 * the checks fail then return false. Otherwise return true.
 */
static bool
ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
				 struct virtchnl_vlan_filter_list_v2 *vfl)
{
	u16 i;

	if (!vfl->num_elements)
		return false;

	for (i = 0; i < vfl->num_elements; i++) {
		struct virtchnl_vlan_supported_caps *filtering_support =
			&vfc->filtering_support;
		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
		struct virtchnl_vlan *outer = &vlan_fltr->outer;
		struct virtchnl_vlan *inner = &vlan_fltr->inner;

		if ((ice_vc_is_valid_vlan(outer) &&
		     filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
		    (ice_vc_is_valid_vlan(inner) &&
		     filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
			return false;

		if ((outer->tci_mask &&
		     !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
		    (inner->tci_mask &&
		     !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
			return false;

		if (((outer->tci & VLAN_PRIO_MASK) &&
		     !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
		    ((inner->tci & VLAN_PRIO_MASK) &&
		     !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
			return false;

		if ((ice_vc_is_valid_vlan(outer) &&
		     !ice_vc_validate_vlan_tpid(filtering_support->outer,
						outer->tpid)) ||
		    (ice_vc_is_valid_vlan(inner) &&
		     !ice_vc_validate_vlan_tpid(filtering_support->inner,
						inner->tpid)))
			return false;
	}

	return true;
}

/**
 * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
 * @vc_vlan: struct virtchnl_vlan to transform
 */
static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
{
	struct ice_vlan vlan = { 0 };

	vlan.prio = FIELD_GET(VLAN_PRIO_MASK, vc_vlan->tci);
	vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
	vlan.tpid = vc_vlan->tpid;

	return vlan;
}
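
/*
 * Worked example (illustrative): a virtchnl_vlan with tci = 0x6064 and
 * tpid = ETH_P_8021Q decomposes to prio = (0x6064 & 0xe000) >> 13 = 3 and
 * vid = 0x6064 & 0x0fff = 100, i.e. priority 3 on VLAN 100.
 */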

/**
 * ice_vc_vlan_action - action to perform on the virtchnl_vlan
 * @vsi: VF's VSI used to perform the action
 * @vlan_action: function to perform the action with (i.e. add/del)
 * @vlan: VLAN filter to perform the action with
 */
static int
ice_vc_vlan_action(struct ice_vsi *vsi,
		   int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
		   struct ice_vlan *vlan)
{
	int err;

	err = vlan_action(vsi, vlan);
	if (err)
		return err;

	return 0;
}

/**
 * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
 * @vf: VF used to delete the VLAN(s)
 * @vsi: VF's VSI used to delete the VLAN(s)
 * @vfl: virtchnl filter list used to delete the filters
 */
static int
ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
		 struct virtchnl_vlan_filter_list_v2 *vfl)
{
	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
	int err;
	u16 i;

	for (i = 0; i < vfl->num_elements; i++) {
		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
		struct virtchnl_vlan *vc_vlan;

		vc_vlan = &vlan_fltr->outer;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->outer_vlan_ops.del_vlan,
						 &vlan);
			if (err)
				return err;

			if (vlan_promisc)
				ice_vf_dis_vlan_promisc(vsi, &vlan);

			/* Disable VLAN filtering when only VLAN 0 is left */
			if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
				err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
				if (err)
					return err;
			}
		}

		vc_vlan = &vlan_fltr->inner;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->inner_vlan_ops.del_vlan,
						 &vlan);
			if (err)
				return err;

			/* no support for VLAN promiscuous on inner VLAN unless
			 * we are in Single VLAN Mode (SVM)
			 */
			if (!ice_is_dvm_ena(&vsi->back->hw)) {
				if (vlan_promisc)
					ice_vf_dis_vlan_promisc(vsi, &vlan);

				/* Disable VLAN filtering when only VLAN 0 is left */
				if (!ice_vsi_has_non_zero_vlans(vsi)) {
					err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
					if (err)
						return err;
				}
			}
		}
	}

	return 0;
}

/**
 * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
 * @vf: VF the message was received from
 * @msg: message received from the VF
 */
static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_vlan_filter_list_v2 *vfl =
		(struct virtchnl_vlan_filter_list_v2 *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi;

	if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
					      vfl)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (ice_vc_del_vlans(vf, vsi, vfl))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
				     0);
}

/**
 * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
 * @vf: VF used to add the VLAN(s)
 * @vsi: VF's VSI used to add the VLAN(s)
 * @vfl: virtchnl filter list used to add the filters
 */
static int
ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
		 struct virtchnl_vlan_filter_list_v2 *vfl)
{
	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
	int err;
	u16 i;

	for (i = 0; i < vfl->num_elements; i++) {
		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
		struct virtchnl_vlan *vc_vlan;

		vc_vlan = &vlan_fltr->outer;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->outer_vlan_ops.add_vlan,
						 &vlan);
			if (err)
				return err;

			if (vlan_promisc) {
				err = ice_vf_ena_vlan_promisc(vsi, &vlan);
				if (err)
					return err;
			}

			/* Enable VLAN filtering on first non-zero VLAN */
			if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
				err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
				if (err)
					return err;
			}
		}

		vc_vlan = &vlan_fltr->inner;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->inner_vlan_ops.add_vlan,
						 &vlan);
			if (err)
				return err;

			/* no support for VLAN promiscuous on inner VLAN unless
			 * we are in Single VLAN Mode (SVM)
			 */
			if (!ice_is_dvm_ena(&vsi->back->hw)) {
				if (vlan_promisc) {
					err = ice_vf_ena_vlan_promisc(vsi, &vlan);
					if (err)
						return err;
				}

				/* Enable VLAN filtering on first non-zero VLAN */
				if (vf->spoofchk && vlan.vid) {
					err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
					if (err)
						return err;
				}
			}
		}
	}

	return 0;
}
3305 
3306 /**
3307  * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
3308  * @vsi: VF VSI used to get number of existing VLAN filters
3309  * @vfc: negotiated/supported VLAN filtering capabilities
3310  * @vfl: VLAN filter list from VF to validate
3311  *
3312  * Validate all of the filters in the VLAN filter list from the VF during the
3313  * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
3314  * Otherwise return true.
3315  */
3316 static bool
3317 ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
3318 				     struct virtchnl_vlan_filtering_caps *vfc,
3319 				     struct virtchnl_vlan_filter_list_v2 *vfl)
3320 {
3321 	u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
3322 		vfl->num_elements;
3323 
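	/* Illustrative example: with max_filters = 16 and 14 non-zero VLANs
	 * already programmed on the VSI, a request adding 3 more filters
	 * (14 + 3 = 17) is rejected before any per-filter validation runs.
	 */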
3324 	if (num_requested_filters > vfc->max_filters)
3325 		return false;
3326 
3327 	return ice_vc_validate_vlan_filter_list(vfc, vfl);
3328 }
3329 
3330 /**
3331  * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
3332  * @vf: VF the message was received from
3333  * @msg: message received from the VF
3334  */
3335 static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
3336 {
3337 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3338 	struct virtchnl_vlan_filter_list_v2 *vfl =
3339 		(struct virtchnl_vlan_filter_list_v2 *)msg;
3340 	struct ice_vsi *vsi;
3341 
3342 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3343 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3344 		goto out;
3345 	}
3346 
3347 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
3348 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3349 		goto out;
3350 	}
3351 
3352 	vsi = ice_get_vf_vsi(vf);
3353 	if (!vsi) {
3354 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3355 		goto out;
3356 	}
3357 
3358 	if (!ice_vc_validate_add_vlan_filter_list(vsi,
3359 						  &vf->vlan_v2_caps.filtering,
3360 						  vfl)) {
3361 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3362 		goto out;
3363 	}
3364 
3365 	if (ice_vc_add_vlans(vf, vsi, vfl))
3366 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3367 
3368 out:
3369 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
3370 				     0);
3371 }
3372 
3373 /**
3374  * ice_vc_valid_vlan_setting - validate VLAN setting
3375  * @negotiated_settings: negotiated VLAN settings during VF init
3376  * @ethertype_setting: ethertype(s) requested for the VLAN setting
3377  */
3378 static bool
3379 ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
3380 {
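	/* the requested ethertype(s) must be a subset of what was negotiated */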
3381 	if (ethertype_setting && !(negotiated_settings & ethertype_setting))
3382 		return false;
3383 
3384 	/* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
3385 	 * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
3386 	 */
3387 	if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
3388 	    hweight32(ethertype_setting) > 1)
3389 		return false;
3390 
3391 	/* ability to modify the VLAN setting was not negotiated */
3392 	if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
3393 		return false;
3394 
3395 	return true;
3396 }
3397 
3398 /**
3399  * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
3400  * @caps: negotiated VLAN settings during VF init
3401  * @msg: message to validate
3402  *
3403  * Used to validate any VLAN virtchnl message sent as a
3404  * virtchnl_vlan_setting structure. Validates the message against the
3405  * negotiated/supported caps during VF driver init.
3406  */
3407 static bool
3408 ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
3409 			      struct virtchnl_vlan_setting *msg)
3410 {
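	/* reject if the VF requested nothing, or if neither outer nor inner
	 * VLAN settings are supported
	 */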
3411 	if ((!msg->outer_ethertype_setting &&
3412 	     !msg->inner_ethertype_setting) ||
3413 	    (!caps->outer && !caps->inner))
3414 		return false;
3415 
3416 	if (msg->outer_ethertype_setting &&
3417 	    !ice_vc_valid_vlan_setting(caps->outer,
3418 				       msg->outer_ethertype_setting))
3419 		return false;
3420 
3421 	if (msg->inner_ethertype_setting &&
3422 	    !ice_vc_valid_vlan_setting(caps->inner,
3423 				       msg->inner_ethertype_setting))
3424 		return false;
3425 
3426 	return true;
3427 }
3428 
3429 /**
3430  * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
3431  * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
3432  * @tpid: VLAN TPID to populate
3433  */
3434 static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
3435 {
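	/* Map the negotiated VIRTCHNL ethertype bit to the on-wire TPID:
	 * ETHERTYPE_8100 -> 0x8100 (802.1Q), ETHERTYPE_88A8 -> 0x88A8
	 * (802.1ad), ETHERTYPE_9100 -> 0x9100 (QinQ); anything else fails.
	 */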
3436 	switch (ethertype_setting) {
3437 	case VIRTCHNL_VLAN_ETHERTYPE_8100:
3438 		*tpid = ETH_P_8021Q;
3439 		break;
3440 	case VIRTCHNL_VLAN_ETHERTYPE_88A8:
3441 		*tpid = ETH_P_8021AD;
3442 		break;
3443 	case VIRTCHNL_VLAN_ETHERTYPE_9100:
3444 		*tpid = ETH_P_QINQ1;
3445 		break;
3446 	default:
3447 		*tpid = 0;
3448 		return -EINVAL;
3449 	}
3450 
3451 	return 0;
3452 }
3453 
3454 /**
3455  * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
3456  * @vsi: VF's VSI used to enable the VLAN offload
3457  * @ena_offload: function used to enable the VLAN offload
3458  * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
3459  */
3460 static int
3461 ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
3462 			int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
3463 			u32 ethertype_setting)
3464 {
3465 	u16 tpid;
3466 	int err;
3467 
3468 	err = ice_vc_get_tpid(ethertype_setting, &tpid);
3469 	if (err)
3470 		return err;
3471 
3472 	err = ena_offload(vsi, tpid);
3473 	if (err)
3474 		return err;
3475 
3476 	return 0;
3477 }
3478 
3479 #define ICE_L2TSEL_QRX_CONTEXT_REG_IDX	3
3480 #define ICE_L2TSEL_BIT_OFFSET		23
3481 enum ice_l2tsel {
3482 	ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
3483 	ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
3484 };
3485 
3486 /**
3487  * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
3488  * @vsi: VSI used to update l2tsel on
3489  * @l2tsel: l2tsel setting requested
3490  *
3491  * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
3492  * This will modify which descriptor field the first offloaded VLAN will be
3493  * stripped into.
3494  */
3495 static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
3496 {
3497 	struct ice_hw *hw = &vsi->back->hw;
3498 	u32 l2tsel_bit;
3499 	int i;
3500 
3501 	if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
3502 		l2tsel_bit = 0;
3503 	else
3504 		l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
3505 
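	/* Read-modify-write bit 23 of the fourth (index 3) Rx queue context
	 * dword for every Rx queue owned by this VSI: cleared means the first
	 * tag is extracted to L2TAG2_2ND, set means it goes to L2TAG1.
	 */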
3506 	for (i = 0; i < vsi->alloc_rxq; i++) {
3507 		u16 pfq = vsi->rxq_map[i];
3508 		u32 qrx_context_offset;
3509 		u32 regval;
3510 
3511 		qrx_context_offset =
3512 			QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
3513 
3514 		regval = rd32(hw, qrx_context_offset);
3515 		regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
3516 		regval |= l2tsel_bit;
3517 		wr32(hw, qrx_context_offset, regval);
3518 	}
3519 }
3520 
3521 /**
3522  * ice_vc_ena_vlan_stripping_v2_msg
3523  * @vf: VF the message was received from
3524  * @msg: message received from the VF
3525  *
3526  * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
3527  */
3528 static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
3529 {
3530 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3531 	struct virtchnl_vlan_supported_caps *stripping_support;
3532 	struct virtchnl_vlan_setting *strip_msg =
3533 		(struct virtchnl_vlan_setting *)msg;
3534 	u32 ethertype_setting;
3535 	struct ice_vsi *vsi;
3536 
3537 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3538 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3539 		goto out;
3540 	}
3541 
3542 	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
3543 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3544 		goto out;
3545 	}
3546 
3547 	vsi = ice_get_vf_vsi(vf);
3548 	if (!vsi) {
3549 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3550 		goto out;
3551 	}
3552 
3553 	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
3554 	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
3555 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3556 		goto out;
3557 	}
3558 
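	/* VLAN stripping cannot be enabled while CRC/FCS stripping is
	 * disabled on the Rx queues
	 */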
3559 	if (ice_vsi_is_rxq_crc_strip_dis(vsi)) {
3560 		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
3561 		goto out;
3562 	}
3563 
3564 	ethertype_setting = strip_msg->outer_ethertype_setting;
3565 	if (ethertype_setting) {
3566 		if (ice_vc_ena_vlan_offload(vsi,
3567 					    vsi->outer_vlan_ops.ena_stripping,
3568 					    ethertype_setting)) {
3569 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3570 			goto out;
3571 		} else {
3572 			enum ice_l2tsel l2tsel =
3573 				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
3574 
3575 			/* PF tells the VF that the outer VLAN tag is always
3576 			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
3577 			 * inner is always extracted to
3578 			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
3579 			 * support outer stripping so the first tag always ends
3580 			 * up in L2TAG2_2ND and the second/inner tag, if
3581 			 * enabled, is extracted in L2TAG1.
3582 			 */
3583 			ice_vsi_update_l2tsel(vsi, l2tsel);
3584 
3585 			vf->vlan_strip_ena |= ICE_OUTER_VLAN_STRIP_ENA;
3586 		}
3587 	}
3588 
3589 	ethertype_setting = strip_msg->inner_ethertype_setting;
3590 	if (ethertype_setting &&
3591 	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
3592 				    ethertype_setting)) {
3593 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3594 		goto out;
3595 	}
3596 
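	/* inner stripping was both requested and enabled successfully */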
3597 	if (ethertype_setting)
3598 		vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
3599 
3600 out:
3601 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
3602 				     v_ret, NULL, 0);
3603 }
3604 
3605 /**
3606  * ice_vc_dis_vlan_stripping_v2_msg
3607  * @vf: VF the message was received from
3608  * @msg: message received from the VF
3609  *
3610  * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
3611  */
3612 static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
3613 {
3614 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3615 	struct virtchnl_vlan_supported_caps *stripping_support;
3616 	struct virtchnl_vlan_setting *strip_msg =
3617 		(struct virtchnl_vlan_setting *)msg;
3618 	u32 ethertype_setting;
3619 	struct ice_vsi *vsi;
3620 
3621 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3622 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3623 		goto out;
3624 	}
3625 
3626 	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
3627 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3628 		goto out;
3629 	}
3630 
3631 	vsi = ice_get_vf_vsi(vf);
3632 	if (!vsi) {
3633 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3634 		goto out;
3635 	}
3636 
3637 	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
3638 	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
3639 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3640 		goto out;
3641 	}
3642 
3643 	ethertype_setting = strip_msg->outer_ethertype_setting;
3644 	if (ethertype_setting) {
3645 		if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
3646 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3647 			goto out;
3648 		} else {
3649 			enum ice_l2tsel l2tsel =
3650 				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
3651 
3652 			/* PF tells the VF that the outer VLAN tag is always
3653 			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
3654 			 * inner is always extracted to
3655 			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
3656 			 * support inner stripping while outer stripping is
3657 			 * disabled so that the first and only tag is extracted
3658 			 * in L2TAG1.
3659 			 */
3660 			ice_vsi_update_l2tsel(vsi, l2tsel);
3661 
3662 			vf->vlan_strip_ena &= ~ICE_OUTER_VLAN_STRIP_ENA;
3663 		}
3664 	}
3665 
3666 	ethertype_setting = strip_msg->inner_ethertype_setting;
3667 	if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
3668 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3669 		goto out;
3670 	}
3671 
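	/* inner stripping was both requested and disabled successfully */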
3672 	if (ethertype_setting)
3673 		vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;
3674 
3675 out:
3676 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
3677 				     v_ret, NULL, 0);
3678 }
3679 
3680 /**
3681  * ice_vc_ena_vlan_insertion_v2_msg
3682  * @vf: VF the message was received from
3683  * @msg: message received from the VF
3684  *
3685  * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
3686  */
3687 static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
3688 {
3689 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3690 	struct virtchnl_vlan_supported_caps *insertion_support;
3691 	struct virtchnl_vlan_setting *insertion_msg =
3692 		(struct virtchnl_vlan_setting *)msg;
3693 	u32 ethertype_setting;
3694 	struct ice_vsi *vsi;
3695 
3696 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3697 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3698 		goto out;
3699 	}
3700 
3701 	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
3702 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3703 		goto out;
3704 	}
3705 
3706 	vsi = ice_get_vf_vsi(vf);
3707 	if (!vsi) {
3708 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3709 		goto out;
3710 	}
3711 
3712 	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
3713 	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
3714 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3715 		goto out;
3716 	}
3717 
3718 	ethertype_setting = insertion_msg->outer_ethertype_setting;
3719 	if (ethertype_setting &&
3720 	    ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
3721 				    ethertype_setting)) {
3722 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3723 		goto out;
3724 	}
3725 
3726 	ethertype_setting = insertion_msg->inner_ethertype_setting;
3727 	if (ethertype_setting &&
3728 	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
3729 				    ethertype_setting)) {
3730 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3731 		goto out;
3732 	}
3733 
3734 out:
3735 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
3736 				     v_ret, NULL, 0);
3737 }
3738 
3739 /**
3740  * ice_vc_dis_vlan_insertion_v2_msg
3741  * @vf: VF the message was received from
3742  * @msg: message received from the VF
3743  *
3744  * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
3745  */
3746 static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
3747 {
3748 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3749 	struct virtchnl_vlan_supported_caps *insertion_support;
3750 	struct virtchnl_vlan_setting *insertion_msg =
3751 		(struct virtchnl_vlan_setting *)msg;
3752 	u32 ethertype_setting;
3753 	struct ice_vsi *vsi;
3754 
3755 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3756 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3757 		goto out;
3758 	}
3759 
3760 	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
3761 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3762 		goto out;
3763 	}
3764 
3765 	vsi = ice_get_vf_vsi(vf);
3766 	if (!vsi) {
3767 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3768 		goto out;
3769 	}
3770 
3771 	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
3772 	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
3773 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3774 		goto out;
3775 	}
3776 
3777 	ethertype_setting = insertion_msg->outer_ethertype_setting;
3778 	if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
3779 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3780 		goto out;
3781 	}
3782 
3783 	ethertype_setting = insertion_msg->inner_ethertype_setting;
3784 	if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
3785 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3786 		goto out;
3787 	}
3788 
3789 out:
3790 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
3791 				     v_ret, NULL, 0);
3792 }
3793 
3794 static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
3795 	.get_ver_msg = ice_vc_get_ver_msg,
3796 	.get_vf_res_msg = ice_vc_get_vf_res_msg,
3797 	.reset_vf = ice_vc_reset_vf_msg,
3798 	.add_mac_addr_msg = ice_vc_add_mac_addr_msg,
3799 	.del_mac_addr_msg = ice_vc_del_mac_addr_msg,
3800 	.cfg_qs_msg = ice_vc_cfg_qs_msg,
3801 	.ena_qs_msg = ice_vc_ena_qs_msg,
3802 	.dis_qs_msg = ice_vc_dis_qs_msg,
3803 	.request_qs_msg = ice_vc_request_qs_msg,
3804 	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
3805 	.config_rss_key = ice_vc_config_rss_key,
3806 	.config_rss_lut = ice_vc_config_rss_lut,
3807 	.config_rss_hfunc = ice_vc_config_rss_hfunc,
3808 	.get_stats_msg = ice_vc_get_stats_msg,
3809 	.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
3810 	.add_vlan_msg = ice_vc_add_vlan_msg,
3811 	.remove_vlan_msg = ice_vc_remove_vlan_msg,
3812 	.query_rxdid = ice_vc_query_rxdid,
3813 	.get_rss_hena = ice_vc_get_rss_hena,
3814 	.set_rss_hena_msg = ice_vc_set_rss_hena,
3815 	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
3816 	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
3817 	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
3818 	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
3819 	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
3820 	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
3821 	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
3822 	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
3823 	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
3824 	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
3825 	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
3826 	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
3827 };
3828 
3829 /**
3830  * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
3831  * @vf: the VF to switch ops
3832  */
3833 void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
3834 {
3835 	vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
3836 }
3837 
3838 /**
3839  * ice_vc_repr_add_mac
3840  * @vf: pointer to VF
3841  * @msg: virtchannel message
3842  *
3843  * When a port representor is created, we do not add a MAC rule to the
3844  * firmware; instead, we store the address so that the PF can report the
3845  * same MAC as the VF.
3846  */
3847 static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
3848 {
3849 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3850 	struct virtchnl_ether_addr_list *al =
3851 	    (struct virtchnl_ether_addr_list *)msg;
3852 	struct ice_vsi *vsi;
3853 	struct ice_pf *pf;
3854 	int i;
3855 
3856 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3857 	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3858 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3859 		goto handle_mac_exit;
3860 	}
3861 
3862 	pf = vf->pf;
3863 
3864 	vsi = ice_get_vf_vsi(vf);
3865 	if (!vsi) {
3866 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3867 		goto handle_mac_exit;
3868 	}
3869 
3870 	for (i = 0; i < al->num_elements; i++) {
3871 		u8 *mac_addr = al->list[i].addr;
3872 
3873 		if (!is_unicast_ether_addr(mac_addr) ||
3874 		    ether_addr_equal(mac_addr, vf->hw_lan_addr))
3875 			continue;
3876 
3877 		if (vf->pf_set_mac) {
3878 			dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
3879 			v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
3880 			goto handle_mac_exit;
3881 		}
3882 
3883 		ice_vfhw_mac_add(vf, &al->list[i]);
3884 		vf->num_mac++;
3885 		break;
3886 	}
3887 
3888 handle_mac_exit:
3889 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
3890 				     v_ret, NULL, 0);
3891 }
3892 
3893 /**
3894  * ice_vc_repr_del_mac - response with success for deleting MAC
3895  * @vf: pointer to VF
3896  * @msg: virtchannel message
3897  *
3898  * Respond with success so that the normal VF flow is not broken.
3899  * For legacy VF drivers, try to update the cached MAC address.
3900  */
3901 static int
3902 ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg)
3903 {
3904 	struct virtchnl_ether_addr_list *al =
3905 		(struct virtchnl_ether_addr_list *)msg;
3906 
3907 	ice_update_legacy_cached_mac(vf, &al->list[0]);
3908 
3909 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
3910 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
3911 }
3912 
3913 static int
3914 ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
3915 {
3916 	dev_dbg(ice_pf_to_dev(vf->pf),
3917 		"Can't config promiscuous mode in switchdev mode for VF %d\n",
3918 		vf->vf_id);
3919 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
3920 				     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3921 				     NULL, 0);
3922 }
3923 
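/* Representor (switchdev) ops table: identical to ice_virtchnl_dflt_ops
 * above except that MAC add/delete and promiscuous-mode configuration use
 * the ice_vc_repr_* handlers.
 */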
3924 static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
3925 	.get_ver_msg = ice_vc_get_ver_msg,
3926 	.get_vf_res_msg = ice_vc_get_vf_res_msg,
3927 	.reset_vf = ice_vc_reset_vf_msg,
3928 	.add_mac_addr_msg = ice_vc_repr_add_mac,
3929 	.del_mac_addr_msg = ice_vc_repr_del_mac,
3930 	.cfg_qs_msg = ice_vc_cfg_qs_msg,
3931 	.ena_qs_msg = ice_vc_ena_qs_msg,
3932 	.dis_qs_msg = ice_vc_dis_qs_msg,
3933 	.request_qs_msg = ice_vc_request_qs_msg,
3934 	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
3935 	.config_rss_key = ice_vc_config_rss_key,
3936 	.config_rss_lut = ice_vc_config_rss_lut,
3937 	.config_rss_hfunc = ice_vc_config_rss_hfunc,
3938 	.get_stats_msg = ice_vc_get_stats_msg,
3939 	.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
3940 	.add_vlan_msg = ice_vc_add_vlan_msg,
3941 	.remove_vlan_msg = ice_vc_remove_vlan_msg,
3942 	.query_rxdid = ice_vc_query_rxdid,
3943 	.get_rss_hena = ice_vc_get_rss_hena,
3944 	.set_rss_hena_msg = ice_vc_set_rss_hena,
3945 	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
3946 	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
3947 	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
3948 	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
3949 	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
3950 	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
3951 	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
3952 	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
3953 	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
3954 	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
3955 	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
3956 	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
3957 };
3958 
3959 /**
3960  * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
3961  * @vf: the VF to switch ops
3962  */
3963 void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
3964 {
3965 	vf->virtchnl_ops = &ice_virtchnl_repr_ops;
3966 }
3967 
3968 /**
3969  * ice_is_malicious_vf - check if the VF might be overflowing the mailbox
3970  * @vf: the VF to check
3971  * @mbxdata: data about the state of the mailbox
3972  *
3973  * Detect if a given VF might be malicious and attempting to overflow the PF
3974  * mailbox. If so, log a warning message and ignore this event.
3975  */
3976 static bool
3977 ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
3978 {
3979 	bool report_malvf = false;
3980 	struct device *dev;
3981 	struct ice_pf *pf;
3982 	int status;
3983 
3984 	pf = vf->pf;
3985 	dev = ice_pf_to_dev(pf);
3986 
3987 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
3988 		return vf->mbx_info.malicious;
3989 
3990 	/* check to see if we have a newly malicious VF */
3991 	status = ice_mbx_vf_state_handler(&pf->hw, mbxdata, &vf->mbx_info,
3992 					  &report_malvf);
3993 	if (status)
3994 		dev_warn_ratelimited(dev, "Unable to check status of mailbox overflow for VF %u MAC %pM, status %d\n",
3995 				     vf->vf_id, vf->dev_lan_addr, status);
3996 
3997 	if (report_malvf) {
3998 		struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
3999 		u8 zero_addr[ETH_ALEN] = {};
4000 
4001 		dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
4002 			 vf->dev_lan_addr,
4003 			 pf_vsi ? pf_vsi->netdev->dev_addr : zero_addr);
4004 	}
4005 
4006 	return vf->mbx_info.malicious;
4007 }
4008 
4009 /**
4010  * ice_vc_process_vf_msg - Process request from VF
4011  * @pf: pointer to the PF structure
4012  * @event: pointer to the AQ event
4013  * @mbxdata: information used to detect VF attempting mailbox overflow
4014  *
4015  * Called from the common ASQ/ARQ handler to process a request from a VF.
4017  */
4018 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
4019 			   struct ice_mbx_data *mbxdata)
4020 {
4021 	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
4022 	s16 vf_id = le16_to_cpu(event->desc.retval);
4023 	const struct ice_virtchnl_ops *ops;
4024 	u16 msglen = event->msg_len;
4025 	u8 *msg = event->msg_buf;
4026 	struct ice_vf *vf = NULL;
4027 	struct device *dev;
4028 	int err = 0;
4029 
4030 	dev = ice_pf_to_dev(pf);
4031 
4032 	vf = ice_get_vf_by_id(pf, vf_id);
4033 	if (!vf) {
4034 		dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
4035 			vf_id, v_opcode, msglen);
4036 		return;
4037 	}
4038 
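	/* Take the per-VF cfg_lock so that handling this message cannot race
	 * with other VF configuration changes (e.g. a concurrent VF reset).
	 */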
4039 	mutex_lock(&vf->cfg_lock);
4040 
4041 	/* Check if the VF is trying to overflow the mailbox */
4042 	if (ice_is_malicious_vf(vf, mbxdata))
4043 		goto finish;
4044 
4045 	/* Check if VF is disabled. */
4046 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
4047 		err = -EPERM;
4048 		goto error_handler;
4049 	}
4050 
4051 	ops = vf->virtchnl_ops;
4052 
4053 	/* Perform basic checks on the msg */
4054 	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4055 	if (err) {
4056 		if (err == VIRTCHNL_STATUS_ERR_PARAM)
4057 			err = -EPERM;
4058 		else
4059 			err = -EINVAL;
4060 	}
4061 
4062 error_handler:
4063 	if (err) {
4064 		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
4065 				      NULL, 0);
4066 		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
4067 			vf_id, v_opcode, msglen, err);
4068 		goto finish;
4069 	}
4070 
4071 	if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
4072 		ice_vc_send_msg_to_vf(vf, v_opcode,
4073 				      VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
4074 				      0);
4075 		goto finish;
4076 	}
4077 
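	/* Dispatch to the per-opcode handler from the VF's active virtchnl
	 * ops table (default or representor).
	 */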
4078 	switch (v_opcode) {
4079 	case VIRTCHNL_OP_VERSION:
4080 		err = ops->get_ver_msg(vf, msg);
4081 		break;
4082 	case VIRTCHNL_OP_GET_VF_RESOURCES:
4083 		err = ops->get_vf_res_msg(vf, msg);
4084 		if (ice_vf_init_vlan_stripping(vf))
4085 			dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
4086 				vf->vf_id);
4087 		ice_vc_notify_vf_link_state(vf);
4088 		break;
4089 	case VIRTCHNL_OP_RESET_VF:
4090 		ops->reset_vf(vf);
4091 		break;
4092 	case VIRTCHNL_OP_ADD_ETH_ADDR:
4093 		err = ops->add_mac_addr_msg(vf, msg);
4094 		break;
4095 	case VIRTCHNL_OP_DEL_ETH_ADDR:
4096 		err = ops->del_mac_addr_msg(vf, msg);
4097 		break;
4098 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4099 		err = ops->cfg_qs_msg(vf, msg);
4100 		break;
4101 	case VIRTCHNL_OP_ENABLE_QUEUES:
4102 		err = ops->ena_qs_msg(vf, msg);
4103 		ice_vc_notify_vf_link_state(vf);
4104 		break;
4105 	case VIRTCHNL_OP_DISABLE_QUEUES:
4106 		err = ops->dis_qs_msg(vf, msg);
4107 		break;
4108 	case VIRTCHNL_OP_REQUEST_QUEUES:
4109 		err = ops->request_qs_msg(vf, msg);
4110 		break;
4111 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4112 		err = ops->cfg_irq_map_msg(vf, msg);
4113 		break;
4114 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
4115 		err = ops->config_rss_key(vf, msg);
4116 		break;
4117 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
4118 		err = ops->config_rss_lut(vf, msg);
4119 		break;
4120 	case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
4121 		err = ops->config_rss_hfunc(vf, msg);
4122 		break;
4123 	case VIRTCHNL_OP_GET_STATS:
4124 		err = ops->get_stats_msg(vf, msg);
4125 		break;
4126 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4127 		err = ops->cfg_promiscuous_mode_msg(vf, msg);
4128 		break;
4129 	case VIRTCHNL_OP_ADD_VLAN:
4130 		err = ops->add_vlan_msg(vf, msg);
4131 		break;
4132 	case VIRTCHNL_OP_DEL_VLAN:
4133 		err = ops->remove_vlan_msg(vf, msg);
4134 		break;
4135 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
4136 		err = ops->query_rxdid(vf);
4137 		break;
4138 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
4139 		err = ops->get_rss_hena(vf);
4140 		break;
4141 	case VIRTCHNL_OP_SET_RSS_HENA:
4142 		err = ops->set_rss_hena_msg(vf, msg);
4143 		break;
4144 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4145 		err = ops->ena_vlan_stripping(vf);
4146 		break;
4147 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4148 		err = ops->dis_vlan_stripping(vf);
4149 		break;
4150 	case VIRTCHNL_OP_ADD_FDIR_FILTER:
4151 		err = ops->add_fdir_fltr_msg(vf, msg);
4152 		break;
4153 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
4154 		err = ops->del_fdir_fltr_msg(vf, msg);
4155 		break;
4156 	case VIRTCHNL_OP_ADD_RSS_CFG:
4157 		err = ops->handle_rss_cfg_msg(vf, msg, true);
4158 		break;
4159 	case VIRTCHNL_OP_DEL_RSS_CFG:
4160 		err = ops->handle_rss_cfg_msg(vf, msg, false);
4161 		break;
4162 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
4163 		err = ops->get_offload_vlan_v2_caps(vf);
4164 		break;
4165 	case VIRTCHNL_OP_ADD_VLAN_V2:
4166 		err = ops->add_vlan_v2_msg(vf, msg);
4167 		break;
4168 	case VIRTCHNL_OP_DEL_VLAN_V2:
4169 		err = ops->remove_vlan_v2_msg(vf, msg);
4170 		break;
4171 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
4172 		err = ops->ena_vlan_stripping_v2_msg(vf, msg);
4173 		break;
4174 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
4175 		err = ops->dis_vlan_stripping_v2_msg(vf, msg);
4176 		break;
4177 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
4178 		err = ops->ena_vlan_insertion_v2_msg(vf, msg);
4179 		break;
4180 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
4181 		err = ops->dis_vlan_insertion_v2_msg(vf, msg);
4182 		break;
4183 	case VIRTCHNL_OP_UNKNOWN:
4184 	default:
4185 		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
4186 			vf_id);
4187 		err = ice_vc_send_msg_to_vf(vf, v_opcode,
4188 					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
4189 					    NULL, 0);
4190 		break;
4191 	}
4192 	if (err) {
4193 		/* Nothing can be done about a handler failure at this point;
4194 		 * just log that the request could not be honored.
4195 		 */
4196 		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
4197 			 vf_id, v_opcode, err);
4198 	}
4199 
4200 finish:
4201 	mutex_unlock(&vf->cfg_lock);
4202 	ice_put_vf(vf);
4203 }
4204