1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Ltd */
3
4 #include <linux/mlx5/driver.h>
5 #include "mlx5_ifc_vhca_event.h"
6 #include "mlx5_core.h"
7 #include "vhca_event.h"
8 #include "ecpf.h"
9 #define CREATE_TRACE_POINTS
10 #include "diag/vhca_tracepoint.h"
11
/* Deferred-work wrapper: one queued VHCA state change event.
 * Allocated in the EQ notifier, freed by the work handler after delivery.
 */
struct mlx5_vhca_event_work {
	struct work_struct work;		/* queued on one of the per-device event WQs */
	struct mlx5_core_dev *dev;		/* device the event belongs to */
	struct mlx5_vhca_state_event event;	/* event payload passed to listeners */
};
17
/* One single-threaded workqueue handling a slice of the function-id space. */
struct mlx5_vhca_event_handler {
	struct workqueue_struct *wq;
};
21
/* Per-device VHCA event state: MLX5_DEV_MAX_WQS workqueues; events are
 * spread across them by function_id so ordering is kept per function.
 */
struct mlx5_vhca_events {
	struct mlx5_core_dev *dev;
	struct mlx5_vhca_event_handler handler[MLX5_DEV_MAX_WQS];
};
26
/* Query the VHCA state context of @function_id from firmware.
 * The raw command output is returned in the caller-provided @out buffer
 * of @outlen bytes. Returns 0 on success or a negative errno.
 */
int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen)
{
	u32 query_in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {};

	MLX5_SET(query_vhca_state_in, query_in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE);
	MLX5_SET(query_vhca_state_in, query_in, embedded_cpu_function, 0);
	MLX5_SET(query_vhca_state_in, query_in, function_id, function_id);

	return mlx5_cmd_exec(dev, query_in, sizeof(query_in), out, outlen);
}
37
/* Issue MODIFY_VHCA_STATE for @function_id using the caller-prepared @in
 * mailbox; the common header fields (opcode, function id, ecpu) are filled
 * in here so callers only set the state context and field-select bits.
 */
static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
				      u32 *in, u32 inlen)
{
	u32 cmd_out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};

	MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0);
	MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
	MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);

	return mlx5_cmd_exec(dev, in, inlen, cmd_out, sizeof(cmd_out));
}
49
/* Program the SW function id into the VHCA state context of @function_id.
 * Delegates the common MODIFY_VHCA_STATE plumbing to the local helper.
 */
int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id)
{
	u32 cmd_in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};

	MLX5_SET(modify_vhca_state_in, cmd_in, vhca_state_field_select.sw_function_id, 1);
	MLX5_SET(modify_vhca_state_in, cmd_in, vhca_state_context.sw_function_id, sw_fn_id);

	return mlx5_cmd_modify_vhca_state(dev, function_id, cmd_in, sizeof(cmd_in));
}
63
/* Re-arm firmware so the next state change of @function_id raises an event. */
int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id)
{
	u32 cmd_in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};

	MLX5_SET(modify_vhca_state_in, cmd_in, vhca_state_field_select.arm_change_event, 1);
	MLX5_SET(modify_vhca_state_in, cmd_in, vhca_state_context.arm_change_event, 1);

	return mlx5_cmd_modify_vhca_state(dev, function_id, cmd_in, sizeof(cmd_in));
}
73
/* Fill @event with the current firmware state of event->function_id,
 * re-arm the change event, and notify all listeners on the blocking
 * notifier chain. A failed query silently drops the event.
 */
static void
mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *event)
{
	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
	int err;

	err = mlx5_cmd_query_vhca_state(dev, event->function_id, out, sizeof(out));
	if (err)
		return;

	event->sw_function_id = MLX5_GET(query_vhca_state_out, out,
					 vhca_state_context.sw_function_id);
	event->new_vhca_state = MLX5_GET(query_vhca_state_out, out,
					 vhca_state_context.vhca_state);

	/* Arm for the next change before telling consumers about this one;
	 * the arm result is deliberately ignored (best effort).
	 */
	mlx5_vhca_event_arm(dev, event->function_id);
	trace_mlx5_sf_vhca_event(dev, event);

	blocking_notifier_call_chain(&dev->priv.vhca_state_n_head, 0, event);
}
94
mlx5_vhca_state_work_handler(struct work_struct * _work)95 static void mlx5_vhca_state_work_handler(struct work_struct *_work)
96 {
97 struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work);
98
99 mlx5_vhca_event_notify(work->dev, &work->event);
100 kfree(work);
101 }
102
mlx5_vhca_events_work_enqueue(struct mlx5_core_dev * dev,int idx,struct work_struct * work)103 void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work)
104 {
105 queue_work(dev->priv.vhca_events->handler[idx].wq, work);
106 }
107
108 static int
mlx5_vhca_state_change_notifier(struct notifier_block * nb,unsigned long type,void * data)109 mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
110 {
111 struct mlx5_core_dev *dev = mlx5_nb_cof(nb, struct mlx5_core_dev,
112 priv.vhca_state_nb);
113 struct mlx5_vhca_event_work *work;
114 struct mlx5_eqe *eqe = data;
115 int wq_idx;
116
117 work = kzalloc_obj(*work, GFP_ATOMIC);
118 if (!work)
119 return NOTIFY_DONE;
120 INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
121 work->dev = dev;
122 work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
123 wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS;
124 mlx5_vhca_events_work_enqueue(dev, wq_idx, &work->work);
125 return NOTIFY_OK;
126 }
127
/* Enable the VHCA state capability and all state-transition events in the
 * HCA capabilities being programmed into @set_hca_cap. No-op when the
 * device does not support VHCA events.
 */
void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
{
	if (!mlx5_vhca_event_supported(dev))
		return;

	MLX5_SET(cmd_hca_cap, set_hca_cap, vhca_state, 1);
	MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_allocated, 1);
	MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_active, 1);
	MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_in_use, 1);
	MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_teardown_request, 1);
}
139
/* Initialize the listener chain and the EQ notifier block for
 * VHCA_STATE_CHANGE events. Registration itself happens later in
 * mlx5_vhca_event_start().
 */
void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev)
{
	BLOCKING_INIT_NOTIFIER_HEAD(&dev->priv.vhca_state_n_head);
	MLX5_NB_INIT(&dev->priv.vhca_state_nb, mlx5_vhca_state_change_notifier,
		     VHCA_STATE_CHANGE);
}
146
mlx5_vhca_event_init(struct mlx5_core_dev * dev)147 int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
148 {
149 char wq_name[MLX5_CMD_WQ_MAX_NAME];
150 struct mlx5_vhca_events *events;
151 int err, i;
152
153 if (!mlx5_vhca_event_supported(dev))
154 return 0;
155
156 events = kzalloc_obj(*events);
157 if (!events)
158 return -ENOMEM;
159
160 events->dev = dev;
161 for (i = 0; i < MLX5_DEV_MAX_WQS; i++) {
162 snprintf(wq_name, MLX5_CMD_WQ_MAX_NAME, "mlx5_vhca_event%d", i);
163 events->handler[i].wq = create_singlethread_workqueue(wq_name);
164 if (!events->handler[i].wq) {
165 err = -ENOMEM;
166 goto err_create_wq;
167 }
168 }
169 dev->priv.vhca_events = events;
170
171 return 0;
172
173 err_create_wq:
174 for (--i; i >= 0; i--)
175 destroy_workqueue(events->handler[i].wq);
176 kfree(events);
177 return err;
178 }
179
mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev * dev)180 void mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev *dev)
181 {
182 struct mlx5_vhca_events *vhca_events;
183 int i;
184
185 if (!mlx5_vhca_event_supported(dev))
186 return;
187
188 vhca_events = dev->priv.vhca_events;
189 for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
190 flush_workqueue(vhca_events->handler[i].wq);
191 }
192
mlx5_vhca_event_cleanup(struct mlx5_core_dev * dev)193 void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
194 {
195 struct mlx5_vhca_events *vhca_events;
196 int i;
197
198 if (!mlx5_vhca_event_supported(dev))
199 return;
200
201 vhca_events = dev->priv.vhca_events;
202 for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
203 destroy_workqueue(vhca_events->handler[i].wq);
204 kvfree(vhca_events);
205 }
206
mlx5_vhca_event_start(struct mlx5_core_dev * dev)207 void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
208 {
209 if (!mlx5_vhca_event_supported(dev))
210 return;
211
212 mlx5_eq_notifier_register(dev, &dev->priv.vhca_state_nb);
213 }
214
/* Stop VHCA event processing: unregister from the EQ first so no new work
 * can be queued, then drain the workqueues of already-queued events.
 */
void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
{
	if (!mlx5_vhca_event_supported(dev))
		return;

	mlx5_eq_notifier_unregister(dev, &dev->priv.vhca_state_nb);

	/* Flush workqueues of all pending events. */
	mlx5_vhca_event_work_queues_flush(dev);
}
225
mlx5_vhca_event_notifier_register(struct mlx5_core_dev * dev,struct notifier_block * nb)226 int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
227 {
228 return blocking_notifier_chain_register(&dev->priv.vhca_state_n_head,
229 nb);
230 }
231
mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev * dev,struct notifier_block * nb)232 void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
233 {
234 blocking_notifier_chain_unregister(&dev->priv.vhca_state_n_head, nb);
235 }
236