1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2024 Analog Devices Inc.
4  * Copyright (C) 2024 BayLibre, SAS
5  */
6 
7 /*
8  * SPI Offloading support.
9  *
10  * Some SPI controllers support offloading of SPI transfers. Essentially, this
11  * is the ability for a SPI controller to perform SPI transfers with minimal
12  * or even no CPU intervention, e.g. via a specialized SPI controller with a
13  * hardware trigger or via a conventional SPI controller using a non-Linux MCU
14  * processor core to offload the work.
15  */
16 
17 #define DEFAULT_SYMBOL_NAMESPACE "SPI_OFFLOAD"
18 
19 #include <linux/cleanup.h>
20 #include <linux/device.h>
21 #include <linux/dmaengine.h>
22 #include <linux/export.h>
23 #include <linux/kref.h>
24 #include <linux/list.h>
25 #include <linux/mutex.h>
26 #include <linux/of.h>
27 #include <linux/property.h>
28 #include <linux/spi/offload/consumer.h>
29 #include <linux/spi/offload/provider.h>
30 #include <linux/spi/offload/types.h>
31 #include <linux/spi/spi.h>
32 #include <linux/types.h>
33 
/* Pairs an offload with its controller so a devm action can return it. */
struct spi_controller_and_offload {
	struct spi_controller *controller;	/* controller that provided the offload */
	struct spi_offload *offload;		/* offload instance to return via put_offload() */
};
38 
/* A registered trigger; refcounted since provider and consumer lifetimes differ. */
struct spi_offload_trigger {
	/* entry in the global spi_offload_triggers list */
	struct list_head list;
	/* released when both provider and all consumers are done */
	struct kref ref;
	/* fwnode used to match consumers to this provider */
	struct fwnode_handle *fwnode;
	/* synchronizes calling ops and driver registration */
	struct mutex lock;
	/*
	 * If the provider goes away while the consumer still has a reference,
	 * ops and priv will be set to NULL and all calls will fail with -ENODEV.
	 */
	const struct spi_offload_trigger_ops *ops;
	void *priv;
};
52 
/* All registered triggers; both list and membership are protected by the mutex below. */
static LIST_HEAD(spi_offload_triggers);
static DEFINE_MUTEX(spi_offload_triggers_lock);
55 
56 /**
57  * devm_spi_offload_alloc() - Allocate offload instance
58  * @dev: Device for devm purposes and assigned to &struct spi_offload.provider_dev
59  * @priv_size: Size of private data to allocate
60  *
61  * Offload providers should use this to allocate offload instances.
62  *
63  * Return: Pointer to new offload instance or error on failure.
64  */
devm_spi_offload_alloc(struct device * dev,size_t priv_size)65 struct spi_offload *devm_spi_offload_alloc(struct device *dev,
66 					   size_t priv_size)
67 {
68 	struct spi_offload *offload;
69 	void *priv;
70 
71 	offload = devm_kzalloc(dev, sizeof(*offload), GFP_KERNEL);
72 	if (!offload)
73 		return ERR_PTR(-ENOMEM);
74 
75 	priv = devm_kzalloc(dev, priv_size, GFP_KERNEL);
76 	if (!priv)
77 		return ERR_PTR(-ENOMEM);
78 
79 	offload->provider_dev = dev;
80 	offload->priv = priv;
81 
82 	return offload;
83 }
84 EXPORT_SYMBOL_GPL(devm_spi_offload_alloc);
85 
spi_offload_put(void * data)86 static void spi_offload_put(void *data)
87 {
88 	struct spi_controller_and_offload *resource = data;
89 
90 	resource->controller->put_offload(resource->offload);
91 	kfree(resource);
92 }
93 
94 /**
95  * devm_spi_offload_get() - Get an offload instance
96  * @dev: Device for devm purposes
97  * @spi: SPI device to use for the transfers
98  * @config: Offload configuration
99  *
100  * Peripheral drivers call this function to get an offload instance that meets
101  * the requirements specified in @config. If no suitable offload instance is
102  * available, -ENODEV is returned.
103  *
104  * Return: Offload instance or error on failure.
105  */
devm_spi_offload_get(struct device * dev,struct spi_device * spi,const struct spi_offload_config * config)106 struct spi_offload *devm_spi_offload_get(struct device *dev,
107 					 struct spi_device *spi,
108 					 const struct spi_offload_config *config)
109 {
110 	struct spi_controller_and_offload *resource;
111 	struct spi_offload *offload;
112 	int ret;
113 
114 	if (!spi || !config)
115 		return ERR_PTR(-EINVAL);
116 
117 	if (!spi->controller->get_offload)
118 		return ERR_PTR(-ENODEV);
119 
120 	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
121 	if (!resource)
122 		return ERR_PTR(-ENOMEM);
123 
124 	offload = spi->controller->get_offload(spi, config);
125 	if (IS_ERR(offload)) {
126 		kfree(resource);
127 		return offload;
128 	}
129 
130 	resource->controller = spi->controller;
131 	resource->offload = offload;
132 
133 	ret = devm_add_action_or_reset(dev, spi_offload_put, resource);
134 	if (ret)
135 		return ERR_PTR(ret);
136 
137 	return offload;
138 }
139 EXPORT_SYMBOL_GPL(devm_spi_offload_get);
140 
spi_offload_trigger_free(struct kref * ref)141 static void spi_offload_trigger_free(struct kref *ref)
142 {
143 	struct spi_offload_trigger *trigger =
144 		container_of(ref, struct spi_offload_trigger, ref);
145 
146 	mutex_destroy(&trigger->lock);
147 	fwnode_handle_put(trigger->fwnode);
148 	kfree(trigger);
149 }
150 
spi_offload_trigger_put(void * data)151 static void spi_offload_trigger_put(void *data)
152 {
153 	struct spi_offload_trigger *trigger = data;
154 
155 	scoped_guard(mutex, &trigger->lock)
156 		if (trigger->ops && trigger->ops->release)
157 			trigger->ops->release(trigger);
158 
159 	kref_put(&trigger->ref, spi_offload_trigger_free);
160 }
161 
162 static struct spi_offload_trigger
spi_offload_trigger_get(enum spi_offload_trigger_type type,struct fwnode_reference_args * args)163 *spi_offload_trigger_get(enum spi_offload_trigger_type type,
164 			 struct fwnode_reference_args *args)
165 {
166 	struct spi_offload_trigger *trigger;
167 	bool match = false;
168 	int ret;
169 
170 	guard(mutex)(&spi_offload_triggers_lock);
171 
172 	list_for_each_entry(trigger, &spi_offload_triggers, list) {
173 		if (trigger->fwnode != args->fwnode)
174 			continue;
175 
176 		match = trigger->ops->match(trigger, type, args->args, args->nargs);
177 		if (match)
178 			break;
179 	}
180 
181 	if (!match)
182 		return ERR_PTR(-EPROBE_DEFER);
183 
184 	guard(mutex)(&trigger->lock);
185 
186 	if (!trigger->ops)
187 		return ERR_PTR(-ENODEV);
188 
189 	if (trigger->ops->request) {
190 		ret = trigger->ops->request(trigger, type, args->args, args->nargs);
191 		if (ret)
192 			return ERR_PTR(ret);
193 	}
194 
195 	kref_get(&trigger->ref);
196 
197 	return trigger;
198 }
199 
200 /**
201  * devm_spi_offload_trigger_get() - Get an offload trigger instance
202  * @dev: Device for devm purposes.
203  * @offload: Offload instance connected to a trigger.
204  * @type: Trigger type to get.
205  *
206  * Return: Offload trigger instance or error on failure.
207  */
208 struct spi_offload_trigger
devm_spi_offload_trigger_get(struct device * dev,struct spi_offload * offload,enum spi_offload_trigger_type type)209 *devm_spi_offload_trigger_get(struct device *dev,
210 			      struct spi_offload *offload,
211 			      enum spi_offload_trigger_type type)
212 {
213 	struct spi_offload_trigger *trigger;
214 	struct fwnode_reference_args args;
215 	int ret;
216 
217 	ret = fwnode_property_get_reference_args(dev_fwnode(offload->provider_dev),
218 						 "trigger-sources",
219 						 "#trigger-source-cells", 0, 0,
220 						 &args);
221 	if (ret)
222 		return ERR_PTR(ret);
223 
224 	trigger = spi_offload_trigger_get(type, &args);
225 	fwnode_handle_put(args.fwnode);
226 	if (IS_ERR(trigger))
227 		return trigger;
228 
229 	ret = devm_add_action_or_reset(dev, spi_offload_trigger_put, trigger);
230 	if (ret)
231 		return ERR_PTR(ret);
232 
233 	return trigger;
234 }
235 EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_get);
236 
237 /**
238  * spi_offload_trigger_validate - Validate the requested trigger
239  * @trigger: Offload trigger instance
240  * @config: Trigger config to validate
241  *
242  * On success, @config may be modifed to reflect what the hardware can do.
243  * For example, the frequency of a periodic trigger may be adjusted to the
244  * nearest supported value.
245  *
246  * Callers will likely need to do additional validation of the modified trigger
247  * parameters.
248  *
249  * Return: 0 on success, negative error code on failure.
250  */
spi_offload_trigger_validate(struct spi_offload_trigger * trigger,struct spi_offload_trigger_config * config)251 int spi_offload_trigger_validate(struct spi_offload_trigger *trigger,
252 				 struct spi_offload_trigger_config *config)
253 {
254 	guard(mutex)(&trigger->lock);
255 
256 	if (!trigger->ops)
257 		return -ENODEV;
258 
259 	if (!trigger->ops->validate)
260 		return -EOPNOTSUPP;
261 
262 	return trigger->ops->validate(trigger, config);
263 }
264 EXPORT_SYMBOL_GPL(spi_offload_trigger_validate);
265 
266 /**
267  * spi_offload_trigger_enable - enables trigger for offload
268  * @offload: Offload instance
269  * @trigger: Offload trigger instance
270  * @config: Trigger config to validate
271  *
272  * There must be a prepared offload instance with the specified ID (i.e.
273  * spi_optimize_message() was called with the same offload assigned to the
274  * message). This will also reserve the bus for exclusive use by the offload
275  * instance until the trigger is disabled. Any other attempts to send a
276  * transfer or lock the bus will fail with -EBUSY during this time.
277  *
278  * Calls must be balanced with spi_offload_trigger_disable().
279  *
280  * Context: can sleep
281  * Return: 0 on success, else a negative error code.
282  */
spi_offload_trigger_enable(struct spi_offload * offload,struct spi_offload_trigger * trigger,struct spi_offload_trigger_config * config)283 int spi_offload_trigger_enable(struct spi_offload *offload,
284 			       struct spi_offload_trigger *trigger,
285 			       struct spi_offload_trigger_config *config)
286 {
287 	int ret;
288 
289 	guard(mutex)(&trigger->lock);
290 
291 	if (!trigger->ops)
292 		return -ENODEV;
293 
294 	if (offload->ops && offload->ops->trigger_enable) {
295 		ret = offload->ops->trigger_enable(offload);
296 		if (ret)
297 			return ret;
298 	}
299 
300 	if (trigger->ops->enable) {
301 		ret = trigger->ops->enable(trigger, config);
302 		if (ret) {
303 			if (offload->ops->trigger_disable)
304 				offload->ops->trigger_disable(offload);
305 			return ret;
306 		}
307 	}
308 
309 	return 0;
310 }
311 EXPORT_SYMBOL_GPL(spi_offload_trigger_enable);
312 
313 /**
314  * spi_offload_trigger_disable - disables hardware trigger for offload
315  * @offload: Offload instance
316  * @trigger: Offload trigger instance
317  *
318  * Disables the hardware trigger for the offload instance with the specified ID
319  * and releases the bus for use by other clients.
320  *
321  * Context: can sleep
322  */
spi_offload_trigger_disable(struct spi_offload * offload,struct spi_offload_trigger * trigger)323 void spi_offload_trigger_disable(struct spi_offload *offload,
324 				 struct spi_offload_trigger *trigger)
325 {
326 	if (offload->ops && offload->ops->trigger_disable)
327 		offload->ops->trigger_disable(offload);
328 
329 	guard(mutex)(&trigger->lock);
330 
331 	if (!trigger->ops)
332 		return;
333 
334 	if (trigger->ops->disable)
335 		trigger->ops->disable(trigger);
336 }
337 EXPORT_SYMBOL_GPL(spi_offload_trigger_disable);
338 
/* devm action wrapper: release a DMA channel when the consumer goes away. */
static void spi_offload_release_dma_chan(void *chan)
{
	dma_release_channel(chan);
}
343 
344 /**
345  * devm_spi_offload_tx_stream_request_dma_chan - Get the DMA channel info for the TX stream
346  * @dev: Device for devm purposes.
347  * @offload: Offload instance
348  *
349  * This is the DMA channel that will provide data to transfers that use the
350  * %SPI_OFFLOAD_XFER_TX_STREAM offload flag.
351  *
352  * Return: Pointer to DMA channel info, or negative error code
353  */
354 struct dma_chan
devm_spi_offload_tx_stream_request_dma_chan(struct device * dev,struct spi_offload * offload)355 *devm_spi_offload_tx_stream_request_dma_chan(struct device *dev,
356 					     struct spi_offload *offload)
357 {
358 	struct dma_chan *chan;
359 	int ret;
360 
361 	if (!offload->ops || !offload->ops->tx_stream_request_dma_chan)
362 		return ERR_PTR(-EOPNOTSUPP);
363 
364 	chan = offload->ops->tx_stream_request_dma_chan(offload);
365 	if (IS_ERR(chan))
366 		return chan;
367 
368 	ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
369 	if (ret)
370 		return ERR_PTR(ret);
371 
372 	return chan;
373 }
374 EXPORT_SYMBOL_GPL(devm_spi_offload_tx_stream_request_dma_chan);
375 
376 /**
377  * devm_spi_offload_rx_stream_request_dma_chan - Get the DMA channel info for the RX stream
378  * @dev: Device for devm purposes.
379  * @offload: Offload instance
380  *
381  * This is the DMA channel that will receive data from transfers that use the
382  * %SPI_OFFLOAD_XFER_RX_STREAM offload flag.
383  *
384  * Return: Pointer to DMA channel info, or negative error code
385  */
386 struct dma_chan
devm_spi_offload_rx_stream_request_dma_chan(struct device * dev,struct spi_offload * offload)387 *devm_spi_offload_rx_stream_request_dma_chan(struct device *dev,
388 					     struct spi_offload *offload)
389 {
390 	struct dma_chan *chan;
391 	int ret;
392 
393 	if (!offload->ops || !offload->ops->rx_stream_request_dma_chan)
394 		return ERR_PTR(-EOPNOTSUPP);
395 
396 	chan = offload->ops->rx_stream_request_dma_chan(offload);
397 	if (IS_ERR(chan))
398 		return chan;
399 
400 	ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
401 	if (ret)
402 		return ERR_PTR(ret);
403 
404 	return chan;
405 }
406 EXPORT_SYMBOL_GPL(devm_spi_offload_rx_stream_request_dma_chan);
407 
/* Trigger providers */
409 
spi_offload_trigger_unregister(void * data)410 static void spi_offload_trigger_unregister(void *data)
411 {
412 	struct spi_offload_trigger *trigger = data;
413 
414 	scoped_guard(mutex, &spi_offload_triggers_lock)
415 		list_del(&trigger->list);
416 
417 	scoped_guard(mutex, &trigger->lock) {
418 		trigger->priv = NULL;
419 		trigger->ops = NULL;
420 	}
421 
422 	kref_put(&trigger->ref, spi_offload_trigger_free);
423 }
424 
425 /**
426  * devm_spi_offload_trigger_register() - Allocate and register an offload trigger
427  * @dev: Device for devm purposes.
428  * @info: Provider-specific trigger info.
429  *
430  * Return: 0 on success, else a negative error code.
431  */
devm_spi_offload_trigger_register(struct device * dev,struct spi_offload_trigger_info * info)432 int devm_spi_offload_trigger_register(struct device *dev,
433 				      struct spi_offload_trigger_info *info)
434 {
435 	struct spi_offload_trigger *trigger;
436 
437 	if (!info->fwnode || !info->ops)
438 		return -EINVAL;
439 
440 	trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
441 	if (!trigger)
442 		return -ENOMEM;
443 
444 	kref_init(&trigger->ref);
445 	mutex_init(&trigger->lock);
446 	trigger->fwnode = fwnode_handle_get(info->fwnode);
447 	trigger->ops = info->ops;
448 	trigger->priv = info->priv;
449 
450 	scoped_guard(mutex, &spi_offload_triggers_lock)
451 		list_add_tail(&trigger->list, &spi_offload_triggers);
452 
453 	return devm_add_action_or_reset(dev, spi_offload_trigger_unregister, trigger);
454 }
455 EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_register);
456 
/**
 * spi_offload_trigger_get_priv() - Get the private data for the trigger
 *
 * @trigger: Offload trigger instance.
 *
 * Intended for provider callbacks; note that priv is cleared when the
 * provider unregisters (see spi_offload_trigger_unregister()).
 *
 * Return: Private data for the trigger.
 */
void *spi_offload_trigger_get_priv(struct spi_offload_trigger *trigger)
{
	return trigger->priv;
}
EXPORT_SYMBOL_GPL(spi_offload_trigger_get_priv);
469