1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* -*- linux-c -*- *
3 *
4 * ALSA driver for the digigram lx6464es interface
5 * low-level interface
6 *
7 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
8 */
9
10 /* #define RMH_DEBUG 1 */
11
12 #include <linux/bitops.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/delay.h>
16
17 #include "lx6464es.h"
18 #include "lx_core.h"
19
20 /* low-level register access */
21
/*
 * DSP register offsets (in 32-bit words) within the DSP BAR, indexed by
 * the port values passed to lx_dsp_register().
 * NOTE(review): the index <-> eReg_* mapping is declared elsewhere
 * (lx_core.h) — confirm ordering against that enum before editing.
 */
static const unsigned long dsp_port_offsets[] = {
	0,
	0x400,
	0x401,
	0x402,
	0x403,
	0x404,
	0x405,
	0x406,
	0x407,
	0x408,
	0x409,
	0x40a,
	0x40b,
	0x40c,

	0x410,
	0x411,
	0x412,
	0x413,
	0x414,
	0x415,
	0x416,

	0x420,
	0x430,
	0x431,
	0x432,
	0x433,
	0x434,
	0x440
};
54
lx_dsp_register(struct lx6464es * chip,int port)55 static void __iomem *lx_dsp_register(struct lx6464es *chip, int port)
56 {
57 void __iomem *base_address = chip->port_dsp_bar;
58 return base_address + dsp_port_offsets[port]*4;
59 }
60
lx_dsp_reg_read(struct lx6464es * chip,int port)61 unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
62 {
63 void __iomem *address = lx_dsp_register(chip, port);
64 return ioread32(address);
65 }
66
/* Read @len consecutive 32-bit words starting at dsp register @port. */
static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
			       u32 len)
{
	u32 __iomem *address = lx_dsp_register(chip, port);
	int i;

	/* we cannot use memcpy_fromio */
	for (i = 0; i != len; ++i)
		data[i] = ioread32(address + i);
}
77
78
lx_dsp_reg_write(struct lx6464es * chip,int port,unsigned data)79 void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
80 {
81 void __iomem *address = lx_dsp_register(chip, port);
82 iowrite32(data, address);
83 }
84
/* Write @len consecutive 32-bit words starting at dsp register @port. */
static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
				const u32 *data, u32 len)
{
	u32 __iomem *address = lx_dsp_register(chip, port);
	int i;

	/* we cannot use memcpy_toio */
	for (i = 0; i != len; ++i)
		iowrite32(data[i], address + i);
}
95
96
/*
 * PLX register byte offsets within the remapped PLX region, indexed by
 * the port values passed to lx_plx_register().
 * NOTE(review): the index <-> ePLX_* mapping is declared elsewhere
 * (lx_core.h) — confirm ordering against that enum before editing.
 */
static const unsigned long plx_port_offsets[] = {
	0x04,
	0x40,
	0x44,
	0x48,
	0x4c,
	0x50,
	0x54,
	0x58,
	0x5c,
	0x64,
	0x68,
	0x6C
};
111
lx_plx_register(struct lx6464es * chip,int port)112 static void __iomem *lx_plx_register(struct lx6464es *chip, int port)
113 {
114 void __iomem *base_address = chip->port_plx_remapped;
115 return base_address + plx_port_offsets[port];
116 }
117
lx_plx_reg_read(struct lx6464es * chip,int port)118 unsigned long lx_plx_reg_read(struct lx6464es *chip, int port)
119 {
120 void __iomem *address = lx_plx_register(chip, port);
121 return ioread32(address);
122 }
123
/* Write one 32-bit PLX register. */
void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data)
{
	void __iomem *address = lx_plx_register(chip, port);

	iowrite32(data, address);
}
129
130 /* rmh */
131
132 #ifdef CONFIG_SND_DEBUG
133 #define CMD_NAME(a) a
134 #else
135 #define CMD_NAME(a) NULL
136 #endif
137
138 #define Reg_CSM_MR 0x00000002
139 #define Reg_CSM_MC 0x00000001
140
/* Descriptor for one MicroBlaze mailbox command (see dsp_commands[]). */
struct dsp_cmd_info {
	u32 dcCodeOp;		/* Op Code of the command (usually 1st 24-bits
				 * word).*/
	u16 dcCmdLength;	/* Command length in words of 24 bits.*/
	u16 dcStatusType;	/* Status type: 0 for fixed length, 1 for
				 * random. */
	u16 dcStatusLength;	/* Status length (if fixed).*/
	char *dcOpName;		/* Human-readable name (NULL unless
				 * CONFIG_SND_DEBUG, via CMD_NAME()). */
};
150
151 /*
152 Initialization and control data for the Microblaze interface
153 - OpCode:
154 the opcode field of the command set at the proper offset
155 - CmdLength
156 the number of command words
157 - StatusType
158 offset in the status registers: 0 means that the return value may be
159 different from 0, and must be read
160 - StatusLength
161 the number of status words (in addition to the return value)
162 */
163
/* Command table indexed by enum cmd_mb_opcodes (CMD_00..CMD_13);
 * lx_message_init() copies these fields into the rmh. */
static const struct dsp_cmd_info dsp_commands[] =
{
	{ (CMD_00_INFO_DEBUG << OPCODE_OFFSET)			, 1 /*custom*/
	  , 1	, 0 /**/		    , CMD_NAME("INFO_DEBUG") },
	{ (CMD_01_GET_SYS_CFG << OPCODE_OFFSET)			, 1 /**/
	  , 1      , 2 /**/		    , CMD_NAME("GET_SYS_CFG") },
	{ (CMD_02_SET_GRANULARITY << OPCODE_OFFSET)		, 1 /**/
	  , 1      , 0 /**/		    , CMD_NAME("SET_GRANULARITY") },
	{ (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET)		, 1 /**/
	  , 1      , 0 /**/		    , CMD_NAME("SET_TIMER_IRQ") },
	{ (CMD_04_GET_EVENT << OPCODE_OFFSET)			, 1 /**/
	  , 1      , 0 /*up to 10*/	    , CMD_NAME("GET_EVENT") },
	{ (CMD_05_GET_PIPES << OPCODE_OFFSET)			, 1 /**/
	  , 1      , 2 /*up to 4*/	    , CMD_NAME("GET_PIPES") },
	{ (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET)		, 1 /**/
	  , 0      , 0 /**/		    , CMD_NAME("ALLOCATE_PIPE") },
	{ (CMD_07_RELEASE_PIPE << OPCODE_OFFSET)		, 1 /**/
	  , 0      , 0 /**/		    , CMD_NAME("RELEASE_PIPE") },
	{ (CMD_08_ASK_BUFFERS << OPCODE_OFFSET)			, 1 /**/
	  , 1      , MAX_STREAM_BUFFER	    , CMD_NAME("ASK_BUFFERS") },
	{ (CMD_09_STOP_PIPE << OPCODE_OFFSET)			, 1 /**/
	  , 0      , 0 /*up to 2*/	    , CMD_NAME("STOP_PIPE") },
	{ (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET)		, 1 /**/
	  , 1      , 1 /*up to 2*/	    , CMD_NAME("GET_PIPE_SPL_COUNT") },
	{ (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET)		, 1 /*up to 5*/
	  , 1      , 0 /**/		    , CMD_NAME("TOGGLE_PIPE_STATE") },
	{ (CMD_0C_DEF_STREAM << OPCODE_OFFSET)			, 1 /*up to 4*/
	  , 1      , 0 /**/		    , CMD_NAME("DEF_STREAM") },
	{ (CMD_0D_SET_MUTE << OPCODE_OFFSET)			, 3 /**/
	  , 1      , 0 /**/		    , CMD_NAME("SET_MUTE") },
	{ (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET)	, 1/**/
	  , 1      , 2 /**/		    , CMD_NAME("GET_STREAM_SPL_COUNT") },
	{ (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET)		, 3 /*up to 4*/
	  , 0      , 1 /**/		    , CMD_NAME("UPDATE_BUFFER") },
	{ (CMD_10_GET_BUFFER << OPCODE_OFFSET)			, 1 /**/
	  , 1      , 4 /**/		    , CMD_NAME("GET_BUFFER") },
	{ (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET)		, 1 /**/
	  , 1      , 1 /*up to 4*/	    , CMD_NAME("CANCEL_BUFFER") },
	{ (CMD_12_GET_PEAK << OPCODE_OFFSET)			, 1 /**/
	  , 1      , 1 /**/		    , CMD_NAME("GET_PEAK") },
	{ (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET)		, 1 /**/
	  , 1      , 0 /**/		    , CMD_NAME("SET_STREAM_STATE") },
};
207
lx_message_init(struct lx_rmh * rmh,enum cmd_mb_opcodes cmd)208 static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd)
209 {
210 snd_BUG_ON(cmd >= CMD_14_INVALID);
211
212 rmh->cmd[0] = dsp_commands[cmd].dcCodeOp;
213 rmh->cmd_len = dsp_commands[cmd].dcCmdLength;
214 rmh->stat_len = dsp_commands[cmd].dcStatusLength;
215 rmh->dsp_stat = dsp_commands[cmd].dcStatusType;
216 rmh->cmd_idx = cmd;
217 memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32));
218
219 #ifdef CONFIG_SND_DEBUG
220 memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32));
221 #endif
222 #ifdef RMH_DEBUG
223 rmh->cmd_idx = cmd;
224 #endif
225 }
226
#ifdef RMH_DEBUG
#define LXRMH "lx6464es rmh: "
/* Dump the command and status words of @rmh to the kernel log. */
static void lx_message_dump(struct lx_rmh *rmh)
{
	u8 idx = rmh->cmd_idx;
	int i;

	pr_debug(LXRMH "command %s\n", dsp_commands[idx].dcOpName);

	for (i = 0; i != rmh->cmd_len; ++i)
		pr_debug(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]);

	for (i = 0; i != rmh->stat_len; ++i)
		pr_debug(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]);
	pr_debug("\n");
}
#else
static inline void lx_message_dump(struct lx_rmh *rmh)
{}
#endif
247
248
249
250 /* sleep 500 - 100 = 400 times 100us -> the timeout is >= 40 ms */
251 #define XILINX_TIMEOUT_MS 40
252 #define XILINX_POLL_NO_SLEEP 100
253 #define XILINX_POLL_ITERATIONS 150
254
255
lx_message_send_atomic(struct lx6464es * chip,struct lx_rmh * rmh)256 static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
257 {
258 u32 reg = ED_DSP_TIMED_OUT;
259 int dwloop;
260
261 if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
262 dev_err(chip->card->dev, "PIOSendMessage eReg_CSM %x\n", reg);
263 return -EBUSY;
264 }
265
266 /* write command */
267 lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);
268
269 /* MicoBlaze gogogo */
270 lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);
271
272 /* wait for device to answer */
273 for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) {
274 if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) {
275 if (rmh->dsp_stat == 0)
276 reg = lx_dsp_reg_read(chip, eReg_CRM1);
277 else
278 reg = 0;
279 goto polling_successful;
280 } else
281 udelay(1);
282 }
283 dev_warn(chip->card->dev, "TIMEOUT lx_message_send_atomic! "
284 "polling failed\n");
285
286 polling_successful:
287 if ((reg & ERROR_VALUE) == 0) {
288 /* read response */
289 if (rmh->stat_len) {
290 snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));
291 lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
292 rmh->stat_len);
293 }
294 } else
295 dev_err(chip->card->dev, "rmh error: %08x\n", reg);
296
297 /* clear Reg_CSM_MR */
298 lx_dsp_reg_write(chip, eReg_CSM, 0);
299
300 switch (reg) {
301 case ED_DSP_TIMED_OUT:
302 dev_warn(chip->card->dev, "lx_message_send: dsp timeout\n");
303 return -ETIMEDOUT;
304
305 case ED_DSP_CRASHED:
306 dev_warn(chip->card->dev, "lx_message_send: dsp crashed\n");
307 return -EAGAIN;
308 }
309
310 lx_message_dump(rmh);
311
312 return reg;
313 }
314
315
316 /* low-level dsp access */
/* Query the dsp version (CMD_01_GET_SYS_CFG); result in *rdsp_version.
 * Returns the message status (0 on success). */
int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
{
	int ret;

	guard(mutex)(&chip->msg_lock);

	lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
	ret = lx_message_send_atomic(chip, &chip->rmh);

	*rdsp_version = chip->rmh.stat[1];
	return ret;
}
329
/*
 * Query the current sample clock (CMD_01_GET_SYS_CFG) and decode the
 * raw frequency counter into 44100/48000 Hz (0 if out of range), then
 * scale by chip->freq_ratio into *rfreq.
 */
int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
{
	u32 freq_raw = 0;
	u32 freq = 0;
	u32 frequency = 0;
	int ret;

	guard(mutex)(&chip->msg_lock);

	lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
	ret = lx_message_send_atomic(chip, &chip->rmh);

	if (ret == 0) {
		freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET;
		freq = freq_raw & XES_FREQ_COUNT8_MASK;

		/* counter decreases as the frequency increases, hence
		 * the inverted-looking range checks */
		if ((freq < XES_FREQ_COUNT8_48_MAX) ||
		    (freq > XES_FREQ_COUNT8_44_MIN))
			frequency = 0; /* unknown */
		else if (freq >= XES_FREQ_COUNT8_44_MAX)
			frequency = 44100;
		else
			frequency = 48000;
	}

	*rfreq = frequency * chip->freq_ratio;

	return ret;
}
359
lx_dsp_get_mac(struct lx6464es * chip)360 int lx_dsp_get_mac(struct lx6464es *chip)
361 {
362 u32 macmsb, maclsb;
363
364 macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF;
365 maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF;
366
367 /* todo: endianess handling */
368 chip->mac_address[5] = ((u8 *)(&maclsb))[0];
369 chip->mac_address[4] = ((u8 *)(&maclsb))[1];
370 chip->mac_address[3] = ((u8 *)(&maclsb))[2];
371 chip->mac_address[2] = ((u8 *)(&macmsb))[0];
372 chip->mac_address[1] = ((u8 *)(&macmsb))[1];
373 chip->mac_address[0] = ((u8 *)(&macmsb))[2];
374
375 return 0;
376 }
377
378
/* Set the dsp processing granularity (CMD_02_SET_GRANULARITY). */
int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran)
{
	guard(mutex)(&chip->msg_lock);

	lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY);
	chip->rmh.cmd[0] |= gran;

	return lx_message_send_atomic(chip, &chip->rmh);
}
388
/* Fetch pending async events (CMD_04_GET_EVENT) into @data
 * (9 u32 words — see lx_interrupt_handle_async_events for layout). */
int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data)
{
	int ret;

	guard(mutex)(&chip->msg_lock);

	lx_message_init(&chip->rmh, CMD_04_GET_EVENT);
	chip->rmh.stat_len = 9;	/* we don't necessarily need the full length */

	ret = lx_message_send_atomic(chip, &chip->rmh);

	if (!ret)
		memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32));

	return ret;
}
405
406 #define PIPE_INFO_TO_CMD(capture, pipe) \
407 ((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET)
408
409
410
411 /* low-level pipe handling */
/* Allocate a capture or playback pipe with @channels channels
 * (CMD_06_ALLOCATE_PIPE). */
int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
		     int channels)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.cmd[0] |= channels;

	err = lx_message_send_atomic(chip, &chip->rmh);

	if (err != 0)
		dev_err(chip->card->dev, "could not allocate pipe\n");

	return err;
}
431
/* Release a previously allocated pipe (CMD_07_RELEASE_PIPE). */
int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture)
{
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE);

	chip->rmh.cmd[0] |= pipe_cmd;

	return lx_message_send_atomic(chip, &chip->rmh);
}
443
/*
 * Query the pipe's buffer ring (CMD_08_ASK_BUFFERS).
 *
 * On success *r_freed is the number of finished buffers (BF_EOB set)
 * and *r_needed the number of free slots (BF_VALID clear); if
 * @size_array is non-NULL it receives the data size of each finished
 * buffer.
 */
int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
		  u32 *r_needed, u32 *r_freed, u32 *size_array)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

#ifdef CONFIG_SND_DEBUG
	if (size_array)
		memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER);
#endif

	*r_needed = 0;
	*r_freed = 0;

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);

	if (!err) {
		int i;
		for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
			u32 stat = chip->rmh.stat[i];
			if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) {
				/* finished */
				*r_freed += 1;
				if (size_array)
					size_array[i] = stat & MASK_DATA_SIZE;
			} else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET))
				   == 0)
				/* free */
				*r_needed += 1;
		}

		dev_dbg(chip->card->dev,
			"CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
			*r_needed, *r_freed);
		for (i = 0; i < MAX_STREAM_BUFFER && i < chip->rmh.stat_len;
		     ++i) {
			dev_dbg(chip->card->dev, "  stat[%d]: %x, %x\n", i,
				chip->rmh.stat[i],
				chip->rmh.stat[i] & MASK_DATA_SIZE);
		}
	}

	return err;
}
493
494
/* Stop a running pipe (CMD_09_STOP_PIPE). */
int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture)
{
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_09_STOP_PIPE);

	chip->rmh.cmd[0] |= pipe_cmd;

	return lx_message_send_atomic(chip, &chip->rmh);
}
506
/* Toggle the pipe between run and pause (CMD_0B_TOGGLE_PIPE_STATE). */
static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture)
{
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE);

	chip->rmh.cmd[0] |= pipe_cmd;

	return lx_message_send_atomic(chip, &chip->rmh);
}
518
519
/* Start a pipe: wait for it to reach idle, then toggle it to run. */
int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
	int err;

	err = lx_pipe_wait_for_idle(chip, pipe, is_capture);
	if (err < 0)
		return err;

	err = lx_pipe_toggle_state(chip, pipe, is_capture);

	return err;
}
532
/* Pause a pipe: wait for it to be running, then toggle it to pause. */
int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture)
{
	int err = 0;

	err = lx_pipe_wait_for_start(chip, pipe, is_capture);
	if (err < 0)
		return err;

	err = lx_pipe_toggle_state(chip, pipe, is_capture);

	return err;
}
545
546
/* Read the pipe's 64-bit sample counter (CMD_0A_GET_PIPE_SPL_COUNT);
 * high 24 bits in stat[0], low 32 bits in stat[1]. */
int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
			 u64 *rsample_count)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.stat_len = 2;	/* need all words here! */

	err = lx_message_send_atomic(chip, &chip->rmh); /* don't sleep! */

	if (err != 0)
		dev_err(chip->card->dev,
			"could not query pipe's sample count\n");
	else {
		*rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
				  << 24)	/* hi part */
			+ chip->rmh.stat[1];	/* lo part */
	}

	return err;
}
572
/* Read the pipe state nibble from stat[0] of CMD_0A_GET_PIPE_SPL_COUNT
 * into *rstate (PSTATE_* values). */
int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);

	if (err != 0)
		dev_err(chip->card->dev, "could not query pipe's state\n");
	else
		*rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F;

	return err;
}
592
/* Poll lx_pipe_state() until the pipe reaches @state, or time out
 * after ~50 ms.  (Fixes mojibake: '&current_state' had been mangled.) */
static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe,
				  int is_capture, u16 state)
{
	int i;

	/* max 2*PCMOnlyGranularity = 2*1024 at 44100 = < 50 ms:
	 * timeout 50 ms */
	for (i = 0; i != 50; ++i) {
		u16 current_state;
		int err = lx_pipe_state(chip, pipe, is_capture, &current_state);

		if (err < 0)
			return err;

		if (!err && current_state == state)
			return 0;

		mdelay(1);
	}

	return -ETIMEDOUT;
}
615
/* Wait until the pipe is in the RUN state. */
int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
	return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN);
}
620
/* Wait until the pipe is in the IDLE state. */
int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture)
{
	return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE);
}
625
626 /* low-level stream handling */
/* Set the stream run/pause state (CMD_13_SET_STREAM_STATE). */
int lx_stream_set_state(struct lx6464es *chip, u32 pipe,
			int is_capture, enum stream_state_t state)
{
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.cmd[0] |= state;

	return lx_message_send_atomic(chip, &chip->rmh);
}
640
/* Define the stream format (CMD_0C_DEF_STREAM): sample width,
 * endianness and channel count taken from the pcm runtime. */
int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
			 u32 pipe, int is_capture)
{
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
	u32 channels = runtime->channels;

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM);

	chip->rmh.cmd[0] |= pipe_cmd;

	if (runtime->sample_bits == 16)
		/* 16 bit format */
		chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET);

	if (snd_pcm_format_little_endian(runtime->format))
		/* little endian/intel format */
		chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET);

	chip->rmh.cmd[0] |= channels - 1;

	return lx_message_send_atomic(chip, &chip->rmh);
}
664
/* Read the stream start/pause flag (SF_START in stat[0] of
 * CMD_0E_GET_STREAM_SPL_COUNT) into *rstate. */
int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
		    int *rstate)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);

	*rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE;

	return err;
}
682
/* Read the stream's 64-bit sample position
 * (CMD_0E_GET_STREAM_SPL_COUNT) into *r_bytepos. */
int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
			      u64 *r_bytepos)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

	chip->rmh.cmd[0] |= pipe_cmd;

	err = lx_message_send_atomic(chip, &chip->rmh);

	*r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
		      << 32)		/* hi part */
		+ chip->rmh.stat[1];	/* lo part */

	return err;
}
702
703 /* low-level buffer handling */
/*
 * Hand one dma buffer to the pipe (CMD_0F_UPDATE_BUFFER) and request
 * an end-of-buffer interrupt for it.  A non-zero @buf_address_hi
 * switches the command to its 64-bit-address form.  On success the
 * board's buffer index is returned in *r_buffer_index.
 */
int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
		   u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi,
		   u32 *r_buffer_index)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.cmd[0] |= BF_NOTIFY_EOB; /* request interrupt notification */

	/* todo: pause request, circular buffer */

	chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE;
	chip->rmh.cmd[2] = buf_address_lo;

	if (buf_address_hi) {
		chip->rmh.cmd_len = 4;
		chip->rmh.cmd[3] = buf_address_hi;
		chip->rmh.cmd[0] |= BF_64BITS_ADR;
	}

	err = lx_message_send_atomic(chip, &chip->rmh);

	if (err == 0) {
		*r_buffer_index = chip->rmh.stat[0];
		return err;
	}

	/* NOTE(review): positive dsp status codes compared below —
	 * presumably EB_* are positive; verify against lx_core.h */
	if (err == EB_RBUFFERS_TABLE_OVERFLOW)
		dev_err(chip->card->dev,
			"lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n");

	if (err == EB_INVALID_STREAM)
		dev_err(chip->card->dev,
			"lx_buffer_give EB_INVALID_STREAM\n");

	if (err == EB_CMD_REFUSED)
		dev_err(chip->card->dev,
			"lx_buffer_give EB_CMD_REFUSED\n");

	return err;
}
749
/* Cancel the pipe's current buffer (CMD_11_CANCEL_BUFFER with
 * MASK_BUFFER_ID); on success its size is returned in *r_buffer_size. */
int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
		   u32 *r_buffer_size)
{
	int err;
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.cmd[0] |= MASK_BUFFER_ID; /* ask for the current buffer: the
					     * microblaze will seek for it */

	err = lx_message_send_atomic(chip, &chip->rmh);

	if (err == 0)
		*r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE;

	return err;
}
770
/* Cancel a specific buffer by index (CMD_11_CANCEL_BUFFER). */
int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
		     u32 buffer_index)
{
	u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

	guard(mutex)(&chip->msg_lock);
	lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

	chip->rmh.cmd[0] |= pipe_cmd;
	chip->rmh.cmd[0] |= buffer_index;

	return lx_message_send_atomic(chip, &chip->rmh);
}
784
785
786 /* low-level gain/peak handling
787 *
788 * \todo: can we unmute capture/playback channels independently?
789 *
790 * */
lx_level_unmute(struct lx6464es * chip,int is_capture,int unmute)791 int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute)
792 {
793 /* bit set to 1: channel muted */
794 u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU;
795
796 guard(mutex)(&chip->msg_lock);
797 lx_message_init(&chip->rmh, CMD_0D_SET_MUTE);
798
799 chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0);
800
801 chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32); /* hi part */
802 chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF); /* lo part */
803
804 dev_dbg(chip->card->dev,
805 "mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1],
806 chip->rmh.cmd[2]);
807
808 return lx_message_send_atomic(chip, &chip->rmh);
809 }
810
/* Lookup table translating the board's 4-bit peak-meter codes into
 * 24-bit linear amplitude values (dB noted per entry, 0x7FFFFF = FS). */
static const u32 peak_map[] = {
	0x00000109, /* -90.308dB */
	0x0000083B, /* -72.247dB */
	0x000020C4, /* -60.205dB */
	0x00008273, /* -48.030dB */
	0x00020756, /* -36.005dB */
	0x00040C37, /* -30.001dB */
	0x00081385, /* -24.002dB */
	0x00101D3F, /* -18.000dB */
	0x0016C310, /* -15.000dB */
	0x002026F2, /* -12.001dB */
	0x002D6A86, /* -9.000dB */
	0x004026E6, /* -6.004dB */
	0x005A9DF6, /* -3.000dB */
	0x0065AC8B, /* -2.000dB */
	0x00721481, /* -1.000dB */
	0x007FFFFF, /* FS */
};
829
/*
 * Read peak levels for @channels channels (CMD_12_GET_PEAK), four
 * channels per command; 4-bit codes are translated through peak_map[]
 * into r_levels[].  On a command failure the four slots are zeroed.
 */
int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
		   u32 *r_levels)
{
	int err = 0;
	int i;

	guard(mutex)(&chip->msg_lock);
	for (i = 0; i < channels; i += 4) {
		u32 s0, s1, s2, s3;

		lx_message_init(&chip->rmh, CMD_12_GET_PEAK);
		chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i);

		err = lx_message_send_atomic(chip, &chip->rmh);

		if (err == 0) {
			s0 = peak_map[chip->rmh.stat[0] & 0x0F];
			s1 = peak_map[(chip->rmh.stat[0] >> 4) & 0xf];
			s2 = peak_map[(chip->rmh.stat[0] >> 8) & 0xf];
			s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf];
		} else
			s0 = s1 = s2 = s3 = 0;

		r_levels[0] = s0;
		r_levels[1] = s1;
		r_levels[2] = s2;
		r_levels[3] = s3;

		r_levels += 4;
	}

	return err;
}
863
864 /* interrupt handling */
865 #define PCX_IRQ_NONE 0
866 #define IRQCS_ACTIVE_PCIDB BIT(13)
867 #define IRQCS_ENABLE_PCIIRQ BIT(8)
868 #define IRQCS_ENABLE_PCIDB BIT(9)
869
lx_interrupt_test_ack(struct lx6464es * chip)870 static u32 lx_interrupt_test_ack(struct lx6464es *chip)
871 {
872 u32 irqcs = lx_plx_reg_read(chip, ePLX_IRQCS);
873
874 /* Test if PCI Doorbell interrupt is active */
875 if (irqcs & IRQCS_ACTIVE_PCIDB) {
876 u32 temp;
877 irqcs = PCX_IRQ_NONE;
878
879 while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) {
880 /* RAZ interrupt */
881 irqcs |= temp;
882 lx_plx_reg_write(chip, ePLX_L2PCIDB, temp);
883 }
884
885 return irqcs;
886 }
887 return PCX_IRQ_NONE;
888 }
889
/*
 * Acknowledge the interrupt and classify it.  Returns 0 if the irq was
 * not ours.  Otherwise *r_irqsrc gets the raw doorbell bits, and
 * *r_async_escmd / *r_async_pending are set (never cleared — callers
 * must initialize them) when an EtherSound command answer or async
 * events are pending.
 */
static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc,
			    int *r_async_pending, int *r_async_escmd)
{
	u32 irq_async;
	u32 irqsrc = lx_interrupt_test_ack(chip);

	if (irqsrc == PCX_IRQ_NONE)
		return 0;

	*r_irqsrc = irqsrc;

	irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS; /* + EtherSound response
						     * (set by xilinx) + EOB */

	if (irq_async & MASK_SYS_STATUS_ESA) {
		irq_async &= ~MASK_SYS_STATUS_ESA;
		*r_async_escmd = 1;
	}

	if (irq_async) {
		/* dev_dbg(chip->card->dev, "interrupt: async event pending\n"); */
		*r_async_pending = 1;
	}

	return 1;
}
916
/*
 * Fetch and decode pending async events.  Sets *r_freq_changed from
 * the irq source bits and, for each direction with an end-of-buffer
 * pending, fills the corresponding 64-bit notified-pipe mask.
 */
static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc,
					    int *r_freq_changed,
					    u64 *r_notified_in_pipe_mask,
					    u64 *r_notified_out_pipe_mask)
{
	int err;
	u32 stat[9];		/* answer from CMD_04_GET_EVENT */

	/* We can optimize this to not read dumb events.
	 * Answer words are in the following order:
	 * Stat[0]	general status
	 * Stat[1]	end of buffer OUT pF
	 * Stat[2]	end of buffer OUT pf
	 * Stat[3]	end of buffer IN pF
	 * Stat[4]	end of buffer IN pf
	 * Stat[5]	MSB underrun
	 * Stat[6]	LSB underrun
	 * Stat[7]	MSB overrun
	 * Stat[8]	LSB overrun
	 * */

	int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0;
	int eb_pending_in  = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0;

	*r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 1 : 0;

	err = lx_dsp_read_async_events(chip, stat);
	if (err < 0)
		return err;

	if (eb_pending_in) {
		*r_notified_in_pipe_mask = ((u64)stat[3] << 32)
			+ stat[4];
		dev_dbg(chip->card->dev, "interrupt: EOBI pending %llx\n",
			*r_notified_in_pipe_mask);
	}
	if (eb_pending_out) {
		*r_notified_out_pipe_mask = ((u64)stat[1] << 32)
			+ stat[2];
		dev_dbg(chip->card->dev, "interrupt: EOBO pending %llx\n",
			*r_notified_out_pipe_mask);
	}

	/* todo: handle xrun notification */

	return err;
}
964
lx_interrupt_request_new_buffer(struct lx6464es * chip,struct lx_stream * lx_stream)965 static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
966 struct lx_stream *lx_stream)
967 {
968 struct snd_pcm_substream *substream = lx_stream->stream;
969 const unsigned int is_capture = lx_stream->is_capture;
970 int err;
971
972 const u32 channels = substream->runtime->channels;
973 const u32 bytes_per_frame = channels * 3;
974 const u32 period_size = substream->runtime->period_size;
975 const u32 period_bytes = period_size * bytes_per_frame;
976 const u32 pos = lx_stream->frame_pos;
977 const u32 next_pos = ((pos+1) == substream->runtime->periods) ?
978 0 : pos + 1;
979
980 dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes;
981 u32 buf_hi = 0;
982 u32 buf_lo = 0;
983 u32 buffer_index = 0;
984
985 u32 needed, freed;
986 u32 size_array[MAX_STREAM_BUFFER];
987
988 dev_dbg(chip->card->dev, "->lx_interrupt_request_new_buffer\n");
989
990 guard(mutex)(&chip->lock);
991
992 err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
993 dev_dbg(chip->card->dev,
994 "interrupt: needed %d, freed %d\n", needed, freed);
995
996 unpack_pointer(buf, &buf_lo, &buf_hi);
997 err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi,
998 &buffer_index);
999 dev_dbg(chip->card->dev,
1000 "interrupt: gave buffer index %x on 0x%lx (%d bytes)\n",
1001 buffer_index, (unsigned long)buf, period_bytes);
1002
1003 lx_stream->frame_pos = next_pos;
1004
1005 return err;
1006 }
1007
lx_interrupt(int irq,void * dev_id)1008 irqreturn_t lx_interrupt(int irq, void *dev_id)
1009 {
1010 struct lx6464es *chip = dev_id;
1011 int async_pending, async_escmd;
1012 u32 irqsrc;
1013 bool wake_thread = false;
1014
1015 dev_dbg(chip->card->dev,
1016 "**************************************************\n");
1017
1018 if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) {
1019 dev_dbg(chip->card->dev, "IRQ_NONE\n");
1020 return IRQ_NONE; /* this device did not cause the interrupt */
1021 }
1022
1023 if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
1024 return IRQ_HANDLED;
1025
1026 if (irqsrc & MASK_SYS_STATUS_EOBI)
1027 dev_dbg(chip->card->dev, "interrupt: EOBI\n");
1028
1029 if (irqsrc & MASK_SYS_STATUS_EOBO)
1030 dev_dbg(chip->card->dev, "interrupt: EOBO\n");
1031
1032 if (irqsrc & MASK_SYS_STATUS_URUN)
1033 dev_dbg(chip->card->dev, "interrupt: URUN\n");
1034
1035 if (irqsrc & MASK_SYS_STATUS_ORUN)
1036 dev_dbg(chip->card->dev, "interrupt: ORUN\n");
1037
1038 if (async_pending) {
1039 wake_thread = true;
1040 chip->irqsrc = irqsrc;
1041 }
1042
1043 if (async_escmd) {
1044 /* backdoor for ethersound commands
1045 *
1046 * for now, we do not need this
1047 *
1048 * */
1049
1050 dev_dbg(chip->card->dev, "interrupt requests escmd handling\n");
1051 }
1052
1053 return wake_thread ? IRQ_WAKE_THREAD : IRQ_HANDLED;
1054 }
1055
lx_threaded_irq(int irq,void * dev_id)1056 irqreturn_t lx_threaded_irq(int irq, void *dev_id)
1057 {
1058 struct lx6464es *chip = dev_id;
1059 u64 notified_in_pipe_mask = 0;
1060 u64 notified_out_pipe_mask = 0;
1061 int freq_changed;
1062 int err;
1063
1064 /* handle async events */
1065 err = lx_interrupt_handle_async_events(chip, chip->irqsrc,
1066 &freq_changed,
1067 ¬ified_in_pipe_mask,
1068 ¬ified_out_pipe_mask);
1069 if (err)
1070 dev_err(chip->card->dev, "error handling async events\n");
1071
1072 if (notified_in_pipe_mask) {
1073 struct lx_stream *lx_stream = &chip->capture_stream;
1074
1075 dev_dbg(chip->card->dev,
1076 "requesting audio transfer for capture\n");
1077 err = lx_interrupt_request_new_buffer(chip, lx_stream);
1078 if (err < 0)
1079 dev_err(chip->card->dev,
1080 "cannot request new buffer for capture\n");
1081 snd_pcm_period_elapsed(lx_stream->stream);
1082 }
1083
1084 if (notified_out_pipe_mask) {
1085 struct lx_stream *lx_stream = &chip->playback_stream;
1086
1087 dev_dbg(chip->card->dev,
1088 "requesting audio transfer for playback\n");
1089 err = lx_interrupt_request_new_buffer(chip, lx_stream);
1090 if (err < 0)
1091 dev_err(chip->card->dev,
1092 "cannot request new buffer for playback\n");
1093 snd_pcm_period_elapsed(lx_stream->stream);
1094 }
1095
1096 return IRQ_HANDLED;
1097 }
1098
1099
lx_irq_set(struct lx6464es * chip,int enable)1100 static void lx_irq_set(struct lx6464es *chip, int enable)
1101 {
1102 u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS);
1103
1104 /* enable/disable interrupts
1105 *
1106 * Set the Doorbell and PCI interrupt enable bits
1107 *
1108 * */
1109 if (enable)
1110 reg |= (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
1111 else
1112 reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
1113 lx_plx_reg_write(chip, ePLX_IRQCS, reg);
1114 }
1115
lx_irq_enable(struct lx6464es * chip)1116 void lx_irq_enable(struct lx6464es *chip)
1117 {
1118 dev_dbg(chip->card->dev, "->lx_irq_enable\n");
1119 lx_irq_set(chip, 1);
1120 }
1121
lx_irq_disable(struct lx6464es * chip)1122 void lx_irq_disable(struct lx6464es *chip)
1123 {
1124 dev_dbg(chip->card->dev, "->lx_irq_disable\n");
1125 lx_irq_set(chip, 0);
1126 }
1127