1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca>
5 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org>
6 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 /*
32 * Intel High Definition Audio (Controller) driver for FreeBSD.
33 */
34
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_snd.h"
37 #endif
38
39 #include <dev/sound/pcm/sound.h>
40 #include <dev/pci/pcireg.h>
41 #include <dev/pci/pcivar.h>
42
43 #include <sys/ctype.h>
44 #include <sys/endian.h>
45 #include <sys/taskqueue.h>
46
47 #include <dev/sound/pci/hda/hdac_private.h>
48 #include <dev/sound/pci/hda/hdac_reg.h>
49 #include <dev/sound/pci/hda/hda_reg.h>
50 #include <dev/sound/pci/hda/hdac.h>
51
52 #define HDA_DRV_TEST_REV "20120126_0002"
53
54 #define hdac_lock(sc) mtx_lock(&(sc)->lock)
55 #define hdac_unlock(sc) mtx_unlock(&(sc)->lock)
56 #define hdac_lockassert(sc) mtx_assert(&(sc)->lock, MA_OWNED)
57
58 #define HDAC_QUIRK_64BIT (1 << 0)
59 #define HDAC_QUIRK_DMAPOS (1 << 1)
60 #define HDAC_QUIRK_MSI (1 << 2)
61
/*
 * Table of quirk names accepted in the per-device "config" hint string
 * (parsed by hdac_config_fetch).  Each token maps to one HDAC_QUIRK_*
 * bit; prefixing a token with "no" clears the bit instead of setting it.
 */
static const struct {
	const char *key;	/* token as written in the hint string */
	uint32_t value;		/* corresponding HDAC_QUIRK_* flag */
} hdac_quirks_tab[] = {
	{ "64bit", HDAC_QUIRK_64BIT },
	{ "dmapos", HDAC_QUIRK_DMAPOS },
	{ "msi", HDAC_QUIRK_MSI },
};

/* malloc(9) type for all allocations made by this controller driver. */
MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller");
72
/*
 * Known HDA controller devices, matched by PCI vendor/device ID in
 * hdac_probe.  Entries are scanned in order; the HDA_*_ALL catch-all
 * entries at the bottom match any device from that vendor, so more
 * specific IDs must come first.  quirks_on/quirks_off force the named
 * HDAC_QUIRK_* bits on or off for that device.
 */
static const struct {
	uint32_t model;		/* packed PCI vendor/device ID */
	const char *desc;	/* human-readable controller name */
	char quirks_on;		/* HDAC_QUIRK_* bits to force on */
	char quirks_off;	/* HDAC_QUIRK_* bits to force off */
} hdac_devices[] = {
	{ HDA_INTEL_OAK, "Intel Oaktrail", 0, 0 },
	{ HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 },
	{ HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 },
	{ HDA_INTEL_BAY, "Intel BayTrail", 0, 0 },
	{ HDA_INTEL_HSW1, "Intel Haswell", 0, 0 },
	{ HDA_INTEL_HSW2, "Intel Haswell", 0, 0 },
	{ HDA_INTEL_HSW3, "Intel Haswell", 0, 0 },
	{ HDA_INTEL_BDW1, "Intel Broadwell", 0, 0 },
	{ HDA_INTEL_BDW2, "Intel Broadwell", 0, 0 },
	{ HDA_INTEL_BXTNT, "Intel Broxton-T", 0, 0 },
	{ HDA_INTEL_CPT, "Intel Cougar Point", 0, 0 },
	{ HDA_INTEL_PATSBURG,"Intel Patsburg", 0, 0 },
	{ HDA_INTEL_PPT1, "Intel Panther Point", 0, 0 },
	{ HDA_INTEL_BR, "Intel Braswell", 0, 0 },
	{ HDA_INTEL_LPT1, "Intel Lynx Point", 0, 0 },
	{ HDA_INTEL_LPT2, "Intel Lynx Point", 0, 0 },
	{ HDA_INTEL_WCPT, "Intel Wildcat Point", 0, 0 },
	{ HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 },
	{ HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 },
	{ HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 },
	{ HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 },
	{ HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 },
	{ HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 },
	{ HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 },
	{ HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 },
	{ HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 },
	{ HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 },
	{ HDA_INTEL_CMLKS, "Intel Comet Lake-S", 0, 0 },
	{ HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 },
	{ HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 },
	{ HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 },
	{ HDA_INTEL_TGLKH, "Intel Tiger Lake-H", 0, 0 },
	{ HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 },
	{ HDA_INTEL_ALLK, "Intel Alder Lake", 0, 0 },
	{ HDA_INTEL_ALLKM, "Intel Alder Lake-M", 0, 0 },
	{ HDA_INTEL_ALLKN, "Intel Alder Lake-N", 0, 0 },
	{ HDA_INTEL_ALLKP1, "Intel Alder Lake-P", 0, 0 },
	{ HDA_INTEL_ALLKP2, "Intel Alder Lake-P", 0, 0 },
	{ HDA_INTEL_ALLKPS, "Intel Alder Lake-PS", 0, 0 },
	{ HDA_INTEL_RPTLK1, "Intel Raptor Lake-P", 0, 0 },
	{ HDA_INTEL_RPTLK2, "Intel Raptor Lake-P", 0, 0 },
	{ HDA_INTEL_RPTLK3, "Intel Raptor Lake-S", 0, 0 },
	{ HDA_INTEL_MTL, "Intel Meteor Lake-P", 0, 0 },
	{ HDA_INTEL_ARLS, "Intel Arrow Lake-S", 0, 0 },
	{ HDA_INTEL_ARL, "Intel Arrow Lake", 0, 0 },
	{ HDA_INTEL_LNLP, "Intel Lunar Lake-P", 0, 0 },
	{ HDA_INTEL_82801F, "Intel 82801F", 0, 0 },
	{ HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 },
	{ HDA_INTEL_82801G, "Intel 82801G", 0, 0 },
	{ HDA_INTEL_82801H, "Intel 82801H", 0, 0 },
	{ HDA_INTEL_82801I, "Intel 82801I", 0, 0 },
	{ HDA_INTEL_JLK, "Intel Jasper Lake", 0, 0 },
	{ HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 },
	{ HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 },
	{ HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 },
	{ HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 },
	{ HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 },
	{ HDA_INTEL_ELLK2, "Intel Elkhart Lake", 0, 0 },
	{ HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 },
	{ HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 },
	{ HDA_INTEL_SCH, "Intel SCH", 0, 0 },
	/* Some NVIDIA parts have broken MSI or 64-bit DMA; see quirks. */
	{ HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 },
	{ HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 },
	{ HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 },
	{ HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 },
	{ HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 },
	{ HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 },
	{ HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 },
	{ HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 },
	{ HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT },
	{ HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT },
	{ HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT },
	{ HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT },
	{ HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 },
	{ HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 },
	{ HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 },
	{ HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 },
	{ HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 },
	{ HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 },
	{ HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 },
	{ HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 },
	{ HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 },
	{ HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI },
	{ HDA_ATI_RAVEN, "ATI Raven", 0, 0 },
	{ HDA_ATI_SB450, "ATI SB450", 0, 0 },
	{ HDA_ATI_SB600, "ATI SB600", 0, 0 },
	{ HDA_ATI_RS600, "ATI RS600", 0, 0 },
	{ HDA_ATI_RS690, "ATI RS690", 0, 0 },
	{ HDA_ATI_RS780, "ATI RS780", 0, 0 },
	{ HDA_ATI_RS880, "ATI RS880", 0, 0 },
	{ HDA_ATI_R600, "ATI R600", 0, 0 },
	{ HDA_ATI_RV610, "ATI RV610", 0, 0 },
	{ HDA_ATI_RV620, "ATI RV620", 0, 0 },
	{ HDA_ATI_RV630, "ATI RV630", 0, 0 },
	{ HDA_ATI_RV635, "ATI RV635", 0, 0 },
	{ HDA_ATI_RV710, "ATI RV710", 0, 0 },
	{ HDA_ATI_RV730, "ATI RV730", 0, 0 },
	{ HDA_ATI_RV740, "ATI RV740", 0, 0 },
	{ HDA_ATI_RV770, "ATI RV770", 0, 0 },
	{ HDA_ATI_RV810, "ATI RV810", 0, 0 },
	{ HDA_ATI_RV830, "ATI RV830", 0, 0 },
	{ HDA_ATI_RV840, "ATI RV840", 0, 0 },
	{ HDA_ATI_RV870, "ATI RV870", 0, 0 },
	{ HDA_ATI_RV910, "ATI RV910", 0, 0 },
	{ HDA_ATI_RV930, "ATI RV930", 0, 0 },
	{ HDA_ATI_RV940, "ATI RV940", 0, 0 },
	{ HDA_ATI_RV970, "ATI RV970", 0, 0 },
	{ HDA_ATI_R1000, "ATI R1000", 0, 0 },
	{ HDA_ATI_OLAND, "ATI Oland", 0, 0 },
	{ HDA_ATI_KABINI, "ATI Kabini", 0, 0 },
	{ HDA_ATI_TRINITY, "ATI Trinity", 0, 0 },
	{ HDA_AMD_X370, "AMD X370", 0, 0 },
	{ HDA_AMD_X570, "AMD X570", 0, 0 },
	{ HDA_AMD_STONEY, "AMD Stoney", 0, 0 },
	{ HDA_AMD_RAVEN, "AMD Raven", 0, 0 },
	{ HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 },
	{ HDA_RDC_M3010, "RDC M3010", 0, 0 },
	{ HDA_VIA_VT82XX, "VIA VT8251/8237A",0, 0 },
	{ HDA_VMWARE, "VMware", 0, 0 },
	{ HDA_SIS_966, "SiS 966/968", 0, 0 },
	{ HDA_ULI_M5461, "ULI M5461", 0, 0 },
	{ HDA_CREATIVE_SB1570, "Creative SB Audigy FX", 0, HDAC_QUIRK_64BIT },
	/* Unknown */
	{ HDA_INTEL_ALL, "Intel", 0, 0 },
	{ HDA_NVIDIA_ALL, "NVIDIA", 0, 0 },
	{ HDA_ATI_ALL, "ATI", 0, 0 },
	{ HDA_AMD_ALL, "AMD", 0, 0 },
	{ HDA_CREATIVE_ALL, "Creative", 0, 0 },
	{ HDA_VIA_ALL, "VIA", 0, 0 },
	{ HDA_VMWARE_ALL, "VMware", 0, 0 },
	{ HDA_SIS_ALL, "SiS", 0, 0 },
	{ HDA_ULI_ALL, "ULI", 0, 0 },
};
223
/*
 * Per-vendor PCI configuration registers used to enable PCIe snooping,
 * which keeps HDA DMA coherent with the CPU caches.  A zero reg/mask
 * entry (Intel) means no extra configuration is needed.  For the
 * others: new = (old & mask) | enable is written to config space
 * register 'reg'.
 */
static const struct {
	uint16_t vendor;	/* PCI vendor ID this entry applies to */
	uint8_t reg;		/* config-space register to modify */
	uint8_t mask;		/* bits of the register to preserve */
	uint8_t enable;		/* bits to set to enable snooping */
} hdac_pcie_snoop[] = {
	{  INTEL_VENDORID, 0x00, 0x00, 0x00 },
	{    ATI_VENDORID, 0x42, 0xf8, 0x02 },
	{    AMD_VENDORID, 0x42, 0xf8, 0x02 },
	{ NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f },
};
235
236 /****************************************************************************
237 * Function prototypes
238 ****************************************************************************/
239 static void hdac_intr_handler(void *);
240 static int hdac_reset(struct hdac_softc *, bool);
241 static int hdac_get_capabilities(struct hdac_softc *);
242 static void hdac_dma_cb(void *, bus_dma_segment_t *, int, int);
243 static int hdac_dma_alloc(struct hdac_softc *,
244 struct hdac_dma *, bus_size_t);
245 static void hdac_dma_free(struct hdac_softc *, struct hdac_dma *);
246 static int hdac_mem_alloc(struct hdac_softc *);
247 static void hdac_mem_free(struct hdac_softc *);
248 static int hdac_irq_alloc(struct hdac_softc *);
249 static void hdac_irq_free(struct hdac_softc *);
250 static void hdac_corb_init(struct hdac_softc *);
251 static void hdac_rirb_init(struct hdac_softc *);
252 static void hdac_corb_start(struct hdac_softc *);
253 static void hdac_rirb_start(struct hdac_softc *);
254
255 static void hdac_attach2(void *);
256
257 static uint32_t hdac_send_command(struct hdac_softc *, nid_t, uint32_t);
258
259 static int hdac_probe(device_t);
260 static int hdac_attach(device_t);
261 static int hdac_detach(device_t);
262 static int hdac_suspend(device_t);
263 static int hdac_resume(device_t);
264
265 static int hdac_rirb_flush(struct hdac_softc *sc);
266 static int hdac_unsolq_flush(struct hdac_softc *sc);
267
/*
 * Parse the per-device "config" hint string into quirk on/off masks.
 *
 * The string is a list of tokens separated by commas and/or whitespace.
 * Each token is looked up in hdac_quirks_tab; a plain token sets the
 * quirk bit in *on (and clears it in *off), while a token prefixed with
 * "no" (e.g. "nomsi") sets the bit in *off (and clears it in *on).
 * Unrecognized tokens are silently ignored.  *on and *off are only
 * modified, never reset, so callers may pre-seed them.
 *
 * This function surely going to make its way into upper level someday.
 */
static void
hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off)
{
	const char *res = NULL;
	int i = 0, j, k, len, inv;

	/* Nothing to do if the hint is absent or empty. */
	if (resource_string_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "config", &res) != 0)
		return;
	if (!(res != NULL && strlen(res) > 0))
		return;
	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Config options:");
	);
	for (;;) {
		/* Skip leading separators (commas and whitespace). */
		while (res[i] != '\0' &&
		    (res[i] == ',' || isspace(res[i]) != 0))
			i++;
		if (res[i] == '\0') {
			HDA_BOOTVERBOSE(
				printf("\n");
			);
			return;
		}
		/* Find the end of the current token; [i, j) is the token. */
		j = i;
		while (res[j] != '\0' &&
		    !(res[j] == ',' || isspace(res[j]) != 0))
			j++;
		len = j - i;
		/*
		 * A "no" prefix inverts the token; inv is the number of
		 * prefix characters to skip when matching the key.
		 */
		if (len > 2 && strncmp(res + i, "no", 2) == 0)
			inv = 2;
		else
			inv = 0;
		for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) {
			if (strncmp(res + i + inv,
			    hdac_quirks_tab[k].key, len - inv) != 0)
				continue;
			/* Require an exact-length match, not just a prefix. */
			if (len - inv != strlen(hdac_quirks_tab[k].key))
				continue;
			HDA_BOOTVERBOSE(
				printf(" %s%s", (inv != 0) ? "no" : "",
				    hdac_quirks_tab[k].key);
			);
			/* Keep *on and *off mutually exclusive per bit. */
			if (inv == 0) {
				*on |= hdac_quirks_tab[k].value;
				*off &= ~hdac_quirks_tab[k].value;
			} else if (inv != 0) {
				*off |= hdac_quirks_tab[k].value;
				*on &= ~hdac_quirks_tab[k].value;
			}
			break;
		}
		i = j;
	}
}
324
/*
 * Service one snapshot of the interrupt status register.
 *
 * Called with the controller lock held, from either the interrupt
 * handler or the polling callout.  'intsts' is the HDAC_INTSTS value
 * read by the caller.  Handles the controller (RIRB) portion and the
 * per-stream portion of the status word, clearing the corresponding
 * status bits in hardware as it goes.
 */
static void
hdac_one_intr(struct hdac_softc *sc, uint32_t intsts)
{
	device_t dev;
	uint8_t rirbsts;
	int i;

	/* Was this a controller interrupt? */
	if (intsts & HDAC_INTSTS_CIS) {
		/*
		 * Placeholder: if we ever enable any bits in HDAC_WAKEEN, then
		 * we will need to check and clear HDAC_STATESTS.
		 * That event is used to report codec status changes such as
		 * a reset or a wake-up event.
		 */
		/*
		 * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we
		 * will need to check and clear HDAC_CORBSTS_CMEI in
		 * HDAC_CORBSTS.
		 * That event is used to report CORB memory errors.
		 */
		/*
		 * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then we
		 * will need to check and clear HDAC_RIRBSTS_RIRBOIS in
		 * HDAC_RIRBSTS.
		 * That event is used to report response FIFO overruns.
		 */

		/* Get as many responses that we can */
		rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
		while (rirbsts & HDAC_RIRBSTS_RINTFL) {
			/* Ack the status bit before draining the RIRB. */
			HDAC_WRITE_1(&sc->mem,
			    HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL);
			hdac_rirb_flush(sc);
			rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
		}
		/*
		 * Unsolicited responses are processed later from a
		 * taskqueue, outside interrupt context.
		 */
		if (sc->unsolq_rp != sc->unsolq_wp)
			taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
	}

	if (intsts & HDAC_INTSTS_SIS_MASK) {
		/*
		 * Stream status bits occupy the low bits of INTSTS, one
		 * per stream; each stream's register block is 0x20 bytes
		 * (hence the i << 5 offset).
		 */
		for (i = 0; i < sc->num_ss; i++) {
			if ((intsts & (1 << i)) == 0)
				continue;
			/* Write-1-to-clear all stream status conditions. */
			HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS,
			    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
			/* Notify the child driver bound to this stream. */
			if ((dev = sc->streams[i].dev) != NULL) {
				HDAC_STREAM_INTR(dev,
				    sc->streams[i].dir, sc->streams[i].stream);
			}
		}
	}
}
378
379 /****************************************************************************
380 * void hdac_intr_handler(void *)
381 *
382 * Interrupt handler. Processes interrupts received from the hdac.
383 ****************************************************************************/
384 static void
hdac_intr_handler(void * context)385 hdac_intr_handler(void *context)
386 {
387 struct hdac_softc *sc;
388 uint32_t intsts;
389
390 sc = (struct hdac_softc *)context;
391
392 /*
393 * Loop until HDAC_INTSTS_GIS gets clear.
394 * It is plausible that hardware interrupts a host only when GIS goes
395 * from zero to one. GIS is formed by OR-ing multiple hardware
396 * statuses, so it's possible that a previously cleared status gets set
397 * again while another status has not been cleared yet. Thus, there
398 * will be no new interrupt as GIS always stayed set. If we don't
399 * re-examine GIS then we can leave it set and never get an interrupt
400 * again.
401 */
402 hdac_lock(sc);
403 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
404 while (intsts != 0xffffffff && (intsts & HDAC_INTSTS_GIS) != 0) {
405 hdac_one_intr(sc, intsts);
406 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
407 }
408 hdac_unlock(sc);
409 }
410
411 static void
hdac_poll_callback(void * arg)412 hdac_poll_callback(void *arg)
413 {
414 struct hdac_softc *sc = arg;
415
416 if (sc == NULL)
417 return;
418
419 hdac_lock(sc);
420 if (sc->polling == 0) {
421 hdac_unlock(sc);
422 return;
423 }
424 callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc);
425 hdac_unlock(sc);
426
427 hdac_intr_handler(sc);
428 }
429
430 /****************************************************************************
431 * int hdac_reset(hdac_softc *, bool)
432 *
433 * Reset the hdac to a quiescent and known state.
434 ****************************************************************************/
/*
 * Reset the controller.  If 'wakeup' is false the controller is left
 * held in reset (used for suspend/detach); otherwise it is brought back
 * out of reset and given time for codec discovery.  Returns 0 on
 * success or ENXIO if the reset bit fails to toggle.
 */
static int
hdac_reset(struct hdac_softc *sc, bool wakeup)
{
	uint32_t gctl;
	int count, i;

	/*
	 * Stop all Streams DMA engine
	 */
	for (i = 0; i < sc->num_iss; i++)
		HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0);
	for (i = 0; i < sc->num_oss; i++)
		HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0);
	for (i = 0; i < sc->num_bss; i++)
		HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0);

	/*
	 * Stop Control DMA engines.
	 */
	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0);
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0);

	/*
	 * Reset DMA position buffer.
	 */
	HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0);
	HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0);

	/*
	 * Reset the controller. The reset must remain asserted for
	 * a minimum of 100us.
	 */
	gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST);
	/* Poll up to ~100ms for CRST to read back as zero. */
	count = 10000;
	do {
		gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
		if (!(gctl & HDAC_GCTL_CRST))
			break;
		DELAY(10);
	} while (--count);
	if (gctl & HDAC_GCTL_CRST) {
		device_printf(sc->dev, "Unable to put hdac in reset\n");
		return (ENXIO);
	}

	/* If wakeup is not requested - leave the controller in reset state. */
	if (!wakeup)
		return (0);

	/* Satisfy the minimum 100us reset assertion time. */
	DELAY(100);
	gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST);
	/* Poll up to ~100ms for CRST to read back as one. */
	count = 10000;
	do {
		gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
		if (gctl & HDAC_GCTL_CRST)
			break;
		DELAY(10);
	} while (--count);
	if (!(gctl & HDAC_GCTL_CRST)) {
		device_printf(sc->dev, "Device stuck in reset\n");
		return (ENXIO);
	}

	/*
	 * Wait for codecs to finish their own reset sequence. The delay here
	 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery).
	 */
	DELAY(1000);

	return (0);
}
508
509 /****************************************************************************
510 * int hdac_get_capabilities(struct hdac_softc *);
511 *
512 * Retreive the general capabilities of the hdac;
513 * Number of Input Streams
514 * Number of Output Streams
515 * Number of bidirectional Streams
516 * 64bit ready
517 * CORB and RIRB sizes
518 ****************************************************************************/
/*
 * Read the controller's global capabilities (GCAP, CORBSIZE, RIRBSIZE)
 * into the softc: stream counts, SDO line count, 64-bit DMA support
 * (possibly overridden by quirks), and the largest supported CORB/RIRB
 * ring sizes.  Always returns 0.
 */
static int
hdac_get_capabilities(struct hdac_softc *sc)
{
	uint16_t gcap;
	uint8_t corbsize, rirbsize;

	gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP);
	sc->num_iss = HDAC_GCAP_ISS(gcap);
	sc->num_oss = HDAC_GCAP_OSS(gcap);
	sc->num_bss = HDAC_GCAP_BSS(gcap);
	sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss;
	sc->num_sdo = HDAC_GCAP_NSDO(gcap);
	sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0;
	/* Quirks may force 64-bit DMA support on or off. */
	if (sc->quirks_on & HDAC_QUIRK_64BIT)
		sc->support_64bit = 1;
	else if (sc->quirks_off & HDAC_QUIRK_64BIT)
		sc->support_64bit = 0;

	/* Pick the largest CORB size the hardware advertises. */
	corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE);
	if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) ==
	    HDAC_CORBSIZE_CORBSZCAP_256)
		sc->corb_size = 256;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) ==
	    HDAC_CORBSIZE_CORBSZCAP_16)
		sc->corb_size = 16;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) ==
	    HDAC_CORBSIZE_CORBSZCAP_2)
		sc->corb_size = 2;
	else {
		/* No capability bit set; assume 256 and program it. */
		device_printf(sc->dev, "%s: Hardware reports invalid corb size "
		    "(%x), defaulting to 256\n",
		    __func__, corbsize);
		sc->corb_size = 256;
		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256);
		HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize);
	}

	/* Pick the largest RIRB size the hardware advertises. */
	rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE);
	if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_256)
		sc->rirb_size = 256;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_16)
		sc->rirb_size = 16;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_2)
		sc->rirb_size = 2;
	else {
		/* No capability bit set; assume 256 and program it. */
		device_printf(sc->dev, "%s: Hardware reports invalid rirb size "
		    "(%x), defaulting to 256\n",
		    __func__, rirbsize);
		sc->rirb_size = 256;
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256);
		HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize);
	}

	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, "
		    "NSDO %d%s, CORB %d, RIRB %d\n",
		    sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo,
		    sc->support_64bit ? ", 64bit" : "",
		    sc->corb_size, sc->rirb_size);
	);

	return (0);
}
585
586 /****************************************************************************
587 * void hdac_dma_cb
588 *
589 * This function is called by bus_dmamap_load when the mapping has been
590 * established. We just record the physical address of the mapping into
591 * the struct hdac_dma passed in.
592 ****************************************************************************/
593 static void
hdac_dma_cb(void * callback_arg,bus_dma_segment_t * segs,int nseg,int error)594 hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error)
595 {
596 struct hdac_dma *dma;
597
598 if (error == 0) {
599 dma = (struct hdac_dma *)callback_arg;
600 dma->dma_paddr = segs[0].ds_addr;
601 }
602 }
603
604 /****************************************************************************
605 * int hdac_dma_alloc
606 *
607 * This function allocate and setup a dma region (struct hdac_dma).
608 * It must be freed by a corresponding hdac_dma_free.
609 ****************************************************************************/
/*
 * Allocate and map a contiguous DMA buffer of at least 'size' bytes
 * (rounded up to HDA_DMA_ALIGNMENT) into 'dma'.  On success dma_vaddr,
 * dma_paddr, dma_size, dma_tag and dma_map are all valid; on failure
 * everything is torn down and the busdma error code is returned.
 * The result must be released with hdac_dma_free().
 */
static int
hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size)
{
	bus_size_t roundsz;
	int result;

	roundsz = roundup2(size, HDA_DMA_ALIGNMENT);
	/* Start from a clean state so partial failures free safely. */
	bzero(dma, sizeof(*dma));

	/*
	 * Create a DMA tag.  A single segment is required because the
	 * hardware is given one base address; lowaddr restricts the
	 * buffer below 4GB unless the controller supports 64-bit DMA.
	 */
	result = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    HDA_DMA_ALIGNMENT,			/* alignment */
	    0,					/* boundary */
	    (sc->support_64bit) ? BUS_SPACE_MAXADDR :
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL,				/* filtfunc */
	    NULL,				/* fistfuncarg */
	    roundsz,				/* maxsize */
	    1,					/* nsegments */
	    roundsz,				/* maxsegsz */
	    0,					/* flags */
	    NULL,				/* lockfunc */
	    NULL,				/* lockfuncarg */
	    &dma->dma_tag);			/* dmat */
	if (result != 0) {
		device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	/*
	 * Allocate DMA memory, zeroed; uncached when the platform quirk
	 * requires it, coherent otherwise.
	 */
	result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO |
	    ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE :
		BUS_DMA_COHERENT),
	    &dma->dma_map);
	if (result != 0) {
		device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	dma->dma_size = roundsz;

	/*
	 * Map the memory; hdac_dma_cb records the bus address in
	 * dma->dma_paddr.  A zero dma_paddr afterwards means the
	 * callback never fired successfully.
	 */
	result = bus_dmamap_load(dma->dma_tag, dma->dma_map,
	    (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0);
	if (result != 0 || dma->dma_paddr == 0) {
		if (result == 0)
			result = ENOMEM;
		device_printf(sc->dev, "%s: bus_dmamem_load failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n",
		    __func__, (uintmax_t)size, (uintmax_t)roundsz);
	);

	return (0);

hdac_dma_alloc_fail:
	/* hdac_dma_free copes with partially-initialized state. */
	hdac_dma_free(sc, dma);

	return (result);
}
685
686 /****************************************************************************
687 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *)
688 *
689 * Free a struct hdac_dma that has been previously allocated via the
690 * hdac_dma_alloc function.
691 ****************************************************************************/
/*
 * Release a DMA buffer created by hdac_dma_alloc.  Safe to call on a
 * partially-initialized or already-freed structure: each teardown step
 * is guarded and the fields are cleared as they are released.  The
 * order (unload, free, tag destroy) is mandated by busdma.
 */
static void
hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma)
{
	if (dma->dma_paddr != 0) {
		/* Flush caches */
		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		dma->dma_paddr = 0;
	}
	if (dma->dma_vaddr != NULL) {
		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
		dma->dma_vaddr = NULL;
	}
	if (dma->dma_tag != NULL) {
		bus_dma_tag_destroy(dma->dma_tag);
		dma->dma_tag = NULL;
	}
	dma->dma_size = 0;
}
712
713 /****************************************************************************
714 * int hdac_mem_alloc(struct hdac_softc *)
715 *
716 * Allocate all the bus resources necessary to speak with the physical
717 * controller.
718 ****************************************************************************/
719 static int
hdac_mem_alloc(struct hdac_softc * sc)720 hdac_mem_alloc(struct hdac_softc *sc)
721 {
722 struct hdac_mem *mem;
723
724 mem = &sc->mem;
725 mem->mem_rid = PCIR_BAR(0);
726 mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
727 &mem->mem_rid, RF_ACTIVE);
728 if (mem->mem_res == NULL) {
729 device_printf(sc->dev,
730 "%s: Unable to allocate memory resource\n", __func__);
731 return (ENOMEM);
732 }
733 mem->mem_tag = rman_get_bustag(mem->mem_res);
734 mem->mem_handle = rman_get_bushandle(mem->mem_res);
735
736 return (0);
737 }
738
739 /****************************************************************************
740 * void hdac_mem_free(struct hdac_softc *)
741 *
742 * Free up resources previously allocated by hdac_mem_alloc.
743 ****************************************************************************/
744 static void
hdac_mem_free(struct hdac_softc * sc)745 hdac_mem_free(struct hdac_softc *sc)
746 {
747 struct hdac_mem *mem;
748
749 mem = &sc->mem;
750 if (mem->mem_res != NULL)
751 bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid,
752 mem->mem_res);
753 mem->mem_res = NULL;
754 }
755
756 /****************************************************************************
757 * int hdac_irq_alloc(struct hdac_softc *)
758 *
759 * Allocate and setup the resources necessary for interrupt handling.
760 ****************************************************************************/
/*
 * Allocate the interrupt resource and install hdac_intr_handler.
 * MSI is preferred (rid 1) when the quirk does not forbid it, exactly
 * one message is supported, and allocation succeeds; otherwise the
 * legacy INTx line (rid 0) is used.  Returns 0 on success, ENXIO on
 * failure (with everything rolled back via hdac_irq_free).
 */
static int
hdac_irq_alloc(struct hdac_softc *sc)
{
	struct hdac_irq *irq;
	int result;

	irq = &sc->irq;
	irq->irq_rid = 0x0;

	/* irq_rid doubles as the MSI marker: 1 = MSI, 0 = INTx. */
	if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 &&
	    (result = pci_msi_count(sc->dev)) == 1 &&
	    pci_alloc_msi(sc->dev, &result) == 0)
		irq->irq_rid = 0x1;

	irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (irq->irq_res == NULL) {
		device_printf(sc->dev, "%s: Unable to allocate irq\n",
		    __func__);
		goto hdac_irq_alloc_fail;
	}
	/* Ithread handler (no filter); the handler takes the sc lock. */
	result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV,
	    NULL, hdac_intr_handler, sc, &irq->irq_handle);
	if (result != 0) {
		device_printf(sc->dev,
		    "%s: Unable to setup interrupt handler (%d)\n",
		    __func__, result);
		goto hdac_irq_alloc_fail;
	}

	return (0);

hdac_irq_alloc_fail:
	hdac_irq_free(sc);

	return (ENXIO);
}
798
799 /****************************************************************************
800 * void hdac_irq_free(struct hdac_softc *)
801 *
802 * Free up resources previously allocated by hdac_irq_alloc.
803 ****************************************************************************/
/*
 * Tear down the interrupt set up by hdac_irq_alloc.  Safe to call on a
 * partially-initialized state (used as the error-path cleanup).  Order
 * matters: the handler is torn down before the resource is released,
 * and MSI (identified by rid 1) is released last.
 */
static void
hdac_irq_free(struct hdac_softc *sc)
{
	struct hdac_irq *irq;

	irq = &sc->irq;
	if (irq->irq_res != NULL && irq->irq_handle != NULL)
		bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle);
	if (irq->irq_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid,
		    irq->irq_res);
	if (irq->irq_rid == 0x1)
		pci_release_msi(sc->dev);
	irq->irq_handle = NULL;
	irq->irq_res = NULL;
	irq->irq_rid = 0x0;
}
821
822 /****************************************************************************
823 * void hdac_corb_init(struct hdac_softc *)
824 *
825 * Initialize the corb registers for operations but do not start it up yet.
826 * The CORB engine must not be running when this function is called.
827 ****************************************************************************/
/*
 * Program the CORB size, base address and read/write pointers.
 * Must be called with the CORB DMA engine stopped and after the CORB
 * DMA buffer (sc->corb_dma) has been allocated and sc->corb_size set
 * by hdac_get_capabilities.
 */
static void
hdac_corb_init(struct hdac_softc *sc)
{
	uint8_t corbsize;
	uint64_t corbpaddr;

	/* Setup the CORB size. */
	switch (sc->corb_size) {
	case 256:
		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256);
		break;
	case 16:
		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16);
		break;
	case 2:
		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2);
		break;
	default:
		/* corb_size is validated at capability time; cannot happen. */
		panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size);
	}
	HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize);

	/* Setup the CORB Address in the hdac */
	corbpaddr = (uint64_t)sc->corb_dma.dma_paddr;
	HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr);
	HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32));

	/* Set the WP and RP */
	sc->corb_wp = 0;
	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);
	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST);
	/*
	 * The HDA specification indicates that the CORBRPRST bit will always
	 * read as zero. Unfortunately, it seems that at least the 82801G
	 * doesn't reset the bit to zero, which stalls the corb engine.
	 * manually reset the bit to zero before continuing.
	 */
	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0);

	/* Enable CORB error reporting */
#if 0
	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE);
#endif
}
872
873 /****************************************************************************
874 * void hdac_rirb_init(struct hdac_softc *)
875 *
876 * Initialize the rirb registers for operations but do not start it up yet.
877 * The RIRB engine must not be running when this function is called.
878 ****************************************************************************/
/*
 * Program the RIRB size, base address, write pointer and interrupt
 * threshold.  Must be called with the RIRB DMA engine stopped and after
 * the RIRB DMA buffer (sc->rirb_dma) has been allocated and
 * sc->rirb_size set by hdac_get_capabilities.
 */
static void
hdac_rirb_init(struct hdac_softc *sc)
{
	uint8_t rirbsize;
	uint64_t rirbpaddr;

	/* Setup the RIRB size. */
	switch (sc->rirb_size) {
	case 256:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256);
		break;
	case 16:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16);
		break;
	case 2:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2);
		break;
	default:
		/* rirb_size is validated at capability time; cannot happen. */
		panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size);
	}
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize);

	/* Setup the RIRB Address in the hdac */
	rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr;
	HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr);
	HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32));

	/* Setup the WP and RP */
	sc->rirb_rp = 0;
	HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST);

	/* Setup the interrupt threshold */
	HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2);

	/* Enable Overrun and response received reporting */
#if 0
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL,
	    HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL);
#else
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL);
#endif

	/*
	 * Make sure that the Host CPU cache doesn't contain any dirty
	 * cache lines that falls in the rirb. If I understood correctly, it
	 * should be sufficient to do this only once as the rirb is purely
	 * read-only from now on.
	 */
	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
	    BUS_DMASYNC_PREREAD);
}
930
931 /****************************************************************************
932 * void hdac_corb_start(hdac_softc *)
933 *
934 * Startup the corb DMA engine
935 ****************************************************************************/
936 static void
hdac_corb_start(struct hdac_softc * sc)937 hdac_corb_start(struct hdac_softc *sc)
938 {
939 uint32_t corbctl;
940
941 corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL);
942 corbctl |= HDAC_CORBCTL_CORBRUN;
943 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl);
944 }
945
946 /****************************************************************************
947 * void hdac_rirb_start(hdac_softc *)
948 *
949 * Startup the rirb DMA engine
950 ****************************************************************************/
951 static void
hdac_rirb_start(struct hdac_softc * sc)952 hdac_rirb_start(struct hdac_softc *sc)
953 {
954 uint32_t rirbctl;
955
956 rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL);
957 rirbctl |= HDAC_RIRBCTL_RIRBDMAEN;
958 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl);
959 }
960
/*
 * Drain every response the controller has written into the RIRB since the
 * last call.  Unsolicited responses are queued on the unsolq ring for later
 * delivery by hdac_unsolq_flush(); solicited ones complete the oldest
 * pending command of the originating codec.  Returns the number of
 * responses consumed.
 */
static int
hdac_rirb_flush(struct hdac_softc *sc)
{
	struct hdac_rirb *rirb_base, *rirb;
	nid_t cad;
	uint32_t resp, resp_ex;
	uint8_t rirbwp;
	int ret;

	rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr;
	/* The hardware write pointer bounds how far we may read. */
	rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP);
	/* Make the device-written entries visible to the CPU. */
	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	ret = 0;
	while (sc->rirb_rp != rirbwp) {
		/* Advance the software read pointer around the ring. */
		sc->rirb_rp++;
		sc->rirb_rp %= sc->rirb_size;
		rirb = &rirb_base[sc->rirb_rp];
		resp = le32toh(rirb->response);
		resp_ex = le32toh(rirb->response_ex);
		cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex);
		if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) {
			/* Store as a (response, codec address) pair. */
			sc->unsolq[sc->unsolq_wp++] = resp;
			sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
			sc->unsolq[sc->unsolq_wp++] = cad;
			sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
		} else if (sc->codecs[cad].pending <= 0) {
			/* Solicited response with no command outstanding. */
			device_printf(sc->dev, "Unexpected unsolicited "
			    "response from address %d: %08x\n", cad, resp);
		} else {
			sc->codecs[cad].response = resp;
			sc->codecs[cad].pending--;
		}
		ret++;
	}

	/* Hand the consumed entries back for the device's next writes. */
	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
	    BUS_DMASYNC_PREREAD);
	return (ret);
}
1002
1003 static int
hdac_unsolq_flush(struct hdac_softc * sc)1004 hdac_unsolq_flush(struct hdac_softc *sc)
1005 {
1006 device_t child;
1007 nid_t cad;
1008 uint32_t resp;
1009 int ret = 0;
1010
1011 if (sc->unsolq_st == HDAC_UNSOLQ_READY) {
1012 sc->unsolq_st = HDAC_UNSOLQ_BUSY;
1013 while (sc->unsolq_rp != sc->unsolq_wp) {
1014 resp = sc->unsolq[sc->unsolq_rp++];
1015 sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
1016 cad = sc->unsolq[sc->unsolq_rp++];
1017 sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
1018 if ((child = sc->codecs[cad].dev) != NULL &&
1019 device_is_attached(child))
1020 HDAC_UNSOL_INTR(child, resp);
1021 ret++;
1022 }
1023 sc->unsolq_st = HDAC_UNSOLQ_READY;
1024 }
1025
1026 return (ret);
1027 }
1028
/****************************************************************************
 * uint32_t hdac_send_command
 *
 * Wrapper function that sends only one command to a given codec.
 * Must be called with the controller lock held.  Returns the codec's
 * response, or HDA_INVALID on timeout.
 ****************************************************************************/
static uint32_t
hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb)
{
	int timeout;
	uint32_t *corb;

	hdac_lockassert(sc);
	/* Stamp the target codec address into the verb's CAd field. */
	verb &= ~HDA_CMD_CAD_MASK;
	verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT;
	sc->codecs[cad].response = HDA_INVALID;

	sc->codecs[cad].pending++;
	/* Claim the next CORB slot and publish it via the CORBWP register. */
	sc->corb_wp++;
	sc->corb_wp %= sc->corb_size;
	corb = (uint32_t *)sc->corb_dma.dma_vaddr;
	bus_dmamap_sync(sc->corb_dma.dma_tag,
	    sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE);
	corb[sc->corb_wp] = htole32(verb);
	bus_dmamap_sync(sc->corb_dma.dma_tag,
	    sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE);
	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);

	/* Busy-wait for the response: up to 10000 * 10us (~100 ms). */
	timeout = 10000;
	do {
		if (hdac_rirb_flush(sc) == 0)
			DELAY(10);
	} while (sc->codecs[cad].pending != 0 && --timeout);

	if (sc->codecs[cad].pending != 0) {
		device_printf(sc->dev, "Command 0x%08x timeout on address %d\n",
		    verb, cad);
		sc->codecs[cad].pending = 0;
	}

	/* Unsolicited responses drained above are delivered from a task. */
	if (sc->unsolq_rp != sc->unsolq_wp)
		taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
	return (sc->codecs[cad].response);
}
1072
1073 /****************************************************************************
1074 * Device Methods
1075 ****************************************************************************/
1076
1077 /****************************************************************************
1078 * int hdac_probe(device_t)
1079 *
1080 * Probe for the presence of an hdac. If none is found, check for a generic
1081 * match using the subclass of the device.
1082 ****************************************************************************/
1083 static int
hdac_probe(device_t dev)1084 hdac_probe(device_t dev)
1085 {
1086 int i, result;
1087 uint32_t model;
1088 uint16_t class, subclass;
1089 char desc[64];
1090
1091 model = (uint32_t)pci_get_device(dev) << 16;
1092 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
1093 class = pci_get_class(dev);
1094 subclass = pci_get_subclass(dev);
1095
1096 bzero(desc, sizeof(desc));
1097 result = ENXIO;
1098 for (i = 0; i < nitems(hdac_devices); i++) {
1099 if (hdac_devices[i].model == model) {
1100 strlcpy(desc, hdac_devices[i].desc, sizeof(desc));
1101 result = BUS_PROBE_DEFAULT;
1102 break;
1103 }
1104 if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
1105 class == PCIC_MULTIMEDIA &&
1106 subclass == PCIS_MULTIMEDIA_HDA) {
1107 snprintf(desc, sizeof(desc), "%s (0x%04x)",
1108 hdac_devices[i].desc, pci_get_device(dev));
1109 result = BUS_PROBE_GENERIC;
1110 break;
1111 }
1112 }
1113 if (result == ENXIO && class == PCIC_MULTIMEDIA &&
1114 subclass == PCIS_MULTIMEDIA_HDA) {
1115 snprintf(desc, sizeof(desc), "Generic (0x%08x)", model);
1116 result = BUS_PROBE_GENERIC;
1117 }
1118 if (result != ENXIO)
1119 device_set_descf(dev, "%s HDA Controller", desc);
1120
1121 return (result);
1122 }
1123
/* Taskqueue handler: deliver queued unsolicited responses under the lock. */
static void
hdac_unsolq_task(void *context, int pending)
{
	struct hdac_softc *sc = (struct hdac_softc *)context;

	hdac_lock(sc);
	hdac_unsolq_flush(sc);
	hdac_unlock(sc);
}
1135
1136 /****************************************************************************
1137 * int hdac_attach(device_t)
1138 *
1139 * Attach the device into the kernel. Interrupts usually won't be enabled
1140 * when this function is called. Setup everything that doesn't require
1141 * interrupts and defer probing of codecs until interrupts are enabled.
1142 ****************************************************************************/
1143 static int
hdac_attach(device_t dev)1144 hdac_attach(device_t dev)
1145 {
1146 struct hdac_softc *sc;
1147 int result;
1148 int i, devid = -1;
1149 uint32_t model;
1150 uint16_t class, subclass;
1151 uint16_t vendor;
1152 uint8_t v;
1153
1154 sc = device_get_softc(dev);
1155 HDA_BOOTVERBOSE(
1156 device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n",
1157 pci_get_subvendor(dev), pci_get_subdevice(dev));
1158 device_printf(dev, "HDA Driver Revision: %s\n",
1159 HDA_DRV_TEST_REV);
1160 );
1161
1162 model = (uint32_t)pci_get_device(dev) << 16;
1163 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
1164 class = pci_get_class(dev);
1165 subclass = pci_get_subclass(dev);
1166
1167 for (i = 0; i < nitems(hdac_devices); i++) {
1168 if (hdac_devices[i].model == model) {
1169 devid = i;
1170 break;
1171 }
1172 if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
1173 class == PCIC_MULTIMEDIA &&
1174 subclass == PCIS_MULTIMEDIA_HDA) {
1175 devid = i;
1176 break;
1177 }
1178 }
1179
1180 mtx_init(&sc->lock, device_get_nameunit(dev), "HDA driver mutex",
1181 MTX_DEF);
1182 sc->dev = dev;
1183 TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc);
1184 callout_init(&sc->poll_callout, 1);
1185 for (i = 0; i < HDAC_CODEC_MAX; i++)
1186 sc->codecs[i].dev = NULL;
1187 if (devid >= 0) {
1188 sc->quirks_on = hdac_devices[devid].quirks_on;
1189 sc->quirks_off = hdac_devices[devid].quirks_off;
1190 } else {
1191 sc->quirks_on = 0;
1192 sc->quirks_off = 0;
1193 }
1194 if (resource_int_value(device_get_name(dev),
1195 device_get_unit(dev), "msi", &i) == 0) {
1196 if (i == 0)
1197 sc->quirks_off |= HDAC_QUIRK_MSI;
1198 else {
1199 sc->quirks_on |= HDAC_QUIRK_MSI;
1200 sc->quirks_off |= ~HDAC_QUIRK_MSI;
1201 }
1202 }
1203 hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off);
1204 HDA_BOOTVERBOSE(
1205 device_printf(sc->dev,
1206 "Config options: on=0x%08x off=0x%08x\n",
1207 sc->quirks_on, sc->quirks_off);
1208 );
1209 sc->poll_ival = hz;
1210 if (resource_int_value(device_get_name(dev),
1211 device_get_unit(dev), "polling", &i) == 0 && i != 0)
1212 sc->polling = 1;
1213 else
1214 sc->polling = 0;
1215
1216 pci_enable_busmaster(dev);
1217
1218 vendor = pci_get_vendor(dev);
1219 if (vendor == INTEL_VENDORID) {
1220 /* TCSEL -> TC0 */
1221 v = pci_read_config(dev, 0x44, 1);
1222 pci_write_config(dev, 0x44, v & 0xf8, 1);
1223 HDA_BOOTHVERBOSE(
1224 device_printf(dev, "TCSEL: 0x%02d -> 0x%02d\n", v,
1225 pci_read_config(dev, 0x44, 1));
1226 );
1227 }
1228
1229 #if defined(__i386__) || defined(__amd64__)
1230 sc->flags |= HDAC_F_DMA_NOCACHE;
1231
1232 if (resource_int_value(device_get_name(dev),
1233 device_get_unit(dev), "snoop", &i) == 0 && i != 0) {
1234 #else
1235 sc->flags &= ~HDAC_F_DMA_NOCACHE;
1236 #endif
1237 /*
1238 * Try to enable PCIe snoop to avoid messing around with
1239 * uncacheable DMA attribute. Since PCIe snoop register
1240 * config is pretty much vendor specific, there are no
1241 * general solutions on how to enable it, forcing us (even
1242 * Microsoft) to enable uncacheable or write combined DMA
1243 * by default.
1244 *
1245 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx
1246 */
1247 for (i = 0; i < nitems(hdac_pcie_snoop); i++) {
1248 if (hdac_pcie_snoop[i].vendor != vendor)
1249 continue;
1250 sc->flags &= ~HDAC_F_DMA_NOCACHE;
1251 if (hdac_pcie_snoop[i].reg == 0x00)
1252 break;
1253 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
1254 if ((v & hdac_pcie_snoop[i].enable) ==
1255 hdac_pcie_snoop[i].enable)
1256 break;
1257 v &= hdac_pcie_snoop[i].mask;
1258 v |= hdac_pcie_snoop[i].enable;
1259 pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1);
1260 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
1261 if ((v & hdac_pcie_snoop[i].enable) !=
1262 hdac_pcie_snoop[i].enable) {
1263 HDA_BOOTVERBOSE(
1264 device_printf(dev,
1265 "WARNING: Failed to enable PCIe "
1266 "snoop!\n");
1267 );
1268 #if defined(__i386__) || defined(__amd64__)
1269 sc->flags |= HDAC_F_DMA_NOCACHE;
1270 #endif
1271 }
1272 break;
1273 }
1274 #if defined(__i386__) || defined(__amd64__)
1275 }
1276 #endif
1277
1278 HDA_BOOTHVERBOSE(
1279 device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n",
1280 (sc->flags & HDAC_F_DMA_NOCACHE) ?
1281 "Uncacheable" : "PCIe snoop", vendor);
1282 );
1283
1284 /* Allocate resources */
1285 result = hdac_mem_alloc(sc);
1286 if (result != 0)
1287 goto hdac_attach_fail;
1288
1289 /* Get Capabilities */
1290 hdac_reset(sc, 1);
1291 result = hdac_get_capabilities(sc);
1292 if (result != 0)
1293 goto hdac_attach_fail;
1294
1295 /* Allocate CORB, RIRB, POS and BDLs dma memory */
1296 result = hdac_dma_alloc(sc, &sc->corb_dma,
1297 sc->corb_size * sizeof(uint32_t));
1298 if (result != 0)
1299 goto hdac_attach_fail;
1300 result = hdac_dma_alloc(sc, &sc->rirb_dma,
1301 sc->rirb_size * sizeof(struct hdac_rirb));
1302 if (result != 0)
1303 goto hdac_attach_fail;
1304 sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss,
1305 M_HDAC, M_ZERO | M_WAITOK);
1306 for (i = 0; i < sc->num_ss; i++) {
1307 result = hdac_dma_alloc(sc, &sc->streams[i].bdl,
1308 sizeof(struct hdac_bdle) * HDA_BDL_MAX);
1309 if (result != 0)
1310 goto hdac_attach_fail;
1311 }
1312 if (sc->quirks_on & HDAC_QUIRK_DMAPOS) {
1313 if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) {
1314 HDA_BOOTVERBOSE(
1315 device_printf(dev, "Failed to "
1316 "allocate DMA pos buffer "
1317 "(non-fatal)\n");
1318 );
1319 } else {
1320 uint64_t addr = sc->pos_dma.dma_paddr;
1321
1322 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32);
1323 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE,
1324 (addr & HDAC_DPLBASE_DPLBASE_MASK) |
1325 HDAC_DPLBASE_DPLBASE_DMAPBE);
1326 }
1327 }
1328
1329 result = bus_dma_tag_create(
1330 bus_get_dma_tag(sc->dev), /* parent */
1331 HDA_DMA_ALIGNMENT, /* alignment */
1332 0, /* boundary */
1333 (sc->support_64bit) ? BUS_SPACE_MAXADDR :
1334 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1335 BUS_SPACE_MAXADDR, /* highaddr */
1336 NULL, /* filtfunc */
1337 NULL, /* fistfuncarg */
1338 HDA_BUFSZ_MAX, /* maxsize */
1339 1, /* nsegments */
1340 HDA_BUFSZ_MAX, /* maxsegsz */
1341 0, /* flags */
1342 NULL, /* lockfunc */
1343 NULL, /* lockfuncarg */
1344 &sc->chan_dmat); /* dmat */
1345 if (result != 0) {
1346 device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n",
1347 __func__, result);
1348 goto hdac_attach_fail;
1349 }
1350
1351 /* Quiesce everything */
1352 HDA_BOOTHVERBOSE(
1353 device_printf(dev, "Reset controller...\n");
1354 );
1355 hdac_reset(sc, true);
1356
1357 /* Initialize the CORB and RIRB */
1358 hdac_corb_init(sc);
1359 hdac_rirb_init(sc);
1360
1361 result = hdac_irq_alloc(sc);
1362 if (result != 0)
1363 goto hdac_attach_fail;
1364
1365 /* Defer remaining of initialization until interrupts are enabled */
1366 sc->intrhook.ich_func = hdac_attach2;
1367 sc->intrhook.ich_arg = (void *)sc;
1368 if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) {
1369 sc->intrhook.ich_func = NULL;
1370 hdac_attach2((void *)sc);
1371 }
1372
1373 return (0);
1374
1375 hdac_attach_fail:
1376 hdac_irq_free(sc);
1377 if (sc->streams != NULL)
1378 for (i = 0; i < sc->num_ss; i++)
1379 hdac_dma_free(sc, &sc->streams[i].bdl);
1380 free(sc->streams, M_HDAC);
1381 hdac_dma_free(sc, &sc->rirb_dma);
1382 hdac_dma_free(sc, &sc->corb_dma);
1383 hdac_mem_free(sc);
1384 mtx_destroy(&sc->lock);
1385
1386 return (ENXIO);
1387 }
1388
/*
 * Sysctl handler: any non-zero write asks every attached codec child to
 * dump its pin states.  Values 100 and 101 are temporary debug hooks that
 * force a suspend or resume cycle, respectively.
 */
static int
sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS)
{
	struct hdac_softc *sc;
	device_t *devlist;
	device_t dev;
	int devcount, i, err, val;

	dev = oidp->oid_arg1;
	sc = device_get_softc(dev);
	if (sc == NULL)
		return (EINVAL);
	val = 0;
	err = sysctl_handle_int(oidp, &val, 0, req);
	/* Read-only access or a zero write is a no-op. */
	if (err != 0 || req->newptr == NULL || val == 0)
		return (err);

	/* XXX: Temporary. For debugging. */
	if (val == 100) {
		hdac_suspend(dev);
		return (0);
	} else if (val == 101) {
		hdac_resume(dev);
		return (0);
	}

	/* The device list must be snapshotted under the topology lock. */
	bus_topo_lock();

	if ((err = device_get_children(dev, &devlist, &devcount)) != 0) {
		bus_topo_unlock();
		return (err);
	}

	hdac_lock(sc);
	for (i = 0; i < devcount; i++)
		HDAC_PINDUMP(devlist[i]);
	hdac_unlock(sc);

	bus_topo_unlock();

	free(devlist, M_TEMP);
	return (0);
}
1432
/*
 * Compute the payload data rate (bits per second) implied by an HDA
 * stream format word: base rate (44.1/48 kHz) scaled by the multiple and
 * divisor fields, times sample width, times channel count.
 */
static int
hdac_mdata_rate(uint16_t fmt)
{
	/* Sample width for each encoded BITS value. */
	static const int width_tab[8] = { 8, 16, 32, 32, 32, 32, 32, 32 };
	int stream_rate, stream_bits;

	/* Bit 14 selects the 44.1 kHz family; otherwise 48 kHz. */
	stream_rate = (fmt & (1 << 14)) ? 44100 : 48000;
	stream_rate = stream_rate * (((fmt >> 11) & 0x07) + 1);
	stream_rate = stream_rate / (((fmt >> 8) & 0x07) + 1);
	stream_bits = width_tab[(fmt >> 4) & 0x03] * ((fmt & 0x0f) + 1);
	return (stream_rate * stream_bits);
}
1449
/*
 * Compute the link bandwidth (bits per second) a stream format occupies
 * on the HDA bus.  Input streams are rounded up to whole bytes plus 10
 * bits of per-sample framing overhead.
 */
static int
hdac_bdata_rate(uint16_t fmt, int output)
{
	/* On-link sample width for each encoded BITS value. */
	static const int width_tab[8] = { 8, 16, 20, 24, 32, 32, 32, 32 };
	int link_rate, link_bits;

	link_rate = 48000 * (((fmt >> 11) & 0x07) + 1);
	link_bits = width_tab[(fmt >> 4) & 0x03] * ((fmt & 0x0f) + 1);
	if (output == 0)
		link_bits = ((link_bits + 7) & ~0x07) + 10;
	return (link_rate * link_bits);
}
1464
/*
 * Recompute the polling interval from the running streams' block sizes
 * and reschedule (or stop) the polling callout.  No-op unless the driver
 * is in polling mode.
 */
static void
hdac_poll_reinit(struct hdac_softc *sc)
{
	int i, pollticks, min = 1000000;
	struct hdac_stream *s;

	if (sc->polling == 0)
		return;
	/* With unsolicited-event consumers, poll at least twice a second. */
	if (sc->unsol_registered > 0)
		min = hz / 2;
	for (i = 0; i < sc->num_ss; i++) {
		s = &sc->streams[i];
		if (s->running == 0)
			continue;
		/* Ticks per block, halved so we poll twice per block. */
		pollticks = ((uint64_t)hz * s->blksz) /
		    (hdac_mdata_rate(s->format) / 8);
		pollticks >>= 1;
		/* Clamp to [1 tick, 1 second]. */
		if (pollticks > hz)
			pollticks = hz;
		if (pollticks < 1)
			pollticks = 1;
		if (min > pollticks)
			min = pollticks;
	}
	sc->poll_ival = min;
	/* min unchanged means nothing to poll for: stop the callout. */
	if (min == 1000000)
		callout_stop(&sc->poll_callout);
	else
		callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc);
}
1495
/*
 * Sysctl handler toggling between interrupt-driven (0) and polled (1)
 * operation at run time.
 */
static int
sysctl_hdac_polling(SYSCTL_HANDLER_ARGS)
{
	struct hdac_softc *sc;
	device_t dev;
	uint32_t ctl;
	int err, val;

	dev = oidp->oid_arg1;
	sc = device_get_softc(dev);
	if (sc == NULL)
		return (EINVAL);
	hdac_lock(sc);
	val = sc->polling;
	hdac_unlock(sc);
	err = sysctl_handle_int(oidp, &val, 0, req);

	if (err != 0 || req->newptr == NULL)
		return (err);
	if (val < 0 || val > 1)
		return (EINVAL);

	hdac_lock(sc);
	if (val != sc->polling) {
		if (val == 0) {
			/*
			 * Switching to interrupts: stop the callout, then
			 * drop the lock because callout_drain() may sleep,
			 * and finally unmask the global interrupt enable.
			 */
			callout_stop(&sc->poll_callout);
			hdac_unlock(sc);
			callout_drain(&sc->poll_callout);
			hdac_lock(sc);
			sc->polling = 0;
			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
			ctl |= HDAC_INTCTL_GIE;
			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
		} else {
			/* Switching to polling: mask global interrupts. */
			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
			ctl &= ~HDAC_INTCTL_GIE;
			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
			sc->polling = 1;
			hdac_poll_reinit(sc);
		}
	}
	hdac_unlock(sc);

	return (err);
}
1541
/*
 * Second attach stage, run from the config intrhook (or directly when
 * interrupts are already available): start the CORB/RIRB engines, scan
 * STATESTS for codecs that signalled presence after reset, and add an
 * hdacc child device for each responding codec.
 */
static void
hdac_attach2(void *arg)
{
	struct hdac_softc *sc;
	device_t child;
	uint32_t vendorid, revisionid;
	int i;
	uint16_t statests;

	sc = (struct hdac_softc *)arg;

	hdac_lock(sc);

	/* Remove ourselves from the config hooks */
	if (sc->intrhook.ich_func != NULL) {
		config_intrhook_disestablish(&sc->intrhook);
		sc->intrhook.ich_func = NULL;
	}

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "Starting CORB Engine...\n");
	);
	hdac_corb_start(sc);
	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "Starting RIRB Engine...\n");
	);
	hdac_rirb_start(sc);

	/*
	 * Clear HDAC_WAKEEN as at present we have no use for SDI wake
	 * (status change) interrupts. The documentation says that we
	 * should not make any assumptions about the state of this register
	 * and set it explicitly.
	 * NB: this needs to be done before the interrupt is enabled as
	 * the handler does not expect this interrupt source.
	 */
	HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0);

	/*
	 * Read and clear post-reset SDI wake status.
	 * Each set bit corresponds to a codec that came out of reset.
	 */
	statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS);
	HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests);

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev,
		    "Enabling controller interrupt...\n");
	);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
	    HDAC_GCTL_UNSOL);
	/* In polling mode the controller interrupt stays masked. */
	if (sc->polling == 0) {
		HDAC_WRITE_4(&sc->mem, HDAC_INTCTL,
		    HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
	}
	DELAY(1000);

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "Scanning HDA codecs ...\n");
	);
	hdac_unlock(sc);
	for (i = 0; i < HDAC_CODEC_MAX; i++) {
		if (HDAC_STATESTS_SDIWAKE(statests, i)) {
			HDA_BOOTHVERBOSE(
				device_printf(sc->dev,
				    "Found CODEC at address %d\n", i);
			);
			/* Query the codec's identity registers. */
			hdac_lock(sc);
			vendorid = hdac_send_command(sc, i,
			    HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID));
			revisionid = hdac_send_command(sc, i,
			    HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID));
			hdac_unlock(sc);
			if (vendorid == HDA_INVALID &&
			    revisionid == HDA_INVALID) {
				device_printf(sc->dev,
				    "CODEC at address %d not responding!\n", i);
				continue;
			}
			sc->codecs[i].vendor_id =
			    HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid);
			sc->codecs[i].device_id =
			    HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid);
			sc->codecs[i].revision_id =
			    HDA_PARAM_REVISION_ID_REVISION_ID(revisionid);
			sc->codecs[i].stepping_id =
			    HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid);
			child = device_add_child(sc->dev, "hdacc", DEVICE_UNIT_ANY);
			if (child == NULL) {
				device_printf(sc->dev,
				    "Failed to add CODEC device\n");
				continue;
			}
			/* The codec address reaches the child via ivars. */
			device_set_ivars(child, (void *)(intptr_t)i);
			sc->codecs[i].dev = child;
		}
	}
	bus_attach_children(sc->dev);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
	    "pindump", CTLTYPE_INT | CTLFLAG_RW, sc->dev,
	    sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data");
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
	    "polling", CTLTYPE_INT | CTLFLAG_RW, sc->dev,
	    sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode");
}
1650
/****************************************************************************
 * int hdac_shutdown(device_t)
 *
 * Power down HDA bus and codecs.
 ****************************************************************************/
static int
hdac_shutdown(device_t dev)
{
	struct hdac_softc *sc = device_get_softc(dev);

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Shutdown...\n");
	);
	/* Stop deferred work before touching the hardware. */
	callout_drain(&sc->poll_callout);
	taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
	bus_generic_shutdown(dev);

	hdac_lock(sc);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Reset controller...\n");
	);
	/* Leave the controller held in reset (second argument false). */
	hdac_reset(sc, false);
	hdac_unlock(sc);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Shutdown done\n");
	);
	return (0);
}
1679
/****************************************************************************
 * int hdac_suspend(device_t)
 *
 * Suspend and power down HDA bus and codecs.
 ****************************************************************************/
static int
hdac_suspend(device_t dev)
{
	struct hdac_softc *sc = device_get_softc(dev);

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Suspend...\n");
	);
	/* Suspend children before stopping the controller. */
	bus_generic_suspend(dev);

	hdac_lock(sc);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Reset controller...\n");
	);
	callout_stop(&sc->poll_callout);
	/* Hold the controller in reset across the suspend. */
	hdac_reset(sc, false);
	hdac_unlock(sc);
	/* Draining may sleep, so do it after dropping the lock. */
	callout_drain(&sc->poll_callout);
	taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Suspend done\n");
	);
	return (0);
}
1709
/****************************************************************************
 * int hdac_resume(device_t)
 *
 * Powerup and restore HDA bus and codecs state.
 ****************************************************************************/
static int
hdac_resume(device_t dev)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int error;

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Resume...\n");
	);
	hdac_lock(sc);

	/* Quiesce everything */
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Reset controller...\n");
	);
	hdac_reset(sc, true);

	/* Initialize the CORB and RIRB */
	hdac_corb_init(sc);
	hdac_rirb_init(sc);

	/* Same bring-up sequence as hdac_attach2(). */
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Starting CORB Engine...\n");
	);
	hdac_corb_start(sc);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Starting RIRB Engine...\n");
	);
	hdac_rirb_start(sc);

	/*
	 * Clear HDAC_WAKEEN as at present we have no use for SDI wake
	 * (status change) events. The documentation says that we should
	 * not make any assumptions about the state of this register and
	 * set it explicitly.
	 * Also, clear HDAC_STATESTS.
	 * NB: this needs to be done before the interrupt is enabled as
	 * the handler does not expect this interrupt source.
	 */
	HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0);
	HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK);

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Enabling controller interrupt...\n");
	);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
	    HDAC_GCTL_UNSOL);
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
	DELAY(1000);
	/* Restart polling if it was active before the suspend. */
	hdac_poll_reinit(sc);
	hdac_unlock(sc);

	/* Resume children (codecs, PCM) after the link is back up. */
	error = bus_generic_resume(dev);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Resume done\n");
	);
	return (error);
}
1773
/****************************************************************************
 * int hdac_detach(device_t)
 *
 * Detach and free up resources utilized by the hdac device.
 ****************************************************************************/
static int
hdac_detach(device_t dev)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int i, error;

	/* Quiesce polling, interrupts and deferred work first. */
	callout_drain(&sc->poll_callout);
	hdac_irq_free(sc);
	taskqueue_drain(taskqueue_thread, &sc->unsolq_task);

	/* Bail out (still attached) if a child refuses to detach. */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	hdac_lock(sc);
	hdac_reset(sc, false);
	hdac_unlock(sc);

	/* Release DMA and bus resources in reverse order of attach. */
	for (i = 0; i < sc->num_ss; i++)
		hdac_dma_free(sc, &sc->streams[i].bdl);
	free(sc->streams, M_HDAC);
	hdac_dma_free(sc, &sc->pos_dma);
	hdac_dma_free(sc, &sc->rirb_dma);
	hdac_dma_free(sc, &sc->corb_dma);
	if (sc->chan_dmat != NULL) {
		bus_dma_tag_destroy(sc->chan_dmat);
		sc->chan_dmat = NULL;
	}
	hdac_mem_free(sc);
	mtx_destroy(&sc->lock);
	return (0);
}
1811
1812 static bus_dma_tag_t
hdac_get_dma_tag(device_t dev,device_t child)1813 hdac_get_dma_tag(device_t dev, device_t child)
1814 {
1815 struct hdac_softc *sc = device_get_softc(dev);
1816
1817 return (sc->chan_dmat);
1818 }
1819
1820 static int
hdac_print_child(device_t dev,device_t child)1821 hdac_print_child(device_t dev, device_t child)
1822 {
1823 int retval;
1824
1825 retval = bus_print_child_header(dev, child);
1826 retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child));
1827 retval += bus_print_child_footer(dev, child);
1828
1829 return (retval);
1830 }
1831
1832 static int
hdac_child_location(device_t dev,device_t child,struct sbuf * sb)1833 hdac_child_location(device_t dev, device_t child, struct sbuf *sb)
1834 {
1835
1836 sbuf_printf(sb, "cad=%d", (int)(intptr_t)device_get_ivars(child));
1837 return (0);
1838 }
1839
1840 static int
hdac_child_pnpinfo_method(device_t dev,device_t child,struct sbuf * sb)1841 hdac_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb)
1842 {
1843 struct hdac_softc *sc = device_get_softc(dev);
1844 nid_t cad = (uintptr_t)device_get_ivars(child);
1845
1846 sbuf_printf(sb,
1847 "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x",
1848 sc->codecs[cad].vendor_id, sc->codecs[cad].device_id,
1849 sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id);
1850 return (0);
1851 }
1852
/*
 * Bus method: export controller and codec properties to hdacc children
 * through instance variables.  Returns ENOENT for unknown ivars.
 */
static int
hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);

	switch (which) {
	case HDA_IVAR_CODEC_ID:
		*result = cad;
		break;
	case HDA_IVAR_VENDOR_ID:
		*result = sc->codecs[cad].vendor_id;
		break;
	case HDA_IVAR_DEVICE_ID:
		*result = sc->codecs[cad].device_id;
		break;
	case HDA_IVAR_REVISION_ID:
		*result = sc->codecs[cad].revision_id;
		break;
	case HDA_IVAR_STEPPING_ID:
		*result = sc->codecs[cad].stepping_id;
		break;
	case HDA_IVAR_SUBVENDOR_ID:
		*result = pci_get_subvendor(dev);
		break;
	case HDA_IVAR_SUBDEVICE_ID:
		*result = pci_get_subdevice(dev);
		break;
	case HDA_IVAR_DMA_NOCACHE:
		/* Non-zero when buffers must use uncacheable DMA memory. */
		*result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0;
		break;
	case HDA_IVAR_STRIPES_MASK:
		/* Bitmask with one bit per supported stripe factor. */
		*result = (1 << (1 << sc->num_sdo)) - 1;
		break;
	default:
		return (ENOENT);
	}
	return (0);
}
1892
1893 static struct mtx *
hdac_get_mtx(device_t dev,device_t child)1894 hdac_get_mtx(device_t dev, device_t child)
1895 {
1896 struct hdac_softc *sc = device_get_softc(dev);
1897
1898 return (&sc->lock);
1899 }
1900
1901 static uint32_t
hdac_codec_command(device_t dev,device_t child,uint32_t verb)1902 hdac_codec_command(device_t dev, device_t child, uint32_t verb)
1903 {
1904
1905 return (hdac_send_command(device_get_softc(dev),
1906 (intptr_t)device_get_ivars(child), verb));
1907 }
1908
1909 static int
hdac_find_stream(struct hdac_softc * sc,int dir,int stream)1910 hdac_find_stream(struct hdac_softc *sc, int dir, int stream)
1911 {
1912 int i, ss;
1913
1914 ss = -1;
1915 /* Allocate ISS/OSS first. */
1916 if (dir == 0) {
1917 for (i = 0; i < sc->num_iss; i++) {
1918 if (sc->streams[i].stream == stream) {
1919 ss = i;
1920 break;
1921 }
1922 }
1923 } else {
1924 for (i = 0; i < sc->num_oss; i++) {
1925 if (sc->streams[i + sc->num_iss].stream == stream) {
1926 ss = i + sc->num_iss;
1927 break;
1928 }
1929 }
1930 }
1931 /* Fallback to BSS. */
1932 if (ss == -1) {
1933 for (i = 0; i < sc->num_bss; i++) {
1934 if (sc->streams[i + sc->num_iss + sc->num_oss].stream
1935 == stream) {
1936 ss = i + sc->num_iss + sc->num_oss;
1937 break;
1938 }
1939 }
1940 }
1941 return (ss);
1942 }
1943
/*
 * Reserve a free stream engine of the given direction (0 = input,
 * 1 = output) for a codec child, accounting link bandwidth and handing
 * back a pointer into the DMA position buffer when requested.  Returns
 * the allocated stream number (>= 1), or 0 when no engine or bandwidth
 * is available.
 */
static int
hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe,
    uint32_t **dmapos)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);
	int stream, ss, bw, maxbw, prevbw;

	/* Look for empty stream. */
	ss = hdac_find_stream(sc, dir, 0);

	/* Return if found nothing. */
	if (ss < 0)
		return (0);

	/* Check bus bandwidth. */
	bw = hdac_bdata_rate(format, dir);
	if (dir == 1) {
		/*
		 * Output: scale by the stripe factor; the budget covers
		 * all SDO lines together.
		 */
		bw *= 1 << (sc->num_sdo - stripe);
		prevbw = sc->sdo_bw_used;
		maxbw = 48000 * 960 * (1 << sc->num_sdo);
	} else {
		/* Input: bandwidth is tracked per codec (its SDI line). */
		prevbw = sc->codecs[cad].sdi_bw_used;
		maxbw = 48000 * 464;
	}
	HDA_BOOTHVERBOSE(
		device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n",
		    (bw + prevbw) / 1000, maxbw / 1000,
		    bw + prevbw > maxbw ? " -- OVERFLOW!" : "");
	);
	if (bw + prevbw > maxbw)
		return (0);
	if (dir == 1)
		sc->sdo_bw_used += bw;
	else
		sc->codecs[cad].sdi_bw_used += bw;

	/*
	 * Allocate stream number: inputs count up from 1, bidirectional
	 * engines count down from 15 to avoid colliding with outputs.
	 */
	if (ss >= sc->num_iss + sc->num_oss)
		stream = 15 - (ss - sc->num_iss - sc->num_oss);
	else if (ss >= sc->num_iss)
		stream = ss - sc->num_iss + 1;
	else
		stream = ss + 1;

	sc->streams[ss].dev = child;
	sc->streams[ss].dir = dir;
	sc->streams[ss].stream = stream;
	sc->streams[ss].bw = bw;
	sc->streams[ss].format = format;
	sc->streams[ss].stripe = stripe;
	if (dmapos != NULL) {
		/* Each engine owns an 8-byte slot in the position buffer. */
		if (sc->pos_dma.dma_vaddr != NULL)
			*dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8);
		else
			*dmapos = NULL;
	}
	return (stream);
}
2003
2004 static void
hdac_stream_free(device_t dev,device_t child,int dir,int stream)2005 hdac_stream_free(device_t dev, device_t child, int dir, int stream)
2006 {
2007 struct hdac_softc *sc = device_get_softc(dev);
2008 nid_t cad = (uintptr_t)device_get_ivars(child);
2009 int ss;
2010
2011 ss = hdac_find_stream(sc, dir, stream);
2012 KASSERT(ss >= 0,
2013 ("Free for not allocated stream (%d/%d)\n", dir, stream));
2014 if (dir == 1)
2015 sc->sdo_bw_used -= sc->streams[ss].bw;
2016 else
2017 sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw;
2018 sc->streams[ss].stream = 0;
2019 sc->streams[ss].dev = NULL;
2020 }
2021
/*
 * Program and start a DMA stream: build the Buffer Descriptor List for
 * `blkcnt` contiguous blocks of `blksz` bytes starting at `buf`, program
 * the stream descriptor registers, enable the stream's interrupt and set
 * the RUN bit.  Always returns 0.
 */
static int
hdac_stream_start(device_t dev, device_t child, int dir, int stream,
    bus_addr_t buf, int blksz, int blkcnt)
{
	struct hdac_softc *sc = device_get_softc(dev);
	struct hdac_bdle *bdle;
	uint64_t addr;
	int i, ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Start for not allocated stream (%d/%d)\n", dir, stream));

	/*
	 * Fill the BDL: one entry per block, little-endian 64-bit address
	 * split across addrl/addrh, with interrupt-on-completion set so
	 * every finished block raises an interrupt.
	 */
	addr = (uint64_t)buf;
	bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr;
	for (i = 0; i < blkcnt; i++, bdle++) {
		bdle->addrl = htole32((uint32_t)addr);
		bdle->addrh = htole32((uint32_t)(addr >> 32));
		bdle->len = htole32(blksz);
		bdle->ioc = htole32(1);
		addr += blksz;
	}

	/* Make the BDL visible to the device before enabling DMA. */
	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE);

	/* Each stream descriptor occupies 0x20 bytes of register space. */
	off = ss << 5;
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt);
	HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1);
	addr = sc->streams[ss].bdl.dma_paddr;
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr);
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32));

	/* Set direction, stream number and stripe control. */
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2);
	if (dir)
		ctl |= HDAC_SDCTL2_DIR;
	else
		ctl &= ~HDAC_SDCTL2_DIR;
	ctl &= ~HDAC_SDCTL2_STRM_MASK;
	ctl |= stream << HDAC_SDCTL2_STRM_SHIFT;
	ctl &= ~HDAC_SDCTL2_STRIPE_MASK;
	ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl);

	HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format);

	/* Unmask this stream in the global interrupt control register. */
	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl |= 1 << ss;
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	/* Clear stale status bits, then enable interrupts and start DMA. */
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS,
	    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	sc->streams[ss].blksz = blksz;
	sc->streams[ss].running = 1;
	hdac_poll_reinit(sc);
	return (0);
}
2085
2086 static void
hdac_stream_stop(device_t dev,device_t child,int dir,int stream)2087 hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
2088 {
2089 struct hdac_softc *sc = device_get_softc(dev);
2090 int ss, off;
2091 uint32_t ctl;
2092
2093 ss = hdac_find_stream(sc, dir, stream);
2094 KASSERT(ss >= 0,
2095 ("Stop for not allocated stream (%d/%d)\n", dir, stream));
2096
2097 bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
2098 sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);
2099
2100 off = ss << 5;
2101 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2102 ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
2103 HDAC_SDCTL_RUN);
2104 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
2105
2106 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
2107 ctl &= ~(1 << ss);
2108 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
2109
2110 sc->streams[ss].running = 0;
2111 hdac_poll_reinit(sc);
2112 }
2113
/*
 * Reset a stream descriptor: set SRST, wait for the hardware to report
 * it, then clear SRST and wait for the hardware to leave reset.  Each
 * wait polls up to 1000 times with 10us delays (~10ms total); on timeout
 * a warning is printed and the sequence continues regardless.
 */
static void
hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int timeout = 1000;
	int to = timeout;
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));

	/* Assert stream reset and wait for the controller to ack it. */
	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (ctl & HDAC_SDCTL_SRST)
			break;
		DELAY(10);
	} while (--to);
	if (!(ctl & HDAC_SDCTL_SRST))
		device_printf(dev, "Reset setting timeout\n");
	/* Deassert reset and wait for the controller to clear SRST. */
	ctl &= ~HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	to = timeout;
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (!(ctl & HDAC_SDCTL_SRST))
			break;
		DELAY(10);
	} while (--to);
	if (ctl & HDAC_SDCTL_SRST)
		device_printf(dev, "Reset timeout!\n");
}
2151
2152 static uint32_t
hdac_stream_getptr(device_t dev,device_t child,int dir,int stream)2153 hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
2154 {
2155 struct hdac_softc *sc = device_get_softc(dev);
2156 int ss, off;
2157
2158 ss = hdac_find_stream(sc, dir, stream);
2159 KASSERT(ss >= 0,
2160 ("Reset for not allocated stream (%d/%d)\n", dir, stream));
2161
2162 off = ss << 5;
2163 return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
2164 }
2165
2166 static int
hdac_unsol_alloc(device_t dev,device_t child,int tag)2167 hdac_unsol_alloc(device_t dev, device_t child, int tag)
2168 {
2169 struct hdac_softc *sc = device_get_softc(dev);
2170
2171 sc->unsol_registered++;
2172 hdac_poll_reinit(sc);
2173 return (tag);
2174 }
2175
2176 static void
hdac_unsol_free(device_t dev,device_t child,int tag)2177 hdac_unsol_free(device_t dev, device_t child, int tag)
2178 {
2179 struct hdac_softc *sc = device_get_softc(dev);
2180
2181 sc->unsol_registered--;
2182 hdac_poll_reinit(sc);
2183 }
2184
/* Method table wiring the controller into newbus and the HDAC bus. */
static device_method_t hdac_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe, hdac_probe),
	DEVMETHOD(device_attach, hdac_attach),
	DEVMETHOD(device_detach, hdac_detach),
	DEVMETHOD(device_shutdown, hdac_shutdown),
	DEVMETHOD(device_suspend, hdac_suspend),
	DEVMETHOD(device_resume, hdac_resume),
	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag, hdac_get_dma_tag),
	DEVMETHOD(bus_print_child, hdac_print_child),
	DEVMETHOD(bus_child_location, hdac_child_location),
	DEVMETHOD(bus_child_pnpinfo, hdac_child_pnpinfo_method),
	DEVMETHOD(bus_read_ivar, hdac_read_ivar),
	/* hdac interface: codec access and stream management for children */
	DEVMETHOD(hdac_get_mtx, hdac_get_mtx),
	DEVMETHOD(hdac_codec_command, hdac_codec_command),
	DEVMETHOD(hdac_stream_alloc, hdac_stream_alloc),
	DEVMETHOD(hdac_stream_free, hdac_stream_free),
	DEVMETHOD(hdac_stream_start, hdac_stream_start),
	DEVMETHOD(hdac_stream_stop, hdac_stream_stop),
	DEVMETHOD(hdac_stream_reset, hdac_stream_reset),
	DEVMETHOD(hdac_stream_getptr, hdac_stream_getptr),
	DEVMETHOD(hdac_unsol_alloc, hdac_unsol_alloc),
	DEVMETHOD(hdac_unsol_free, hdac_unsol_free),
	DEVMETHOD_END
};
2211
/* Driver declaration: name, methods, and per-instance softc size. */
static driver_t hdac_driver = {
	"hdac",
	hdac_methods,
	sizeof(struct hdac_softc),
};

/* Attach to the PCI bus; ordered last so sound infrastructure is ready. */
DRIVER_MODULE_ORDERED(snd_hda, pci, hdac_driver, NULL, NULL, SI_ORDER_ANY);
2219