// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel 7300 class Memory Controllers kernel module (Clarksboro)
 *
 * Copyright (c) 2010 by:
 *	 Mauro Carvalho Chehab
 *
 * Red Hat Inc. https://www.redhat.com
 *
 * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
 *	http://www.intel.com/Assets/PDF/datasheet/318082.pdf
 *
 * TODO: The chipset also allows checking for PCI Express errors. Currently,
 *	 the driver covers only memory errors.
 *
 * This driver uses the "csrows" EDAC attribute to represent the DIMM slot#
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/string_choices.h>

#include "edac_module.h"

/*
 * Alter this version for the I7300 module when modifications are made
 */
#define I7300_REVISION	" Ver: 1.0.0"

#define EDAC_MOD_STR	"i7300_edac"

#define i7300_printk(level, fmt, arg...) \
	edac_printk(level, "i7300", fmt, ##arg)

#define i7300_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg)

/***********************************************
 * i7300 Limit constants Structs and static vars
 ***********************************************/

/*
 * Memory topology is organized as:
 *	Branch 0 - 2 channels: channels 0 and 1 (FDB0 PCI dev 21.0)
 *	Branch 1 - 2 channels: channels 2 and 3 (FDB1 PCI dev 22.0)
 * Each channel can have up to 8 DIMM sets (called SLOTS).
 * Slots should generally be filled in pairs,
 *	except in Single Channel mode of operation,
 *	where only slot 0/channel 0 is filled.
 * In normal operation mode, the two channels on a branch should be
 *	filled together for the same SLOT#.
 * When in mirrored mode, Branch 1 replicates the memory at Branch 0, so
 *	the four channels on both branches should be filled.
 */

/* Limits for i7300 */
#define MAX_SLOTS		8
#define MAX_BRANCHES		2
#define MAX_CH_PER_BRANCH	2
#define MAX_CHANNELS		(MAX_CH_PER_BRANCH * MAX_BRANCHES)
#define MAX_MIR			3

#define to_channel(ch, branch)	((((branch)) << 1) | (ch))

#define to_csrow(slot, ch, branch) \
		(to_channel(ch, branch) | ((slot) << 2))
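
/*
 * Worked example (illustrative values only): a DIMM on branch 1, channel 1
 * of that branch, slot 3 maps to:
 *	to_channel(1, 1) = (1 << 1) | 1 = 3	(global channel #3)
 *	to_csrow(3, 1, 1) = 3 | (3 << 2) = 15	(csrow #15)
 * i.e. the two low bits of the csrow number select the global channel and
 * the upper bits select the slot.
 */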

/* Device name and register DID (Device ID) */
struct i7300_dev_info {
	const char *ctl_name;	/* name for this device */
	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
};

/* Table of devices attributes supported by this driver */
static const struct i7300_dev_info i7300_devs[] = {
	{
		.ctl_name = "I7300",
		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
	},
};

struct i7300_dimm_info {
	int megabytes;		/* size, 0 means not present */
};

/* driver private data structure */
struct i7300_pvt {
	struct pci_dev *pci_dev_16_0_fsb_ctlr;		/* 16.0 */
	struct pci_dev *pci_dev_16_1_fsb_addr_map;	/* 16.1 */
	struct pci_dev *pci_dev_16_2_fsb_err_regs;	/* 16.2 */
	struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES];	/* 21.0 and 22.0 */

	u16 tolm;			/* top of low memory */
	u64 ambase;			/* AMB BAR */

	u32 mc_settings;		/* Report several settings */
	u32 mc_settings_a;

	u16 mir[MAX_MIR];		/* Memory Interleave Reg */

	u16 mtr[MAX_SLOTS][MAX_BRANCHES];	/* Memory Technology Reg */
	u16 ambpresent[MAX_CHANNELS];		/* AMB present regs */

	/* DIMM information matrix, allocating architecture maximums */
	struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS];

	/* Temporary buffer for use when preparing error messages */
	char *tmp_prt_buffer;
};

/* FIXME: Why do we need to have this static? */
static struct edac_pci_ctl_info *i7300_pci;

/***************************************************
 * i7300 Register definitions for memory enumeration
 ***************************************************/

/*
 * Device 16,
 * Function 0: System Address (not documented)
 * Function 1: Memory Branch Map, Control, Errors Register
 */

/* OFFSETS for Function 0 */
#define AMBASE			0x48 /* AMB Mem Mapped Reg Region Base */
#define MAXCH			0x56 /* Max Channel Number */
#define MAXDIMMPERCH		0x57 /* Max DIMM PER Channel Number */

/* OFFSETS for Function 1 */
#define MC_SETTINGS		0x40
#define IS_MIRRORED(mc)		((mc) & (1 << 16))
#define IS_ECC_ENABLED(mc)	((mc) & (1 << 5))
#define IS_RETRY_ENABLED(mc)	((mc) & (1 << 31))
#define IS_SCRBALGO_ENHANCED(mc)	((mc) & (1 << 8))
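
/*
 * Example (hypothetical register value): if MC_SETTINGS reads 0x00010120,
 * then bit 16 (mirroring), bit 8 (enhanced scrub algorithm) and bit 5 (ECC)
 * are set, so IS_MIRRORED(), IS_SCRBALGO_ENHANCED() and IS_ECC_ENABLED()
 * are all true, while IS_RETRY_ENABLED() (bit 31) is false.
 */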

#define MC_SETTINGS_A		0x58
#define IS_SINGLE_MODE(mca)	((mca) & (1 << 14))

#define TOLM			0x6C

#define MIR0			0x80
#define MIR1			0x84
#define MIR2			0x88

/*
 * Note: Other Intel EDAC drivers use AMBPRESENT to identify the available
 * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it
 * seems that we cannot use this information directly for the same purpose.
 * Each memory slot may have up to 2 AMB interfaces, one for the incoming
 * and another for the outgoing interface to the next slot.
 * For now, the driver just stores the AMB present registers, but relies only
 * on the MTR info to detect memory.
 * The datasheet is also not clear about how to map each AMBPRESENT register
 * to one of the 4 available channels.
 */
#define AMBPRESENT_0	0x64
#define AMBPRESENT_1	0x66

static const u16 mtr_regs[MAX_SLOTS] = {
	0x80, 0x84, 0x88, 0x8c,
	0x82, 0x86, 0x8a, 0x8e
};

/*
 * Defines to extract the various fields from the
 *	MTRx - Memory Technology Registers
 */
#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (1 << 8))
#define MTR_DIMMS_ETHROTTLE(mtr)	((mtr) & (1 << 7))
#define MTR_DRAM_WIDTH(mtr)		(((mtr) & (1 << 6)) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr)		(((mtr) & (1 << 5)) ? 8 : 4)
#define MTR_DIMM_RANKS(mtr)		(((mtr) & (1 << 4)) ? 1 : 0)
#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
#define MTR_DRAM_BANKS_ADDR_BITS	2
#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)
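
/*
 * Worked example (hypothetical MTR value): mtr = 0x0179 has bit 8 set
 * (DIMM present), bit 6 set (x8 DRAM width), bit 5 set (8 banks),
 * bit 4 set (dual rank), a rows field of 2 (15 row address bits) and
 * a cols field of 1 (11 column address bits), so:
 *	MTR_DIMMS_PRESENT(0x0179)	-> true
 *	MTR_DRAM_WIDTH(0x0179)		-> 8
 *	MTR_DIMM_ROWS_ADDR_BITS(0x0179)	-> 15
 *	MTR_DIMM_COLS_ADDR_BITS(0x0179)	-> 11
 */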

/************************************************
 * i7300 Register definitions for error detection
 ************************************************/

/*
 * Device 16.1: FBD Error Registers
 */
#define FERR_FAT_FBD	0x98
static const char *ferr_fat_fbd_name[] = {
	[22] = "Non-Redundant Fast Reset Timeout",
	[2]  = ">Tmid Thermal event with intelligent throttling disabled",
	[1]  = "Memory or FBD configuration CRC read error",
	[0]  = "Memory Write error on non-redundant retry or "
	       "FBD configuration Write error on retry",
};
#define GET_FBD_FAT_IDX(fbderr)	(((fbderr) >> 28) & 3)
#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))

#define FERR_NF_FBD	0xa0
static const char *ferr_nf_fbd_name[] = {
	[24] = "DIMM-Spare Copy Completed",
	[23] = "DIMM-Spare Copy Initiated",
	[22] = "Redundant Fast Reset Timeout",
	[21] = "Memory Write error on redundant retry",
	[18] = "SPD protocol Error",
	[17] = "FBD Northbound parity error on FBD Sync Status",
	[16] = "Correctable Patrol Data ECC",
	[15] = "Correctable Resilver- or Spare-Copy Data ECC",
	[14] = "Correctable Mirrored Demand Data ECC",
	[13] = "Correctable Non-Mirrored Demand Data ECC",
	[11] = "Memory or FBD configuration CRC read error",
	[10] = "FBD Configuration Write error on first attempt",
	[9]  = "Memory Write error on first attempt",
	[8]  = "Non-Aliased Uncorrectable Patrol Data ECC",
	[7]  = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[6]  = "Non-Aliased Uncorrectable Mirrored Demand Data ECC",
	[5]  = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[4]  = "Aliased Uncorrectable Patrol Data ECC",
	[3]  = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[2]  = "Aliased Uncorrectable Mirrored Demand Data ECC",
	[1]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[0]  = "Uncorrectable Data ECC on Replay",
};
#define GET_FBD_NF_IDX(fbderr)	(((fbderr) >> 28) & 3)
#define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
			      (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
			      (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
			      (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
			      (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
			      (1 << 1)  | (1 << 0))

#define EMASK_FBD	0xa8
#define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\
			    (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\
			    (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\
			    (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\
			    (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
			    (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
			    (1 << 1)  | (1 << 0))

/*
 * Device 16.2: Global Error Registers
 */

#define FERR_GLOBAL_HI	0x48
static const char *ferr_global_hi_name[] = {
	[3] = "FSB 3 Fatal Error",
	[2] = "FSB 2 Fatal Error",
	[1] = "FSB 1 Fatal Error",
	[0] = "FSB 0 Fatal Error",
};
#define ferr_global_hi_is_fatal(errno)	1

#define FERR_GLOBAL_LO	0x40
static const char *ferr_global_lo_name[] = {
	[31] = "Internal MCH Fatal Error",
	[30] = "Intel QuickData Technology Device Fatal Error",
	[29] = "FSB1 Fatal Error",
	[28] = "FSB0 Fatal Error",
	[27] = "FBD Channel 3 Fatal Error",
	[26] = "FBD Channel 2 Fatal Error",
	[25] = "FBD Channel 1 Fatal Error",
	[24] = "FBD Channel 0 Fatal Error",
	[23] = "PCI Express Device 7 Fatal Error",
	[22] = "PCI Express Device 6 Fatal Error",
	[21] = "PCI Express Device 5 Fatal Error",
	[20] = "PCI Express Device 4 Fatal Error",
	[19] = "PCI Express Device 3 Fatal Error",
	[18] = "PCI Express Device 2 Fatal Error",
	[17] = "PCI Express Device 1 Fatal Error",
	[16] = "ESI Fatal Error",
	[15] = "Internal MCH Non-Fatal Error",
	[14] = "Intel QuickData Technology Device Non Fatal Error",
	[13] = "FSB1 Non-Fatal Error",
	[12] = "FSB 0 Non-Fatal Error",
	[11] = "FBD Channel 3 Non-Fatal Error",
	[10] = "FBD Channel 2 Non-Fatal Error",
	[9]  = "FBD Channel 1 Non-Fatal Error",
	[8]  = "FBD Channel 0 Non-Fatal Error",
	[7]  = "PCI Express Device 7 Non-Fatal Error",
	[6]  = "PCI Express Device 6 Non-Fatal Error",
	[5]  = "PCI Express Device 5 Non-Fatal Error",
	[4]  = "PCI Express Device 4 Non-Fatal Error",
	[3]  = "PCI Express Device 3 Non-Fatal Error",
	[2]  = "PCI Express Device 2 Non-Fatal Error",
	[1]  = "PCI Express Device 1 Non-Fatal Error",
	[0]  = "ESI Non-Fatal Error",
};
#define ferr_global_lo_is_fatal(errno)	((errno < 16) ? 0 : 1)
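
/*
 * Example: bit 24 of FERR_GLOBAL_LO ("FBD Channel 0 Fatal Error") is
 * classified as fatal by ferr_global_lo_is_fatal(24), while bit 8
 * ("FBD Channel 0 Non-Fatal Error") is classified as non-fatal.
 */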

#define NRECMEMA	0xbe
#define NRECMEMA_BANK(v)	(((v) >> 12) & 7)
#define NRECMEMA_RANK(v)	(((v) >> 8) & 15)

#define NRECMEMB	0xc0
#define NRECMEMB_IS_WR(v)	((v) & (1 << 31))
#define NRECMEMB_CAS(v)		(((v) >> 16) & 0x1fff)
#define NRECMEMB_RAS(v)		((v) & 0xffff)

#define REDMEMA		0xdc

#define REDMEMB		0x7c

#define RECMEMA		0xe0
#define RECMEMA_BANK(v)		(((v) >> 12) & 7)
#define RECMEMA_RANK(v)		(((v) >> 8) & 15)

#define RECMEMB		0xe4
#define RECMEMB_IS_WR(v)	((v) & (1 << 31))
#define RECMEMB_CAS(v)		(((v) >> 16) & 0x1fff)
#define RECMEMB_RAS(v)		((v) & 0xffff)
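
/*
 * Worked example (hypothetical register values): if RECMEMA reads 0x3200,
 * then RECMEMA_BANK() = (0x3200 >> 12) & 7 = 3 and RECMEMA_RANK() =
 * (0x3200 >> 8) & 15 = 2. If RECMEMB reads 0x00450123, bit 31 is clear,
 * so RECMEMB_IS_WR() is false (a read), RECMEMB_CAS() = 0x45 and
 * RECMEMB_RAS() = 0x123.
 */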

/********************************************
 * i7300 Functions related to error detection
 ********************************************/

/**
 * get_err_from_table() - Gets the error message from a table
 * @table: table name (array of char *)
 * @size: number of elements in the table
 * @pos: position of the element to be returned
 *
 * This is a small routine that gets the pos-th element of a table. If the
 * element doesn't exist (or it is empty), it returns "Reserved".
 * Instead of calling it directly, it is better to call it via the
 * GET_ERR_FROM_TABLE() macro, which automatically checks the table size
 * via the ARRAY_SIZE() macro.
 */
static const char *get_err_from_table(const char *table[], int size, int pos)
{
	if (unlikely(pos >= size))
		return "Reserved";

	if (unlikely(!table[pos]))
		return "Reserved";

	return table[pos];
}

#define GET_ERR_FROM_TABLE(table, pos) \
	get_err_from_table(table, ARRAY_SIZE(table), pos)
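
/*
 * Usage sketch (illustrative only): for a FERR_NF_FBD error where bit 16 is
 * the first bit set, GET_ERR_FROM_TABLE(ferr_nf_fbd_name, 16) returns
 * "Correctable Patrol Data ECC"; a position with no message, such as
 * GET_ERR_FROM_TABLE(ferr_nf_fbd_name, 20), returns "Reserved".
 */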

/**
 * i7300_process_error_global() - Retrieve the hardware error information from
 *				  the hardware global error registers and
 *				  send it to dmesg
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_process_error_global(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 errnum, error_reg;
	unsigned long errors;
	const char *specific;
	bool is_fatal;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_HI, &error_reg);
	if (unlikely(error_reg)) {
		errors = error_reg;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_global_hi_name));
		specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
		is_fatal = ferr_global_hi_is_fatal(errnum);

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
				       FERR_GLOBAL_HI, error_reg);

		goto error_global;
	}

	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_LO, &error_reg);
	if (unlikely(error_reg)) {
		errors = error_reg;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_global_lo_name));
		specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
		is_fatal = ferr_global_lo_is_fatal(errnum);

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
				       FERR_GLOBAL_LO, error_reg);

		goto error_global;
	}
	return;

error_global:
	i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n",
			is_fatal ? "Fatal" : "NOT fatal", specific);
}

/**
 * i7300_process_fbd_error() - Retrieve the hardware error information from
 *			       the FBD error registers and send it via
 *			       EDAC error API calls
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_process_fbd_error(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 errnum, value, error_reg;
	u16 val16;
	unsigned branch, channel, bank, rank, cas, ras;
	u32 syndrome;

	unsigned long errors;
	const char *specific;
	bool is_wr;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_FAT_FBD, &error_reg);
	if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) {
		errors = error_reg & FERR_FAT_FBD_ERR_MASK;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_fat_fbd_name));
		specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
		branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;

		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
				     NRECMEMA, &val16);
		bank = NRECMEMA_BANK(val16);
		rank = NRECMEMA_RANK(val16);

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      NRECMEMB, &value);
		is_wr = NRECMEMB_IS_WR(value);
		cas = NRECMEMB_CAS(value);
		ras = NRECMEMB_RAS(value);

		/* Clean the error register */
		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				       FERR_FAT_FBD, error_reg);

		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
			 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
			 bank, ras, cas, errors, specific);

		edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
				     branch, -1, rank,
				     is_wr ? "Write error" : "Read error",
				     pvt->tmp_prt_buffer);

	}

	/* read in the 1st NON-FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_NF_FBD, &error_reg);
	if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) {
		errors = error_reg & FERR_NF_FBD_ERR_MASK;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_nf_fbd_name));
		specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
		branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      REDMEMA, &syndrome);

		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
				     RECMEMA, &val16);
		bank = RECMEMA_BANK(val16);
		rank = RECMEMA_RANK(val16);

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      RECMEMB, &value);
		is_wr = RECMEMB_IS_WR(value);
		cas = RECMEMB_CAS(value);
		ras = RECMEMB_RAS(value);

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      REDMEMB, &value);
		channel = (branch << 1);

		/* Second channel ? */
		channel += !!(value & BIT(17));

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				       FERR_NF_FBD, error_reg);

		/* Form out message */
		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
			 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
			 bank, ras, cas, errors, specific);

		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0,
				     syndrome,
				     branch >> 1, channel % 2, rank,
				     is_wr ? "Write error" : "Read error",
				     pvt->tmp_prt_buffer);
	}
	return;
}

/**
 * i7300_check_error() - Calls the error checking subroutines
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_check_error(struct mem_ctl_info *mci)
{
	i7300_process_error_global(mci);
	i7300_process_fbd_error(mci);
}

/**
 * i7300_clear_error() - Clears the error registers
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_clear_error(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt = mci->pvt_info;
	u32 value;
	/*
	 * All error values are RWC - we need to read and write 1 to the
	 * bit that we want to clean up
	 */

	/* Clear global error registers */
	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_HI, &value);
	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			       FERR_GLOBAL_HI, value);

	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_LO, &value);
	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			       FERR_GLOBAL_LO, value);

	/* Clear FBD error registers */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_FAT_FBD, &value);
	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			       FERR_FAT_FBD, value);

	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_NF_FBD, &value);
	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			       FERR_NF_FBD, value);
}

/**
 * i7300_enable_error_reporting() - Enable the memory reporting logic in the
 *				    hardware
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt = mci->pvt_info;
	u32 fbd_error_mask;

	/* Read the FBD Error Mask Register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      EMASK_FBD, &fbd_error_mask);

	/* Enable with a '0' */
	fbd_error_mask &= ~(EMASK_FBD_ERR_MASK);

	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			       EMASK_FBD, fbd_error_mask);
}

/************************************************
 * i7300 Functions related to memory enumeration
 ************************************************/

/**
 * decode_mtr() - Decodes the MTR descriptor, filling the edac structs
 * @pvt: pointer to the private data struct used by i7300 driver
 * @slot: DIMM slot (0 to 7)
 * @ch: Channel number within the branch (0 or 1)
 * @branch: Branch number (0 or 1)
 * @dinfo: Pointer to DIMM info where dimm size is stored
 * @dimm: Pointer to the struct dimm_info that corresponds to that element
 */
static int decode_mtr(struct i7300_pvt *pvt,
		      int slot, int ch, int branch,
		      struct i7300_dimm_info *dinfo,
		      struct dimm_info *dimm)
{
	int mtr, ans, addrBits, channel;

	channel = to_channel(ch, branch);

	mtr = pvt->mtr[slot][branch];
	ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;

	edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
		 slot, channel, ans ? "" : "NOT ");

	/* Determine if there is a DIMM present in this DIMM slot */
	if (!ans)
		return 0;

	/* Start with the number of bits for a Bank
	 * on the DRAM */
	addrBits = MTR_DRAM_BANKS_ADDR_BITS;
	/* Add the number of ROW bits */
	addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
	/* add the number of COLUMN bits */
	addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
	/* add the number of RANK bits */
	addrBits += MTR_DIMM_RANKS(mtr);

	addrBits += 6;	/* add 64 bits per DIMM */
	addrBits -= 20;	/* divide by 2^20 */
	addrBits -= 3;	/* 8 bits per byte */

	dinfo->megabytes = 1 << addrBits;
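	/*
	 * Worked example (hypothetical DIMM): 2 bank bits + 14 row bits +
	 * 11 column bits + 1 rank bit = 28, plus 6 (64 bits per access)
	 * gives 34 address bits; subtracting 20 (MiB) and 3 (bits -> bytes)
	 * leaves 11, i.e. a 2 GB DIMM (1 << 11 = 2048 MB).
	 */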

	edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));

	edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
		 str_enabled_disabled(MTR_DIMMS_ETHROTTLE(mtr)));

	edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
	edac_dbg(2, "\t\tNUMRANK: %s\n",
		 MTR_DIMM_RANKS(mtr) ? "double" : "single");
	edac_dbg(2, "\t\tNUMROW: %s\n",
		 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
		 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
		 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
		 "65,536 - 16 rows");
	edac_dbg(2, "\t\tNUMCOL: %s\n",
		 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
		 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
		 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
		 "reserved");
	edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);

	/*
	 * The type of error detection actually depends on the
	 * mode of operation. When it is just one single memory chip, at
	 * socket 0, channel 0, it uses 8-byte-over-32-byte SECDED+ code.
	 * In normal or mirrored mode, it uses Lockstep mode,
	 * with the possibility of using an extended algorithm for x8 memories.
	 * See datasheet Sections 7.3.6 to 7.3.8.
	 */

	dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
	dimm->grain = 8;
	dimm->mtype = MEM_FB_DDR2;
	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
		dimm->edac_mode = EDAC_SECDED;
		edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
	} else {
		edac_dbg(2, "\t\tECC code is on Lockstep mode\n");
		if (MTR_DRAM_WIDTH(mtr) == 8)
			dimm->edac_mode = EDAC_S8ECD8ED;
		else
			dimm->edac_mode = EDAC_S4ECD4ED;
	}

	/* ask what device type on this row */
	if (MTR_DRAM_WIDTH(mtr) == 8) {
		edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n",
			 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
			 "enhanced" : "normal");

		dimm->dtype = DEV_X8;
	} else
		dimm->dtype = DEV_X4;

	return mtr;
}

/**
 * print_dimm_size() - Prints a dump of the memory organization
 * @pvt: pointer to the private data struct used by i7300 driver
 *
 * Useful for debugging. If debugging is disabled, this routine does nothing.
 */
static void print_dimm_size(struct i7300_pvt *pvt)
{
#ifdef CONFIG_EDAC_DEBUG
	struct i7300_dimm_info *dinfo;
	char *p;
	int space, n;
	int channel, slot;

	space = PAGE_SIZE;
	p = pvt->tmp_prt_buffer;

	n = snprintf(p, space, " ");
	p += n;
	space -= n;
	for (channel = 0; channel < MAX_CHANNELS; channel++) {
		n = snprintf(p, space, "channel %d | ", channel);
		p += n;
		space -= n;
	}
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;
	n = snprintf(p, space, "-------------------------------"
			       "------------------------------");
	p += n;
	space -= n;
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;

	for (slot = 0; slot < MAX_SLOTS; slot++) {
		n = snprintf(p, space, "csrow/SLOT %d ", slot);
		p += n;
		space -= n;

		for (channel = 0; channel < MAX_CHANNELS; channel++) {
			dinfo = &pvt->dimm_info[slot][channel];
			n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
			p += n;
			space -= n;
		}

		edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
		p = pvt->tmp_prt_buffer;
		space = PAGE_SIZE;
	}

	n = snprintf(p, space, "-------------------------------"
			       "------------------------------");
	p += n;
	space -= n;
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;
#endif
}

/**
 * i7300_init_csrows() - Initialize the 'csrows' table within
 *			 the mci control structure with the
 *			 addressing of memory.
 * @mci: struct mem_ctl_info pointer
 */
static int i7300_init_csrows(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	struct i7300_dimm_info *dinfo;
	int rc = -ENODEV;
	int mtr;
	int ch, branch, slot, channel, max_channel, max_branch;
	struct dimm_info *dimm;

	pvt = mci->pvt_info;

	edac_dbg(2, "Memory Technology Registers:\n");

	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
		max_branch = 1;
		max_channel = 1;
	} else {
		max_branch = MAX_BRANCHES;
		max_channel = MAX_CH_PER_BRANCH;
	}

	/* Get the AMB present registers for the four channels */
	for (branch = 0; branch < max_branch; branch++) {
		/* Read the AMB-present register for the branch's first channel */
		channel = to_channel(0, branch);
		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
				     AMBPRESENT_0,
				     &pvt->ambpresent[channel]);
		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
			 channel, pvt->ambpresent[channel]);

		if (max_channel == 1)
			continue;

		channel = to_channel(1, branch);
		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
				     AMBPRESENT_1,
				     &pvt->ambpresent[channel]);
		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
			 channel, pvt->ambpresent[channel]);
	}

	/* Get the set of MTR[0-7] regs by each branch */
	for (slot = 0; slot < MAX_SLOTS; slot++) {
		int where = mtr_regs[slot];
		for (branch = 0; branch < max_branch; branch++) {
			pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
					     where,
					     &pvt->mtr[slot][branch]);
			for (ch = 0; ch < max_channel; ch++) {
				int channel = to_channel(ch, branch);

				dimm = edac_get_dimm(mci, branch, ch, slot);

				dinfo = &pvt->dimm_info[slot][channel];

				mtr = decode_mtr(pvt, slot, ch, branch,
						 dinfo, dimm);

				/* if no DIMMS on this row, continue */
				if (!MTR_DIMMS_PRESENT(mtr))
					continue;

				rc = 0;

			}
		}
	}

	return rc;
}

/**
 * decode_mir() - Decodes Memory Interleave Register (MIR) info
 * @mir_no: number of the MIR register to decode
 * @mir: array with the MIR data cached on the driver
 */
static void decode_mir(int mir_no, u16 mir[MAX_MIR])
{
	if (mir[mir_no] & 3)
		edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
			 mir_no,
			 (mir[mir_no] >> 4) & 0xfff,
			 (mir[mir_no] & 1) ? "B0" : "",
			 (mir[mir_no] & 2) ? "B1" : "");
}
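
/*
 * Example (hypothetical MIR contents): a MIR value of 0x0103 has both
 * participation bits set and a limit field of 0x10, so decode_mir() would
 * log "limit= 0x10 Branch(es) that participate: B0 B1".
 */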

/**
 * i7300_get_mc_regs() - Get the contents of the MC enumeration registers
 * @mci: struct mem_ctl_info pointer
 *
 * Data read is cached internally for its usage when needed
 */
static int i7300_get_mc_regs(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 actual_tolm;
	int i, rc;

	pvt = mci->pvt_info;

	pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
			      (u32 *) &pvt->ambase);

	edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);

	/* Get the Branch Map regs */
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
	pvt->tolm >>= 12;
	edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
		 pvt->tolm, pvt->tolm);

	actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
	edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
		 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
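	/*
	 * Worked example: with pvt->tolm = 8 (8 regions of 256 MB, i.e. 2 GB
	 * of low memory), actual_tolm = (1000 * 8) >> 2 = 2000, so the line
	 * above prints "Actual TOLM byte addr=2.000 GB (0x80000000)".
	 */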

	/* Get memory controller settings */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
			      &pvt->mc_settings);
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A,
			      &pvt->mc_settings_a);

	if (IS_SINGLE_MODE(pvt->mc_settings_a))
		edac_dbg(0, "Memory controller operating in single mode\n");
	else
		edac_dbg(0, "Memory controller operating in %smirrored mode\n",
			 IS_MIRRORED(pvt->mc_settings) ? "" : "non-");

	edac_dbg(0, "Error detection is %s\n",
		 str_enabled_disabled(IS_ECC_ENABLED(pvt->mc_settings)));
	edac_dbg(0, "Retry is %s\n",
		 str_enabled_disabled(IS_RETRY_ENABLED(pvt->mc_settings)));

	/* Get Memory Interleave Range registers */
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
			     &pvt->mir[0]);
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1,
			     &pvt->mir[1]);
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2,
			     &pvt->mir[2]);

	/* Decode the MIR regs */
	for (i = 0; i < MAX_MIR; i++)
		decode_mir(i, pvt->mir);

	rc = i7300_init_csrows(mci);
	if (rc < 0)
		return rc;

	/* Go and determine the size of each DIMM and place in an
	 * orderly matrix */
	print_dimm_size(pvt);

	return 0;
}

/*************************************************
 * i7300 Functions related to device probe/release
 *************************************************/

/**
 * i7300_put_devices() - Release the PCI devices
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_put_devices(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	int branch;

	pvt = mci->pvt_info;

	/* Decrement usage count for devices */
	for (branch = 0; branch < MAX_BRANCHES; branch++)
		pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]);
	pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs);
	pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map);
}

/**
 * i7300_get_devices() - Find and perform 'get' operation on the MCH's
 *			 device/functions we want to reference for this driver
 * @mci: struct mem_ctl_info pointer
 *
 * Access and prepare the several devices for use:
 * I7300 devices used by this driver:
 *    Device 16, functions 0, 1 and 2:	PCI_DEVICE_ID_INTEL_I7300_MCH_ERR
 *    Device 21 function 0:		PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
 *    Device 22 function 0:		PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
 */
static int i7300_get_devices(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	struct pci_dev *pdev;

	pvt = mci->pvt_info;

	/* Attempt to 'get' the MCH register we want */
	pdev = NULL;
	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
				      pdev))) {
		/* Store device 16 funcs 1 and 2 */
		switch (PCI_FUNC(pdev->devfn)) {
		case 1:
			if (!pvt->pci_dev_16_1_fsb_addr_map)
				pvt->pci_dev_16_1_fsb_addr_map =
							pci_dev_get(pdev);
			break;
		case 2:
			if (!pvt->pci_dev_16_2_fsb_err_regs)
				pvt->pci_dev_16_2_fsb_err_regs =
							pci_dev_get(pdev);
			break;
		}
	}

	if (!pvt->pci_dev_16_1_fsb_addr_map ||
	    !pvt->pci_dev_16_2_fsb_err_regs) {
		/* At least one device was not found */
		i7300_printk(KERN_ERR,
			     "'system address, Process Bus' device not found: "
			     "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
			     PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
		goto error;
	}

	edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
		 pci_name(pvt->pci_dev_16_0_fsb_ctlr),
		 pvt->pci_dev_16_0_fsb_ctlr->vendor,
		 pvt->pci_dev_16_0_fsb_ctlr->device);
	edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
		 pci_name(pvt->pci_dev_16_1_fsb_addr_map),
		 pvt->pci_dev_16_1_fsb_addr_map->vendor,
		 pvt->pci_dev_16_1_fsb_addr_map->device);
	edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
		 pci_name(pvt->pci_dev_16_2_fsb_err_regs),
		 pvt->pci_dev_16_2_fsb_err_regs->vendor,
		 pvt->pci_dev_16_2_fsb_err_regs->device);

	pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
					PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
					NULL);
	if (!pvt->pci_dev_2x_0_fbd_branch[0]) {
		i7300_printk(KERN_ERR,
			     "MC: 'BRANCH 0' device not found: "
			     "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
			     PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_I7300_MCH_FB0);
		goto error;
	}

	pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL,
					PCI_DEVICE_ID_INTEL_I7300_MCH_FB1,
					NULL);
	if (!pvt->pci_dev_2x_0_fbd_branch[1]) {
		i7300_printk(KERN_ERR,
			     "MC: 'BRANCH 1' device not found: "
			     "vendor 0x%x device 0x%x Func 0 "
			     "(broken BIOS?)\n",
			     PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_I7300_MCH_FB1);
		goto error;
	}

	return 0;

error:
	i7300_put_devices(mci);
	return -ENODEV;
}

/**
 * i7300_init_one() - Probe for one instance of the device
 * @pdev: struct pci_dev pointer
 * @id: struct pci_device_id pointer - currently unused
 */
static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[3];
	struct i7300_pvt *pvt;
	int rc;

	/* wake up device */
	rc = pci_enable_device(pdev);
	if (rc == -EIO)
		return rc;

	edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
		 pdev->bus->number,
		 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

	/* We are only looking for func 0 of the set */
	if (PCI_FUNC(pdev->devfn) != 0)
		return -ENODEV;

	/* allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_BRANCH;
	layers[0].size = MAX_BRANCHES;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = MAX_CH_PER_BRANCH;
	layers[1].is_virt_csrow = true;
	layers[2].type = EDAC_MC_LAYER_SLOT;
	layers[2].size = MAX_SLOTS;
	layers[2].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (mci == NULL)
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p\n", mci);

	mci->pdev = &pdev->dev;	/* record ptr to the generic device */

	pvt = mci->pvt_info;
	pvt->pci_dev_16_0_fsb_ctlr = pdev;	/* Record this device in our private */

	pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pvt->tmp_prt_buffer) {
		edac_mc_free(mci);
		return -ENOMEM;
	}

	/* 'get' the pci devices we want to reserve for our use */
	if (i7300_get_devices(mci))
		goto fail0;

	mci->mc_idx = 0;
	mci->mtype_cap = MEM_FLAG_FB_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7300_edac.c";
	mci->ctl_name = i7300_devs[0].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	/* Set the function pointer to an actual operation function */
	mci->edac_check = i7300_check_error;

	/* initialize the MC control structure 'csrows' table
	 * with the mapping and control information */
	if (i7300_get_mc_regs(mci)) {
		edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
	} else {
		edac_dbg(1, "MC: Enable error reporting now\n");
		i7300_enable_error_reporting(mci);
	}

	/* add this new MC control structure to EDAC's list of MCs */
	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */
		goto fail1;
	}

	i7300_clear_error(mci);

	/* allocating generic PCI control info */
	i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!i7300_pci) {
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n",
			__func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	return 0;

	/* Error exit unwinding stack */
fail1:

	i7300_put_devices(mci);

fail0:
	kfree(pvt->tmp_prt_buffer);
	edac_mc_free(mci);
	return -ENODEV;
}

/**
 * i7300_remove_one() - Remove the driver
 * @pdev: struct pci_dev pointer
 */
static void i7300_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	char *tmp;

	edac_dbg(0, "\n");

	if (i7300_pci)
		edac_pci_release_generic_ctl(i7300_pci);

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer;

	/* retrieve references to resources, and free those resources */
	i7300_put_devices(mci);

	kfree(tmp);
	edac_mc_free(mci);
}

/*
 * pci_device_id: table for which devices we are looking for
 *
 * Has only 8086:360c PCI ID
 */
static const struct pci_device_id i7300_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
	{0,}			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);

/*
 * i7300_driver: pci_driver structure for this module
 */
static struct pci_driver i7300_driver = {
	.name = "i7300_edac",
	.probe = i7300_init_one,
	.remove = i7300_remove_one,
	.id_table = i7300_pci_tbl,
};

/**
 * i7300_init() - Registers the driver
 */
static int __init i7300_init(void)
{
	int pci_rc;

	edac_dbg(2, "\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&i7300_driver);

	return (pci_rc < 0) ? pci_rc : 0;
}

/**
 * i7300_exit() - Unregisters the driver
 */
static void __exit i7300_exit(void)
{
	edac_dbg(2, "\n");
	pci_unregister_driver(&i7300_driver);
}

module_init(i7300_init);
module_exit(i7300_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
		   I7300_REVISION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");