/*
 * Copyright (C) 2017 Marvell
 *
 * Hanna Hawa <hannah@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#include <linux/irqchip/irq-msi-lib.h>

#include <dt-bindings/interrupt-controller/mvebu-icu.h>

/* ICU registers */
#define ICU_SETSPI_NSR_AL	0x10
#define ICU_SETSPI_NSR_AH	0x14
#define ICU_CLRSPI_NSR_AL	0x18
#define ICU_CLRSPI_NSR_AH	0x1c
#define ICU_SET_SEI_AL		0x50
#define ICU_SET_SEI_AH		0x54
#define ICU_CLR_SEI_AL		0x58
#define ICU_CLR_SEI_AH		0x5C
#define ICU_INT_CFG(x)		(0x100 + 4 * (x))
#define ICU_INT_ENABLE		BIT(24)
#define ICU_IS_EDGE		BIT(28)
#define ICU_GROUP_SHIFT		29

/* ICU definitions */
#define ICU_MAX_IRQS		207
#define ICU_SATA0_ICU_ID	109
#define ICU_SATA1_ICU_ID	107

struct mvebu_icu_subset_data {
	unsigned int icu_group;
	unsigned int offset_set_ah;
	unsigned int offset_set_al;
	unsigned int offset_clr_ah;
	unsigned int offset_clr_al;
};

struct mvebu_icu {
	void __iomem *base;
	struct device *dev;
};

struct mvebu_icu_msi_data {
	struct mvebu_icu *icu;
	atomic_t initialized;
	const struct mvebu_icu_subset_data *subset_data;
};

static DEFINE_STATIC_KEY_FALSE(legacy_bindings);

static int mvebu_icu_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
			       unsigned long *hwirq, unsigned int *type)
{
	unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
	struct msi_domain_info *info = d->host_data;
	struct mvebu_icu_msi_data *msi_data = info->chip_data;
	struct mvebu_icu *icu = msi_data->icu;

	/* Check the count of the parameters in dt */
	if (WARN_ON(fwspec->param_count != param_count)) {
		dev_err(icu->dev, "wrong ICU parameter count %d\n",
			fwspec->param_count);
		return -EINVAL;
	}

	if (static_branch_unlikely(&legacy_bindings)) {
		*hwirq = fwspec->param[1];
		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		if (fwspec->param[0] != ICU_GRP_NSR) {
			dev_err(icu->dev, "wrong ICU group type %x\n",
				fwspec->param[0]);
			return -EINVAL;
		}
	} else {
		*hwirq = fwspec->param[0];
		*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;

		/*
		 * The ICU receives level interrupts. While the NSR are also
		 * level interrupts, SEI are edge interrupts. Force the type
		 * here in this case. Please note that this makes the interrupt
		 * handling unreliable.
		 */
		if (msi_data->subset_data->icu_group == ICU_GRP_SEI)
			*type = IRQ_TYPE_EDGE_RISING;
	}

	if (*hwirq >= ICU_MAX_IRQS) {
		dev_err(icu->dev, "invalid interrupt number %ld\n", *hwirq);
		return -EINVAL;
	}

	return 0;
}
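
/*
 * One-time programming of the AP doorbell addresses used by this ICU
 * subset: the 'SET' SPI message address is always written, while the
 * 'CLEAR' address is only relevant for the NSR group (level MSIs).
 * The atomic_cmpxchg() guard ensures this runs once per MSI parent.
 */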
static void mvebu_icu_init(struct mvebu_icu *icu,
			   struct mvebu_icu_msi_data *msi_data,
			   struct msi_msg *msg)
{
	const struct mvebu_icu_subset_data *subset = msi_data->subset_data;

	if (atomic_cmpxchg(&msi_data->initialized, false, true))
		return;

	/* Set 'SET' ICU SPI message address in AP */
	writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah);
	writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al);

	if (subset->icu_group != ICU_GRP_NSR)
		return;

	/* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */
	writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah);
	writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
}

static int mvebu_icu_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			      unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, info->chip_data);
	return irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
}

static void mvebu_icu_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
	arg->hwirq = (u32)desc->data.icookie.value;
}
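
/*
 * Program (or clear) the per-wired-interrupt configuration register.
 * A non-zero MSI address means the interrupt is being activated: the
 * register is loaded with the MSI doorbell data plus the enable bit,
 * the ICU group, and the edge flag derived from the trigger type held
 * in the upper half of the instance cookie. A zero address
 * de-configures the entry again.
 */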
static void mvebu_icu_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct mvebu_icu_msi_data *msi_data = d->chip_data;
	unsigned int icu_group = msi_data->subset_data->icu_group;
	struct msi_desc *desc = irq_data_get_msi_desc(d);
	struct mvebu_icu *icu = msi_data->icu;
	unsigned int type;
	u32 icu_int;

	if (msg->address_lo || msg->address_hi) {
		/* One-off initialization per domain */
		mvebu_icu_init(icu, msi_data, msg);
		/* Configure the ICU with irq number & type */
		icu_int = msg->data | ICU_INT_ENABLE;
		type = (unsigned int)(desc->data.icookie.value >> 32);
		if (type & IRQ_TYPE_EDGE_RISING)
			icu_int |= ICU_IS_EDGE;
		icu_int |= icu_group << ICU_GROUP_SHIFT;
	} else {
		/* De-configure the ICU */
		icu_int = 0;
	}

	writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));

	/*
	 * The SATA unit has 2 ports, and a dedicated ICU entry per
	 * port. The ahci sata driver supports only one irq interrupt
	 * per SATA unit. To solve this conflict, we map the 2 SATA
	 * wired interrupts in the south bridge onto 1 GIC interrupt
	 * in the north bridge. Whenever the SATA node is enabled,
	 * both interrupts are configured, even if only a single port
	 * is actually in use.
	 */
	if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
		writel_relaxed(icu_int, icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
		writel_relaxed(icu_int, icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
	}
}

static const struct msi_domain_template mvebu_icu_nsr_msi_template = {
	.chip = {
		.name			= "ICU-NSR",
		.irq_mask		= irq_chip_mask_parent,
		.irq_unmask		= irq_chip_unmask_parent,
		.irq_eoi		= irq_chip_eoi_parent,
		.irq_set_type		= irq_chip_set_type_parent,
		.irq_write_msi_msg	= mvebu_icu_write_msi_msg,
		.flags			= IRQCHIP_SUPPORTS_LEVEL_MSI,
	},

	.ops = {
		.msi_translate		= mvebu_icu_translate,
		.msi_init		= mvebu_icu_msi_init,
		.set_desc		= mvebu_icu_set_desc,
	},

	.info = {
		.bus_token		= DOMAIN_BUS_WIRED_TO_MSI,
		.flags			= MSI_FLAG_LEVEL_CAPABLE |
					  MSI_FLAG_USE_DEV_FWNODE,
	},
};

static const struct msi_domain_template mvebu_icu_sei_msi_template = {
	.chip = {
		.name			= "ICU-SEI",
		.irq_mask		= irq_chip_mask_parent,
		.irq_unmask		= irq_chip_unmask_parent,
		.irq_ack		= irq_chip_ack_parent,
		.irq_set_type		= irq_chip_set_type_parent,
		.irq_write_msi_msg	= mvebu_icu_write_msi_msg,
		.flags			= IRQCHIP_SUPPORTS_LEVEL_MSI,
	},

	.ops = {
		.msi_translate		= mvebu_icu_translate,
		.msi_init		= mvebu_icu_msi_init,
		.set_desc		= mvebu_icu_set_desc,
	},

	.info = {
		.bus_token		= DOMAIN_BUS_WIRED_TO_MSI,
		.flags			= MSI_FLAG_LEVEL_CAPABLE |
					  MSI_FLAG_USE_DEV_FWNODE,
	},
};

static const struct mvebu_icu_subset_data mvebu_icu_nsr_subset_data = {
	.icu_group = ICU_GRP_NSR,
	.offset_set_ah = ICU_SETSPI_NSR_AH,
	.offset_set_al = ICU_SETSPI_NSR_AL,
	.offset_clr_ah = ICU_CLRSPI_NSR_AH,
	.offset_clr_al = ICU_CLRSPI_NSR_AL,
};

static const struct mvebu_icu_subset_data mvebu_icu_sei_subset_data = {
	.icu_group = ICU_GRP_SEI,
	.offset_set_ah = ICU_SET_SEI_AH,
	.offset_set_al = ICU_SET_SEI_AL,
};

static const struct of_device_id mvebu_icu_subset_of_match[] = {
	{
		.compatible = "marvell,cp110-icu-nsr",
		.data = &mvebu_icu_nsr_subset_data,
	},
	{
		.compatible = "marvell,cp110-icu-sei",
		.data = &mvebu_icu_sei_subset_data,
	},
	{},
};
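
/*
 * Probe one ICU subset (NSR or SEI). With the legacy bindings the ICU
 * node itself acts as the single NSR subset; with the new bindings
 * each child node selects its subset data from the match table. The
 * subset is registered as a per-device MSI domain on top of the MSI
 * parent controller referenced by the device tree.
 */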
static int mvebu_icu_subset_probe(struct platform_device *pdev)
{
	const struct msi_domain_template *tmpl;
	struct mvebu_icu_msi_data *msi_data;
	struct device *dev = &pdev->dev;
	bool sei;

	msi_data = devm_kzalloc(dev, sizeof(*msi_data), GFP_KERNEL);
	if (!msi_data)
		return -ENOMEM;

	if (static_branch_unlikely(&legacy_bindings)) {
		msi_data->icu = dev_get_drvdata(dev);
		msi_data->subset_data = &mvebu_icu_nsr_subset_data;
	} else {
		msi_data->icu = dev_get_drvdata(dev->parent);
		msi_data->subset_data = of_device_get_match_data(dev);
	}

	dev->msi.domain = of_msi_get_domain(dev, dev->of_node, DOMAIN_BUS_PLATFORM_MSI);
	if (!dev->msi.domain)
		return -EPROBE_DEFER;

	if (!irq_domain_get_of_node(dev->msi.domain))
		return -ENODEV;

	sei = msi_data->subset_data->icu_group == ICU_GRP_SEI;
	tmpl = sei ? &mvebu_icu_sei_msi_template : &mvebu_icu_nsr_msi_template;

	if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, tmpl,
					  ICU_MAX_IRQS, NULL, msi_data)) {
		dev_err(dev, "Failed to create ICU MSI domain\n");
		return -ENOMEM;
	}

	return 0;
}

static struct platform_driver mvebu_icu_subset_driver = {
	.probe = mvebu_icu_subset_probe,
	.driver = {
		.name = "mvebu-icu-subset",
		.of_match_table = mvebu_icu_subset_of_match,
	},
};
builtin_platform_driver(mvebu_icu_subset_driver);

static int mvebu_icu_probe(struct platform_device *pdev)
{
	struct mvebu_icu *icu;
	int i;

	icu = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_icu),
			   GFP_KERNEL);
	if (!icu)
		return -ENOMEM;

	icu->dev = &pdev->dev;

	icu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(icu->base))
		return PTR_ERR(icu->base);

	/*
	 * Legacy bindings: the ICU is a single node with one MSI parent;
	 * manually force the probe of the NSR interrupt side.
	 * New bindings: the ICU node has children, one per interrupt
	 * controller, each with its own MSI parent; call
	 * platform_populate().
	 * All ICU instances should use the same bindings.
	 */
	if (!of_get_child_count(pdev->dev.of_node))
		static_branch_enable(&legacy_bindings);

	/*
	 * Clean all ICU interrupts of type NSR and SEI, required to
	 * avoid unpredictable SPI assignments done by firmware.
	 */
	for (i = 0; i < ICU_MAX_IRQS; i++) {
		u32 icu_int, icu_grp;

		icu_int = readl_relaxed(icu->base + ICU_INT_CFG(i));
		icu_grp = icu_int >> ICU_GROUP_SHIFT;

		if (icu_grp == ICU_GRP_NSR ||
		    (icu_grp == ICU_GRP_SEI &&
		     !static_branch_unlikely(&legacy_bindings)))
			writel_relaxed(0x0, icu->base + ICU_INT_CFG(i));
	}

	platform_set_drvdata(pdev, icu);

	if (static_branch_unlikely(&legacy_bindings))
		return mvebu_icu_subset_probe(pdev);
	else
		return devm_of_platform_populate(&pdev->dev);
}

static const struct of_device_id mvebu_icu_of_match[] = {
	{ .compatible = "marvell,cp110-icu", },
	{},
};

static struct platform_driver mvebu_icu_driver = {
	.probe = mvebu_icu_probe,
	.driver = {
		.name = "mvebu-icu",
		.of_match_table = mvebu_icu_of_match,
	},
};
builtin_platform_driver(mvebu_icu_driver);