/*
 * Allwinner A20/A31 SoCs NMI IRQ chip driver.
 *
 * Carlo Caione <carlo.caione@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#define DRV_NAME	"sunxi-nmi"
#define pr_fmt(fmt)	DRV_NAME ": " fmt

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>

/* Low two bits of the control register select the NMI trigger type. */
#define SUNXI_NMI_SRC_TYPE_MASK	0x00000003

/* Single NMI source: bit 0 in the pending/enable registers. */
#define SUNXI_NMI_IRQ_BIT	BIT(0)

/*
 * For deprecated sun6i-a31-sc-nmi compatible.
 */
#define SUN6I_NMI_CTRL		0x00
#define SUN6I_NMI_PENDING	0x04
#define SUN6I_NMI_ENABLE	0x34

#define SUN7I_NMI_CTRL		0x00
#define SUN7I_NMI_PENDING	0x04
#define SUN7I_NMI_ENABLE	0x08

/* Note: enable/pending offsets are swapped relative to sun7i. */
#define SUN9I_NMI_CTRL		0x00
#define SUN9I_NMI_ENABLE	0x04
#define SUN9I_NMI_PENDING	0x08

/*
 * Hardware encoding of the trigger type written into the low bits of
 * the control register (see SUNXI_NMI_SRC_TYPE_MASK).
 */
enum {
	SUNXI_SRC_TYPE_LEVEL_LOW = 0,
	SUNXI_SRC_TYPE_EDGE_FALLING,
	SUNXI_SRC_TYPE_LEVEL_HIGH,
	SUNXI_SRC_TYPE_EDGE_RISING,
};

/*
 * Per-SoC description: register offsets plus the value written to the
 * enable register at probe time to mask the NMI.
 */
struct sunxi_sc_nmi_data {
	struct {
		u32 ctrl;	/* trigger-type control register */
		u32 pend;	/* pending/ack register */
		u32 enable;	/* enable/mask register */
	} reg_offs;
	/*
	 * Written to the enable register during init (0 for most SoCs).
	 * NOTE(review): for the A523 this is BIT(31), which appears to be
	 * a bit that must be set even while the NMI itself (bit 0) is
	 * masked — confirm against the A523 user manual.
	 */
	u32 enable_val;
};

static const struct sunxi_sc_nmi_data sun6i_data __initconst = {
	.reg_offs.ctrl		= SUN6I_NMI_CTRL,
	.reg_offs.pend		= SUN6I_NMI_PENDING,
	.reg_offs.enable	= SUN6I_NMI_ENABLE,
};

static const struct sunxi_sc_nmi_data sun7i_data __initconst = {
	.reg_offs.ctrl		= SUN7I_NMI_CTRL,
	.reg_offs.pend		= SUN7I_NMI_PENDING,
	.reg_offs.enable	= SUN7I_NMI_ENABLE,
};

static const struct sunxi_sc_nmi_data sun9i_data __initconst = {
	.reg_offs.ctrl		= SUN9I_NMI_CTRL,
	.reg_offs.pend		= SUN9I_NMI_PENDING,
	.reg_offs.enable	= SUN9I_NMI_ENABLE,
};

/* A523 reuses the sun9i register layout but needs enable_val (see above). */
static const struct sunxi_sc_nmi_data sun55i_a523_data __initconst = {
	.reg_offs.ctrl		= SUN9I_NMI_CTRL,
	.reg_offs.pend		= SUN9I_NMI_PENDING,
	.reg_offs.enable	= SUN9I_NMI_ENABLE,
	.enable_val		= BIT(31),
};

/* Thin MMIO accessors relative to the generic chip's reg_base. */
static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, u32 val)
{
	irq_reg_writel(gc, val, off);
}

static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
{
	return irq_reg_readl(gc, off);
}

/*
 * Chained handler for the parent interrupt: forward to hwirq 0 of our
 * single-interrupt domain.
 */
static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	generic_handle_domain_irq(domain, 0);
	chained_irq_exit(chip, desc);
}

/*
 * irq_set_type callback: translate the requested IRQ_TYPE_* flow into the
 * hardware source-type encoding, switch to the matching chip type
 * (level vs. edge handler), and program the control register.
 *
 * Returns IRQ_SET_MASK_OK on success, -EBADR if flow_type requests a
 * combination not representable by the hardware.
 */
static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
	struct irq_chip_type *ct = gc->chip_types;
	u32 src_type_reg;
	u32 ctrl_off = ct->regs.type;
	unsigned int src_type;
	unsigned int i;

	/* Serialize the read-modify-write of the control register. */
	guard(raw_spinlock)(&gc->lock);

	switch (flow_type & IRQF_TRIGGER_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_RISING:
		src_type = SUNXI_SRC_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
		break;
	case IRQ_TYPE_NONE:
	case IRQ_TYPE_LEVEL_LOW:
		/* Hardware default: level low (also used for TYPE_NONE). */
		src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
		break;
	default:
		pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", data->irq);
		return -EBADR;
	}

	irqd_set_trigger_type(data, flow_type);
	/* Switch between the level (fasteoi) and edge chip types. */
	irq_setup_alt_chip(data, flow_type);

	/* Pick up the type-register offset from the now-active chip type. */
	for (i = 0; i < gc->num_ct; i++, ct++)
		if (ct->type & flow_type)
			ctrl_off = ct->regs.type;

	src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
	src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
	src_type_reg |= src_type;
	sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);

	return IRQ_SET_MASK_OK;
}

/*
 * Common init: create a one-interrupt linear domain, allocate a generic
 * chip with two chip types (level/fasteoi and edge), map the registers,
 * quiesce the hardware, and chain onto the parent interrupt.
 *
 * Returns 0 on success or a negative errno; the domain is removed on
 * every failure path.
 */
static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
					const struct sunxi_sc_nmi_data *data)
{
	unsigned int irq, clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	int ret;

	domain = irq_domain_create_linear(of_fwnode_handle(node), 1, &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("Could not register interrupt domain.\n");
		return -ENOMEM;
	}

	/* One chip, two chip types: [0] level/fasteoi, [1] edge. */
	ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME,
					     handle_fasteoi_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("Could not allocate generic interrupt chip.\n");
		goto fail_irqd_remove;
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		pr_err("unable to parse irq\n");
		ret = -EINVAL;
		goto fail_irqd_remove;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(gc->reg_base)) {
		pr_err("unable to map resource\n");
		ret = PTR_ERR(gc->reg_base);
		goto fail_irqd_remove;
	}

	/* Chip type 0: level triggers, EOI-based flow. */
	gc->chip_types[0].type			= IRQ_TYPE_LEVEL_MASK;
	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED |
						  IRQCHIP_EOI_IF_HANDLED |
						  IRQCHIP_SKIP_SET_WAKE;
	gc->chip_types[0].regs.ack		= data->reg_offs.pend;
	gc->chip_types[0].regs.mask		= data->reg_offs.enable;
	gc->chip_types[0].regs.type		= data->reg_offs.ctrl;

	/* Chip type 1: edge triggers, ack-based flow. */
	gc->chip_types[1].type			= IRQ_TYPE_EDGE_BOTH;
	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[1].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[1].regs.ack		= data->reg_offs.pend;
	gc->chip_types[1].regs.mask		= data->reg_offs.enable;
	gc->chip_types[1].regs.type		= data->reg_offs.ctrl;
	gc->chip_types[1].handler		= handle_edge_irq;

	/* Disable any active interrupts */
	sunxi_sc_nmi_write(gc, data->reg_offs.enable, data->enable_val);

	/* Clear any pending NMI interrupts */
	sunxi_sc_nmi_write(gc, data->reg_offs.pend, SUNXI_NMI_IRQ_BIT);

	irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);

	return 0;

fail_irqd_remove:
	irq_domain_remove(domain);

	return ret;
}

static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun6i_data);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);

static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun7i_data);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);

static int __init sun9i_nmi_irq_init(struct device_node *node,
				     struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun9i_data);
}
IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);

static int __init sun55i_nmi_irq_init(struct device_node *node,
				      struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun55i_a523_data);
}
IRQCHIP_DECLARE(sun55i_nmi, "allwinner,sun55i-a523-nmi", sun55i_nmi_irq_init);