/*
 * NVEC: NVIDIA compliant embedded controller interface
 *
 * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
 *
 * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
 *           Ilya Petrov <ilya.muromec@gmail.com>
 *           Marc Dietrich <marvin24@gmx.de>
 *           Julian Andres Klode <jak@jak-linux.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */

/* #define DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <mach/clk.h>
#include <mach/iomap.h>

#include "nvec.h"

#define I2C_CNFG			0x00
#define I2C_CNFG_PACKET_MODE_EN		(1<<10)
#define I2C_CNFG_NEW_MASTER_SFM		(1<<11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12

#define I2C_SL_CNFG		0x20
#define I2C_SL_NEWL		(1<<2)
#define I2C_SL_NACK		(1<<1)
#define I2C_SL_RESP		(1<<0)
#define I2C_SL_IRQ		(1<<3)
#define END_TRANS		(1<<4)
#define RCVD			(1<<2)
#define RNW			(1<<1)

#define I2C_SL_RCVD		0x24
#define I2C_SL_STATUS		0x28
#define I2C_SL_ADDR1		0x2c
#define I2C_SL_ADDR2		0x30
#define I2C_SL_DELAY_COUNT	0x3c

/**
 * enum nvec_msg_category - Message categories for nvec_msg_alloc()
 * @NVEC_MSG_RX: The message is an incoming message (from EC)
 * @NVEC_MSG_TX: The message is an outgoing message (to EC)
 */
enum nvec_msg_category {
	NVEC_MSG_RX,
	NVEC_MSG_TX,
};

static const unsigned char EC_DISABLE_EVENT_REPORTING[3] = "\x04\x00\x00";
static const unsigned char EC_ENABLE_EVENT_REPORTING[3] = "\x04\x00\x01";
static const unsigned char EC_GET_FIRMWARE_VERSION[2] = "\x07\x15";
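
/*
 * Note: the byte strings above are raw EC request payloads.  Byte 0 selects
 * the request type and the remaining bytes are its arguments; the length
 * byte that precedes each request on the wire is added by nvec_write_async().
 * The exact command semantics are EC firmware specific and are only inferred
 * here from how the constants are used in this file.
 */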

static struct nvec_chip *nvec_power_handle;

static struct mfd_cell nvec_devices[] = {
	{
		.name = "nvec-kbd",
		.id = 1,
	},
	{
		.name = "nvec-mouse",
		.id = 1,
	},
	{
		.name = "nvec-power",
		.id = 1,
	},
	{
		.name = "nvec-power",
		.id = 2,
	},
	{
		.name = "nvec-leds",
		.id = 1,
	},
};

/**
 * nvec_register_notifier - Register a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to register
 * @events: Unused
 *
 * Registers a notifier with @nvec. The notifier will be added to an atomic
 * notifier chain that is called for all received messages except those that
 * correspond to a request initiated by nvec_write_sync().
 */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
			   unsigned int events)
{
	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);
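
/*
 * Typical use from a sub-driver (sketch only; the handler and the event mask
 * value are illustrative, not taken from this file):
 *
 *	static int my_notifier_call(struct notifier_block *nb,
 *				    unsigned long event_type, void *data)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_notifier_call,
 *	};
 *
 *	nvec_register_notifier(nvec, &my_nb, 0);
 *
 * The data passed to the handler is the data[] of the received message:
 * byte 0 is the event/request type, and for variable-sized messages byte 1
 * is the payload length (see nvec_msg_size() and parse_msg() below).
 */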

/**
 * nvec_status_notifier - The final notifier
 *
 * Prints a message about control events not handled in the notifier
 * chain.
 */
static int nvec_status_notifier(struct notifier_block *nb,
				unsigned long event_type, void *data)
{
	unsigned char *msg = (unsigned char *)data;

	if (event_type != NVEC_CNTL)
		return NOTIFY_DONE;

	printk(KERN_WARNING "unhandled msg type %ld\n", event_type);
	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
		       msg, msg[1] + 2, true);

	return NOTIFY_OK;
}

/**
 * nvec_msg_alloc - Allocate a message from the pool
 * @nvec: A &struct nvec_chip
 * @category: Pool category, see &enum nvec_msg_category
 *
 * Allocate a single &struct nvec_msg object from the message pool of
 * @nvec. The result shall be passed to nvec_msg_free() if no longer
 * used.
 *
 * Outgoing messages are placed in the upper 75% of the pool, keeping the
 * lower 25% available for RX buffers only. The reason is to prevent a
 * situation where all buffers are full and a message is thus endlessly
 * retried because the response could never be processed.
 */
static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
				       enum nvec_msg_category category)
{
	int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;

	for (; i < NVEC_POOL_SIZE; i++) {
		if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
			dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
			return &nvec->msg_pool[i];
		}
	}

	dev_err(nvec->dev, "could not allocate %s buffer\n",
		(category == NVEC_MSG_TX) ? "TX" : "RX");

	return NULL;
}

/**
 * nvec_msg_free - Free a message
 * @nvec: A &struct nvec_chip
 * @msg: A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
 *
 * Free the given message
 */
inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if (msg != &nvec->tx_scratch)
		dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
	atomic_set(&msg->used, 0);
}
EXPORT_SYMBOL_GPL(nvec_msg_free);

/**
 * nvec_msg_is_event - Return %true if @msg is an event
 * @msg: A message
 */
static bool nvec_msg_is_event(struct nvec_msg *msg)
{
	return msg->data[0] >> 7;
}

/**
 * nvec_msg_size - Get the size of a message
 * @msg: The message to get the size for
 *
 * This only works for received messages, not for outgoing messages.
 */
static size_t nvec_msg_size(struct nvec_msg *msg)
{
	bool is_event = nvec_msg_is_event(msg);
	int event_length = (msg->data[0] & 0x60) >> 5;

	/* for variable size, payload size in byte 1 + count (1) + cmd (1) */
	if (!is_event || event_length == NVEC_VAR_SIZE)
		return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
	else if (event_length == NVEC_2BYTES)
		return 2;
	else if (event_length == NVEC_3BYTES)
		return 3;
	else
		return 0;
}
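
/*
 * Header layout as decoded above: bit 7 of byte 0 distinguishes events (1)
 * from responses (0), and bits 6:5 select the event size class (NVEC_2BYTES,
 * NVEC_3BYTES or NVEC_VAR_SIZE, defined in nvec.h).  For responses and
 * variable-sized events, byte 1 holds the payload length, so the total
 * message size is data[1] + 2 (payload plus the count and command bytes).
 */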

/**
 * nvec_gpio_set_value - Set the GPIO value
 * @nvec: A &struct nvec_chip
 * @value: The value to write (0 or 1)
 *
 * Like gpio_set_value(), but generating debugging information
 */
static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
{
	dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
		gpio_get_value(nvec->gpio), value);
	gpio_set_value(nvec->gpio, value);
}

/**
 * nvec_write_async - Asynchronously write a message to NVEC
 * @nvec: An nvec_chip instance
 * @data: The message data, starting with the request type
 * @size: The size of @data
 *
 * Queue a single message to be transferred to the embedded controller
 * and return immediately.
 *
 * Returns: 0 on success, a negative error code on failure. If a failure
 * occurred, the nvec driver may print an error.
 */
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
		     short size)
{
	struct nvec_msg *msg;
	unsigned long flags;

	msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);

	if (msg == NULL)
		return -ENOMEM;

	msg->data[0] = size;
	memcpy(msg->data + 1, data, size);
	msg->size = size + 1;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	list_add_tail(&msg->node, &nvec->tx_data);
	spin_unlock_irqrestore(&nvec->tx_lock, flags);

	queue_work(nvec->wq, &nvec->tx_work);

	return 0;
}
EXPORT_SYMBOL(nvec_write_async);
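
/*
 * Example caller (sketch; the request used here mirrors the probe path
 * below rather than a separate EC protocol reference):
 *
 *	// fire-and-forget request, e.g. re-enable event reporting
 *	nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
 *			 sizeof(EC_ENABLE_EVENT_REPORTING));
 *
 * This only queues the message; the transfer itself happens later from
 * nvec_request_master() after the EC has been signalled over the request
 * GPIO.
 */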

/**
 * nvec_write_sync - Write a message to nvec and read the response
 * @nvec: An &struct nvec_chip
 * @data: The data to write
 * @size: The size of @data
 *
 * This is similar to nvec_write_async(), but waits for the
 * request to be answered before returning. This function
 * uses a mutex and can thus not be called from e.g.
 * interrupt handlers.
 *
 * Returns: A pointer to the response message on success,
 * %NULL on failure. Free with nvec_msg_free() once no longer
 * used.
 */
struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
				 const unsigned char *data, short size)
{
	struct nvec_msg *msg;

	mutex_lock(&nvec->sync_write_mutex);

	nvec->sync_write_pending = (data[1] << 8) + data[0];

	if (nvec_write_async(nvec, data, size) < 0) {
		mutex_unlock(&nvec->sync_write_mutex);
		return NULL;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
		nvec->sync_write_pending);
	if (!(wait_for_completion_timeout(&nvec->sync_write,
					  msecs_to_jiffies(2000)))) {
		dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
		mutex_unlock(&nvec->sync_write_mutex);
		return NULL;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");

	msg = nvec->last_sync_msg;

	mutex_unlock(&nvec->sync_write_mutex);

	return msg;
}
EXPORT_SYMBOL(nvec_write_sync);
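
/*
 * Example caller (sketch, mirroring the firmware version query in the probe
 * path below):
 *
 *	struct nvec_msg *msg;
 *
 *	msg = nvec_write_sync(nvec, EC_GET_FIRMWARE_VERSION,
 *			      sizeof(EC_GET_FIRMWARE_VERSION));
 *	if (msg) {
 *		// msg->data[] holds the response, the caller owns the buffer
 *		nvec_msg_free(nvec, msg);
 *	}
 *
 * Must not be called from atomic context: it sleeps on a mutex and on a
 * completion.
 */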

/**
 * nvec_request_master - Process outgoing messages
 * @work: A &struct work_struct (the tx_work member of &struct nvec_chip)
 *
 * Processes all outgoing requests by sending the request and awaiting the
 * response, then continuing with the next request. Once a request has a
 * matching response, it will be freed and removed from the list.
 */
static void nvec_request_master(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
	unsigned long flags;
	long err;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	while (!list_empty(&nvec->tx_data)) {
		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
		spin_unlock_irqrestore(&nvec->tx_lock, flags);
		nvec_gpio_set_value(nvec, 0);
		err = wait_for_completion_interruptible_timeout(
				&nvec->ec_transfer, msecs_to_jiffies(5000));

		if (err == 0) {
			dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
			nvec_gpio_set_value(nvec, 1);
			msg->pos = 0;
		}

		spin_lock_irqsave(&nvec->tx_lock, flags);

		if (err > 0) {
			list_del_init(&msg->node);
			nvec_msg_free(nvec, msg);
		}
	}
	spin_unlock_irqrestore(&nvec->tx_lock, flags);
}

/**
 * parse_msg - Print some information and call the notifiers on an RX message
 * @nvec: A &struct nvec_chip
 * @msg: A message received by @nvec
 *
 * Parse some pieces of the message and then call the chain of notifiers
 * registered via nvec_register_notifier().
 */
static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
		dev_err(nvec->dev, "ec responded %02x %02x %02x %02x\n",
			msg->data[0], msg->data[1], msg->data[2], msg->data[3]);
		return -EINVAL;
	}

	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
		print_hex_dump(KERN_WARNING, "ec system event ",
			       DUMP_PREFIX_NONE, 16, 1, msg->data,
			       msg->data[1] + 2, true);

	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
				   msg->data);

	return 0;
}

/**
 * nvec_dispatch - Process messages received from the EC
 * @work: A &struct work_struct (the rx_work member of &struct nvec_chip)
 *
 * Process messages previously received from the EC and put into the RX
 * queue of the &struct nvec_chip instance associated with @work.
 */
static void nvec_dispatch(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
	unsigned long flags;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->rx_lock, flags);
	while (!list_empty(&nvec->rx_data)) {
		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
		list_del_init(&msg->node);
		spin_unlock_irqrestore(&nvec->rx_lock, flags);

		if (nvec->sync_write_pending ==
		    (msg->data[2] << 8) + msg->data[0]) {
			dev_dbg(nvec->dev, "sync write completed!\n");
			nvec->sync_write_pending = 0;
			nvec->last_sync_msg = msg;
			complete(&nvec->sync_write);
		} else {
			parse_msg(nvec, msg);
			nvec_msg_free(nvec, msg);
		}
		spin_lock_irqsave(&nvec->rx_lock, flags);
	}
	spin_unlock_irqrestore(&nvec->rx_lock, flags);
}

/**
 * nvec_tx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on a TX transfer.
 */
static void nvec_tx_completed(struct nvec_chip *nvec)
{
	/* We got an END_TRANS, let's skip this, maybe there's an event */
	if (nvec->tx->pos != nvec->tx->size) {
		dev_err(nvec->dev, "premature END_TRANS, resending\n");
		nvec->tx->pos = 0;
		nvec_gpio_set_value(nvec, 0);
	} else {
		nvec->state = 0;
	}
}

/**
 * nvec_rx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on a RX transfer.
 */
static void nvec_rx_completed(struct nvec_chip *nvec)
{
	if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
			(uint) nvec_msg_size(nvec->rx),
			(uint) nvec->rx->pos);

		/* Battery quirk - Often incomplete, and likes to crash */
		if (nvec->rx->data[0] == NVEC_BAT)
			complete(&nvec->ec_transfer);

		nvec_msg_free(nvec, nvec->rx);
		nvec->state = 0;

		return;
	}

	spin_lock(&nvec->rx_lock);

	/* add the received data to the work list
	   and move the ring buffer pointer to the next entry */
	list_add_tail(&nvec->rx->node, &nvec->rx_data);

	spin_unlock(&nvec->rx_lock);

	nvec->state = 0;

	if (!nvec_msg_is_event(nvec->rx))
		complete(&nvec->ec_transfer);

	queue_work(nvec->wq, &nvec->rx_work);
}

/**
 * nvec_invalid_flags - Send an error message about invalid flags and jump
 * @nvec: The nvec device
 * @status: The status flags
 * @reset: Whether we shall jump to state 0.
 */
static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
			       bool reset)
{
	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
		status, nvec->state);
	if (reset)
		nvec->state = 0;
}

/**
 * nvec_tx_set - Set the message to transfer (nvec->tx)
 * @nvec: A &struct nvec_chip
 *
 * Gets the first entry from the tx_data list of @nvec and sets the
 * tx member to it. If the tx_data list is empty, this uses the
 * tx_scratch message to send a no operation message.
 */
static void nvec_tx_set(struct nvec_chip *nvec)
{
	spin_lock(&nvec->tx_lock);
	if (list_empty(&nvec->tx_data)) {
		dev_err(nvec->dev, "empty tx - sending no-op\n");
		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
		nvec->tx_scratch.size = 3;
		nvec->tx_scratch.pos = 0;
		nvec->tx = &nvec->tx_scratch;
		list_add_tail(&nvec->tx->node, &nvec->tx_data);
	} else {
		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
					    node);
		nvec->tx->pos = 0;
	}
	spin_unlock(&nvec->tx_lock);

	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
		(uint)nvec->tx->size, nvec->tx->data[1]);
}
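
/*
 * Overview of the slave state machine driven by nvec_interrupt(), derived
 * from the switch statement in the handler itself:
 *
 *	state 0: idle, expect a new transfer start (I2C_SL_IRQ | RCVD)
 *	state 1: our address matched, the following byte is the command byte
 *	state 2: first byte after the command; either the EC turns the bus
 *		 around for a read (we start transmitting nvec->tx) or it
 *		 keeps writing and we continue receiving into nvec->rx
 *	state 3: EC does a block read, we transmit nvec->tx byte by byte
 *	state 4: EC does a block write, we collect the bytes into nvec->rx
 *
 * END_TRANS in state 3 or 4 finishes the transfer via nvec_tx_completed()
 * or nvec_rx_completed().  The request GPIO is released (driven high) once
 * the first TX byte has been handed to the controller.
 */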

/**
 * nvec_interrupt - Interrupt handler
 * @irq: The IRQ
 * @dev: The nvec device
 *
 * Interrupt handler that fills our RX buffers and empties our TX
 * buffers. This uses a finite state machine with ridiculous amounts
 * of error checking, in order to be fairly reliable.
 */
static irqreturn_t nvec_interrupt(int irq, void *dev)
{
	unsigned long status;
	unsigned int received = 0;
	unsigned char to_send = 0xff;
	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
	struct nvec_chip *nvec = dev;
	unsigned int state = nvec->state;

	status = readl(nvec->base + I2C_SL_STATUS);

	/* Filter out some errors */
	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
		dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
		return IRQ_HANDLED;
	}
	if ((status & I2C_SL_IRQ) == 0) {
		dev_err(nvec->dev, "Spurious IRQ\n");
		return IRQ_HANDLED;
	}

	/* The EC did not request a read, so it sent us something, read it */
	if ((status & RNW) == 0) {
		received = readl(nvec->base + I2C_SL_RCVD);
		if (status & RCVD)
			writel(0, nvec->base + I2C_SL_RCVD);
	}

	if (status == (I2C_SL_IRQ | RCVD))
		nvec->state = 0;

	switch (nvec->state) {
	case 0:		/* Verify that it's a transfer start, the rest later */
		if (status != (I2C_SL_IRQ | RCVD))
			nvec_invalid_flags(nvec, status, false);
		break;
	case 1:		/* command byte */
		if (status != I2C_SL_IRQ) {
			nvec_invalid_flags(nvec, status, true);
		} else {
			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
			/* Should not happen in a normal world */
			if (unlikely(nvec->rx == NULL)) {
				nvec->state = 0;
				break;
			}
			nvec->rx->data[0] = received;
			nvec->rx->pos = 1;
			nvec->state = 2;
		}
		break;
	case 2:		/* first byte after command */
		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
			udelay(33);
			if (nvec->rx->data[0] != 0x01) {
				dev_err(nvec->dev,
					"Read without prior read command\n");
				nvec->state = 0;
				break;
			}
			nvec_msg_free(nvec, nvec->rx);
			nvec->state = 3;
			nvec_tx_set(nvec);
			BUG_ON(nvec->tx->size < 1);
			to_send = nvec->tx->data[0];
			nvec->tx->pos = 1;
		} else if (status == (I2C_SL_IRQ)) {
			BUG_ON(nvec->rx == NULL);
			nvec->rx->data[1] = received;
			nvec->rx->pos = 2;
			nvec->state = 4;
		} else {
			nvec_invalid_flags(nvec, status, true);
		}
		break;
	case 3:		/* EC does a block read, we transmit data */
		if (status & END_TRANS) {
			nvec_tx_completed(nvec);
		} else if ((status & RNW) == 0 || (status & RCVD)) {
			nvec_invalid_flags(nvec, status, true);
		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
			to_send = nvec->tx->data[nvec->tx->pos++];
		} else {
			dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
				nvec->tx,
				(uint) (nvec->tx ? nvec->tx->pos : 0),
				(uint) (nvec->tx ? nvec->tx->size : 0));
			nvec->state = 0;
		}
		break;
	case 4:		/* EC does some write, we read the data */
		if ((status & (END_TRANS | RNW)) == END_TRANS)
			nvec_rx_completed(nvec);
		else if (status & (RNW | RCVD))
			nvec_invalid_flags(nvec, status, true);
		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
			nvec->rx->data[nvec->rx->pos++] = received;
		else
			dev_err(nvec->dev,
				"RX buffer overflow on %p: "
				"Trying to write byte %u of %u\n",
				nvec->rx, nvec->rx->pos, NVEC_MSG_SIZE);
		break;
	default:
		nvec->state = 0;
	}

	/* If we are told that a new transfer starts, verify it */
	if ((status & (RCVD | RNW)) == RCVD) {
		if (received != nvec->i2c_addr)
			dev_err(nvec->dev,
				"received address 0x%02x, expected 0x%02x\n",
				received, nvec->i2c_addr);
		nvec->state = 1;
	}

	/* Send data if requested, but not on end of transmission */
	if ((status & (RNW | END_TRANS)) == RNW)
		writel(to_send, nvec->base + I2C_SL_RCVD);

	/* If we have sent the first byte */
	if (status == (I2C_SL_IRQ | RNW | RCVD))
		nvec_gpio_set_value(nvec, 1);

	dev_dbg(nvec->dev,
		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
		(status & RNW) == 0 ? "received" : "R=",
		received,
		(status & (RNW | END_TRANS)) ? "sent" : "S=",
		to_send,
		state,
		status & END_TRANS ? " END_TRANS" : "",
		status & RCVD ? " RCVD" : "",
		status & RNW ? " RNW" : "");

	/*
	 * TODO: A correct fix needs to be found for this.
	 *
	 * We experience fewer incomplete messages with this delay than
	 * without it, but we don't know why. Help is appreciated.
	 */
	udelay(100);

	return IRQ_HANDLED;
}

static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
	u32 val;

	clk_enable(nvec->i2c_clk);

	tegra_periph_reset_assert(nvec->i2c_clk);
	udelay(2);
	tegra_periph_reset_deassert(nvec->i2c_clk);

	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
	    (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
	writel(val, nvec->base + I2C_CNFG);

	clk_set_rate(nvec->i2c_clk, 8 * 80000);

	writel(I2C_SL_NEWL, nvec->base + I2C_SL_CNFG);
	writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);

	writel(nvec->i2c_addr>>1, nvec->base + I2C_SL_ADDR1);
	writel(0, nvec->base + I2C_SL_ADDR2);

	enable_irq(nvec->irq);

	clk_disable(nvec->i2c_clk);
}

static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
	disable_irq(nvec->irq);
	writel(I2C_SL_NEWL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
	clk_disable(nvec->i2c_clk);
}

static void nvec_power_off(void)
{
	nvec_write_async(nvec_power_handle, EC_DISABLE_EVENT_REPORTING, 3);
	nvec_write_async(nvec_power_handle, "\x04\x01", 2);
}

static int __devinit tegra_nvec_probe(struct platform_device *pdev)
{
	int err, ret;
	struct clk *i2c_clk;
	struct nvec_platform_data *pdata = pdev->dev.platform_data;
	struct nvec_chip *nvec;
	struct nvec_msg *msg;
	struct resource *res;
	struct resource *iomem;
	void __iomem *base;

	nvec = kzalloc(sizeof(struct nvec_chip), GFP_KERNEL);
	if (nvec == NULL) {
		dev_err(&pdev->dev, "failed to reserve memory\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, nvec);
	nvec->dev = &pdev->dev;

	if (pdata) {
		nvec->gpio = pdata->gpio;
		nvec->i2c_addr = pdata->i2c_addr;
	} else if (nvec->dev->of_node) {
		nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0);
		if (nvec->gpio < 0) {
			dev_err(&pdev->dev, "no gpio specified");
			goto failed;
		}
		if (of_property_read_u32(nvec->dev->of_node, "slave-addr", &nvec->i2c_addr)) {
			dev_err(&pdev->dev, "no i2c address specified");
			goto failed;
		}
	} else {
		dev_err(&pdev->dev, "no platform data\n");
		goto failed;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -ENODEV;
	}

	iomem = request_mem_region(res->start, resource_size(res), pdev->name);
	if (!iomem) {
		dev_err(&pdev->dev, "I2C region already claimed\n");
		return -EBUSY;
	}

	base = ioremap(iomem->start, resource_size(iomem));
	if (!base) {
		dev_err(&pdev->dev, "Can't ioremap I2C region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "no irq resource?\n");
		ret = -ENODEV;
		goto err_iounmap;
	}

	i2c_clk = clk_get_sys("tegra-i2c.2", NULL);
	if (IS_ERR(i2c_clk)) {
		dev_err(nvec->dev, "failed to get controller clock\n");
		goto err_iounmap;
	}

	nvec->base = base;
	nvec->irq = res->start;
	nvec->i2c_clk = i2c_clk;
	nvec->rx = &nvec->msg_pool[0];

	/* Set the gpio to low when we've got something to say */
	err = gpio_request(nvec->gpio, "nvec gpio");
	if (err < 0)
		dev_err(nvec->dev, "couldn't request gpio\n");

	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);

	init_completion(&nvec->sync_write);
	init_completion(&nvec->ec_transfer);
	mutex_init(&nvec->sync_write_mutex);
	spin_lock_init(&nvec->tx_lock);
	spin_lock_init(&nvec->rx_lock);
	INIT_LIST_HEAD(&nvec->rx_data);
	INIT_LIST_HEAD(&nvec->tx_data);
	INIT_WORK(&nvec->rx_work, nvec_dispatch);
	INIT_WORK(&nvec->tx_work, nvec_request_master);
	nvec->wq = alloc_workqueue("nvec", WQ_NON_REENTRANT, 2);

	err = request_irq(nvec->irq, nvec_interrupt, 0, "nvec", nvec);
	if (err) {
		dev_err(nvec->dev, "couldn't request irq\n");
		goto failed;
	}
	disable_irq(nvec->irq);

	tegra_init_i2c_slave(nvec);

	clk_enable(i2c_clk);

	gpio_direction_output(nvec->gpio, 1);
	gpio_set_value(nvec->gpio, 1);

	/* enable event reporting */
	nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
			 sizeof(EC_ENABLE_EVENT_REPORTING));

	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);

	nvec_power_handle = nvec;
	pm_power_off = nvec_power_off;

	/* Get Firmware Version */
	msg = nvec_write_sync(nvec, EC_GET_FIRMWARE_VERSION,
			      sizeof(EC_GET_FIRMWARE_VERSION));

	if (msg) {
		dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
			 msg->data[4], msg->data[5], msg->data[6], msg->data[7]);

		nvec_msg_free(nvec, msg);
	}

	ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
			      ARRAY_SIZE(nvec_devices), base, 0);
	if (ret)
		dev_err(nvec->dev, "error adding subdevices\n");

	/* unmute speakers? */
	nvec_write_async(nvec, "\x0d\x10\x59\x95", 4);

	/* enable lid switch event */
	nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x02\x00", 7);

	/* enable power button event */
	nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x80\x00", 7);

	return 0;

err_iounmap:
	iounmap(base);
failed:
	kfree(nvec);
	return -ENOMEM;
}

static int __devexit tegra_nvec_remove(struct platform_device *pdev)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	nvec_write_async(nvec, EC_DISABLE_EVENT_REPORTING, 3);
	mfd_remove_devices(nvec->dev);
	free_irq(nvec->irq, nvec);
	iounmap(nvec->base);
	gpio_free(nvec->gpio);
	destroy_workqueue(nvec->wq);
	kfree(nvec);

	return 0;
}

#ifdef CONFIG_PM

static int tegra_nvec_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);
	struct nvec_msg *msg;

	dev_dbg(nvec->dev, "suspending\n");

	/* keep these sync or you'll break suspend */
	msg = nvec_write_sync(nvec, EC_DISABLE_EVENT_REPORTING, 3);
	if (msg)
		nvec_msg_free(nvec, msg);
	msg = nvec_write_sync(nvec, "\x04\x02", 2);
	if (msg)
		nvec_msg_free(nvec, msg);

	nvec_disable_i2c_slave(nvec);

	return 0;
}

static int tegra_nvec_resume(struct platform_device *pdev)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	dev_dbg(nvec->dev, "resuming\n");
	tegra_init_i2c_slave(nvec);
	nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING, 3);

	return 0;
}

#else
#define tegra_nvec_suspend NULL
#define tegra_nvec_resume NULL
#endif

/* Match table for of_platform binding */
static const struct of_device_id nvidia_nvec_of_match[] __devinitconst = {
	{ .compatible = "nvidia,nvec", },
	{},
};
MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);
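
/*
 * Device tree users bind against the "nvidia,nvec" compatible above.  A node
 * is expected to carry the properties read in tegra_nvec_probe(): the
 * "request-gpios" line used to signal the EC and the "slave-addr" of this
 * I2C slave interface, plus the memory and interrupt resources of the I2C
 * controller.  Sketch only, values are board specific:
 *
 *	nvec {
 *		compatible = "nvidia,nvec";
 *		request-gpios = <&gpio 170 0>;
 *		slave-addr = <138>;
 *	};
 */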

static struct platform_driver nvec_device_driver = {
	.probe   = tegra_nvec_probe,
	.remove  = __devexit_p(tegra_nvec_remove),
	.suspend = tegra_nvec_suspend,
	.resume  = tegra_nvec_resume,
	.driver  = {
		.name = "nvec",
		.owner = THIS_MODULE,
		.of_match_table = nvidia_nvec_of_match,
	}
};

static int __init tegra_nvec_init(void)
{
	return platform_driver_register(&nvec_device_driver);
}

module_init(tegra_nvec_init);

MODULE_ALIAS("platform:nvec");
MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
MODULE_LICENSE("GPL");