#include "libcflat.h"
#include "smp.h"
#include "asm/io.h"
#include "asm/page.h"
#include "vmalloc.h"
#ifndef USE_SERIAL
#define USE_SERIAL
#endif

static struct spinlock lock;
static int serial_iobase = 0x3f8;
static int serial_inited = 0;

static void serial_outb(char ch)
{
	u8 lsr;

	/* wait for the transmit holding register to be empty (LSR bit 5) */
	do {
		lsr = inb(serial_iobase + 0x05);
	} while (!(lsr & 0x20));

	outb(ch, serial_iobase + 0x00);
}

static void serial_put(char ch)
{
	/* Force carriage return to be performed on \n */
	if (ch == '\n')
		serial_outb('\r');
	serial_outb(ch);
}

static void serial_init(void)
{
	u8 lcr;

	/* set DLAB */
	lcr = inb(serial_iobase + 0x03);
	lcr |= 0x80;
	outb(lcr, serial_iobase + 0x03);

	/* set baud rate to 115200 (divisor = 1) */
	outb(0x01, serial_iobase + 0x00);
	outb(0x00, serial_iobase + 0x01);

	/* clear DLAB */
	lcr = inb(serial_iobase + 0x03);
	lcr &= ~0x80;
	outb(lcr, serial_iobase + 0x03);

	/* IER: disable interrupts */
	outb(0x00, serial_iobase + 0x01);
	/* LCR: 8 bits, no parity, one stop bit */
	outb(0x03, serial_iobase + 0x03);
	/* FCR: disable FIFO queues */
	outb(0x00, serial_iobase + 0x02);
	/* MCR: RTS, DTR on */
	outb(0x03, serial_iobase + 0x04);
}

static void print_serial(const char *buf)
{
	unsigned long len = strlen(buf);
#ifdef USE_SERIAL
	unsigned long i;
	if (!serial_inited) {
		serial_init();
		serial_inited = 1;
	}

	for (i = 0; i < len; i++) {
		serial_put(buf[i]);
	}
#else
	asm volatile ("rep/outsb" : "+S"(buf), "+c"(len) : "d"(0xf1));
#endif
}

void puts(const char *s)
{
	spin_lock(&lock);
	print_serial(s);
	spin_unlock(&lock);
}

void exit(int code)
{
#ifdef USE_SERIAL
	static const char shutdown_str[8] = "Shutdown";
	int i;

	/* test device exit (with status) */
	outl(code, 0xf4);

	/* if that failed, try the Bochs poweroff port */
	for (i = 0; i < 8; i++) {
		outb(shutdown_str[i], 0x8900);
	}
#else
	asm volatile("out %0, %1" : : "a"(code), "d"((short)0xf4));
#endif

	/* Fallback */
	while (1) {
		asm volatile("hlt" ::: "memory");
	}
}

void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
{
	phys_addr_t base = phys_addr & PAGE_MASK;
	phys_addr_t offset = phys_addr - base;

	/*
	 * The kernel sets PTEs for an ioremap() with page cache disabled,
	 * but we do not do that right now. It would make sense that I/O
	 * mappings would be uncached - and may help us find bugs when we
	 * properly map that way.
	 */
	return vmap(phys_addr, size) + offset;
}