/*
 * Driver for CSR SiRFprimaII onboard UARTs.
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/sysrq.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>

#include "sirfsoc_uart.h"

static unsigned int
sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
static struct uart_driver sirfsoc_uart_drv;

static void sirfsoc_uart_tx_dma_complete_callback(void *param);
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port);
static void sirfsoc_uart_rx_dma_complete_callback(void *param);
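
/*
 * Precomputed divisor register values for common baud rates when the
 * UART input clock runs at 150 MHz; set_termios() only consults this
 * table when ioclk_rate == 150000000 and otherwise computes the
 * divisor at runtime via sirfsoc_uart_calc_sample_div().
 */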
static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
	{4000000, 2359296},
	{3500000, 1310721},
	{3000000, 1572865},
	{2500000, 1245186},
	{2000000, 1572866},
	{1500000, 1245188},
	{1152000, 1638404},
	{1000000, 1572869},
	{921600, 1114120},
	{576000, 1245196},
	{500000, 1245198},
	{460800, 1572876},
	{230400, 1310750},
	{115200, 1310781},
	{57600, 1310843},
	{38400, 1114328},
	{19200, 1114545},
	{9600, 1114979},
};

static struct sirfsoc_uart_port *sirf_ports[SIRFSOC_UART_NR];

static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
{
	return container_of(port, struct sirfsoc_uart_port, port);
}

static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
{
	unsigned long reg;
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;

	reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status);
	return (reg & ufifo_st->ff_empty(port)) ? TIOCSER_TEMT : 0;
}

static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;

	if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
		goto cts_asserted;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) &
						SIRFUART_AFC_CTS_STATUS))
			goto cts_asserted;
		else
			goto cts_deasserted;
	} else {
		if (!gpio_get_value(sirfport->cts_gpio))
			goto cts_asserted;
		else
			goto cts_deasserted;
	}
cts_deasserted:
	return TIOCM_CAR | TIOCM_DSR;
cts_asserted:
	return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}

static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	unsigned int assert = mctrl & TIOCM_RTS;
	unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
	unsigned int current_val;

	if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
		return;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF;
		val |= current_val;
		wr_regl(port, ureg->sirfsoc_afc_ctrl, val);
	} else {
		if (!val)
			gpio_set_value(sirfport->rts_gpio, 1);
		else
			gpio_set_value(sirfport->rts_gpio, 0);
	}
}

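/*
 * Stop TX: if a DMA transfer is running, pause the channel so it can
 * be resumed later; otherwise just mask the TX-FIFO-empty interrupt
 * (and, on USP ports in PIO mode, clear the TX enable bit).
 */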
static void sirfsoc_uart_stop_tx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (sirfport->tx_dma_chan) {
		if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
			dmaengine_pause(sirfport->tx_dma_chan);
			sirfport->tx_dma_state = TX_DMA_PAUSE;
		} else {
			if (!sirfport->is_atlas7)
				wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~uint_en->sirfsoc_txfifo_empty_en);
			else
				wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
				uint_en->sirfsoc_txfifo_empty_en);
		}
	} else {
		if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
			wr_regl(port, ureg->sirfsoc_tx_rx_en, rd_regl(port,
				ureg->sirfsoc_tx_rx_en) & ~SIRFUART_TX_EN);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
				uint_en->sirfsoc_txfifo_empty_en);
	}
}

static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long tran_size;
	unsigned long tran_start;
	unsigned long pio_tx_size;

	tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	tran_start = (unsigned long)(xmit->buf + xmit->tail);
	if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
			!tran_size)
		return;
	if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
		dmaengine_resume(sirfport->tx_dma_chan);
		return;
	}
	if (sirfport->tx_dma_state == TX_DMA_RUNNING)
		return;
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)&
				~(uint_en->sirfsoc_txfifo_empty_en));
	else
		wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
				uint_en->sirfsoc_txfifo_empty_en);
	/*
	 * DMA requires that both the buffer address and the buffer length
	 * be aligned to 4 bytes, so fall back to PIO in two cases:
	 * 1. if the address is not 4-byte aligned, send the first 1~3
	 * bytes by PIO, then hand the remaining 4-byte-aligned part to DMA
	 * 2. if the buffer length is not 4-byte aligned, send the aligned
	 * part by DMA first, then the trailing 1~3 bytes by PIO
	 */
	if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
			SIRFUART_IO_MODE);
		if (BYTES_TO_ALIGN(tran_start)) {
			pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
				BYTES_TO_ALIGN(tran_start));
			tran_size -= pio_tx_size;
		}
		if (tran_size < 4)
			sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)|
				uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				uint_en->sirfsoc_txfifo_empty_en);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
	} else {
		/* tx transfer mode switch into dma mode */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
			~SIRFUART_IO_MODE);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
		tran_size &= ~(0x3);

		sirfport->tx_dma_addr = dma_map_single(port->dev,
			xmit->buf + xmit->tail,
			tran_size, DMA_TO_DEVICE);
		sirfport->tx_dma_desc = dmaengine_prep_slave_single(
			sirfport->tx_dma_chan, sirfport->tx_dma_addr,
			tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!sirfport->tx_dma_desc) {
			dev_err(port->dev, "DMA prep slave single fail\n");
			return;
		}
		sirfport->tx_dma_desc->callback =
			sirfsoc_uart_tx_dma_complete_callback;
		sirfport->tx_dma_desc->callback_param = (void *)sirfport;
		sirfport->transfer_size = tran_size;

		dmaengine_submit(sirfport->tx_dma_desc);
		dma_async_issue_pending(sirfport->tx_dma_chan);
		sirfport->tx_dma_state = TX_DMA_RUNNING;
	}
}

static void sirfsoc_uart_start_tx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (sirfport->tx_dma_chan)
		sirfsoc_uart_tx_with_dma(sirfport);
	else {
		if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
			wr_regl(port, ureg->sirfsoc_tx_rx_en, rd_regl(port,
				ureg->sirfsoc_tx_rx_en) | SIRFUART_TX_EN);
		sirfsoc_uart_pio_tx_chars(sirfport, port->fifosize);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)|
					uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_txfifo_empty_en);
	}
}

static void sirfsoc_uart_stop_rx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
	if (sirfport->rx_dma_chan) {
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type) |
				uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
				SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type)|
				uint_en->sirfsoc_rx_done_en);
		dmaengine_terminate_all(sirfport->rx_dma_chan);
	} else {
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)&
				~(SIRFUART_RX_IO_INT_EN(uint_en,
				sirfport->uart_reg->uart_type)));
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
				SIRFUART_RX_IO_INT_EN(uint_en,
				sirfport->uart_reg->uart_type));
	}
}

static void sirfsoc_uart_disable_ms(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (!sirfport->hw_flow_ctrl)
		return;
	sirfport->ms_enabled = false;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		wr_regl(port, ureg->sirfsoc_afc_ctrl,
				rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)&
					~uint_en->sirfsoc_cts_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
					uint_en->sirfsoc_cts_en);
	} else
		disable_irq(gpio_to_irq(sirfport->cts_gpio));
}

static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
	struct uart_port *port = &sirfport->port;
	spin_lock(&port->lock);
	if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
		uart_handle_cts_change(port,
				!gpio_get_value(sirfport->cts_gpio));
	spin_unlock(&port->lock);
	return IRQ_HANDLED;
}

static void sirfsoc_uart_enable_ms(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (!sirfport->hw_flow_ctrl)
		return;
	sirfport->ms_enabled = true;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		wr_regl(port, ureg->sirfsoc_afc_ctrl,
				rd_regl(port, ureg->sirfsoc_afc_ctrl) |
				SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN |
				SIRFUART_AFC_CTRL_RX_THD);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					| uint_en->sirfsoc_cts_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_cts_en);
	} else
		enable_irq(gpio_to_irq(sirfport->cts_gpio));
}

static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;

	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		unsigned long ulcon = rd_regl(port, ureg->sirfsoc_line_ctrl);

		if (break_state)
			ulcon |= SIRFUART_SET_BREAK;
		else
			ulcon &= ~SIRFUART_SET_BREAK;
		wr_regl(port, ureg->sirfsoc_line_ctrl, ulcon);
	}
}

static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	unsigned int ch, rx_count = 0;
	struct tty_struct *tty;

	tty = tty_port_tty_get(&port->state->port);
	if (!tty)
		return -ENODEV;
	while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
					ufifo_st->ff_empty(port))) {
		ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) |
			SIRFUART_DUMMY_READ;
		if (unlikely(uart_handle_sysrq_char(port, ch)))
			continue;
		uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
		rx_count++;
		if (rx_count >= max_rx_count)
			break;
	}

	sirfport->rx_io_count += rx_count;
	port->icount.rx += rx_count;

	return rx_count;
}

static unsigned int
sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int num_tx = 0;

	while (!uart_circ_empty(xmit) &&
		!(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
					ufifo_st->ff_full(port)) &&
		count--) {
		wr_regl(port, ureg->sirfsoc_tx_fifo_data,
				xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
		num_tx++;
	}
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	return num_tx;
}

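/*
 * TX DMA completion: advance the circular buffer tail past the chunk
 * just transferred, unmap the DMA buffer, and immediately try to
 * queue the next chunk from the same context.
 */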
static void sirfsoc_uart_tx_dma_complete_callback(void *param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	xmit->tail = (xmit->tail + sirfport->transfer_size) &
				(UART_XMIT_SIZE - 1);
	port->icount.tx += sirfport->transfer_size;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (sirfport->tx_dma_addr)
		dma_unmap_single(port->dev, sirfport->tx_dma_addr,
				sirfport->transfer_size, DMA_TO_DEVICE);
	sirfport->tx_dma_state = TX_DMA_IDLE;
	sirfsoc_uart_tx_with_dma(sirfport);
	spin_unlock_irqrestore(&port->lock, flags);
}

static void sirfsoc_uart_insert_rx_buf_to_tty(
		struct sirfsoc_uart_port *sirfport, int count)
{
	struct uart_port *port = &sirfport->port;
	struct tty_port *tport = &port->state->port;
	int inserted;

	inserted = tty_insert_flip_string(tport,
		sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
	port->icount.rx += inserted;
}

static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);

	sirfport->rx_dma_items[index].xmit.tail =
		sirfport->rx_dma_items[index].xmit.head = 0;
	sirfport->rx_dma_items[index].desc =
		dmaengine_prep_slave_single(sirfport->rx_dma_chan,
		sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
		DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (IS_ERR_OR_NULL(sirfport->rx_dma_items[index].desc)) {
		dev_err(port->dev, "DMA slave single fail\n");
		return;
	}
	sirfport->rx_dma_items[index].desc->callback =
		sirfsoc_uart_rx_dma_complete_callback;
	sirfport->rx_dma_items[index].desc->callback_param = sirfport;
	sirfport->rx_dma_items[index].cookie =
		dmaengine_submit(sirfport->rx_dma_items[index].desc);
	dma_async_issue_pending(sirfport->rx_dma_chan);
}

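/*
 * RX timeout tasklet: drain every loop buffer whose DMA descriptor has
 * completed, pick up the partially filled one, then switch the RX path
 * to IO mode so the last few bytes can be read from the FIFO by PIO.
 */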
static void sirfsoc_rx_tmo_process_tl(unsigned long param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
	unsigned int count;
	struct dma_tx_state tx_state;
	unsigned long flags;
	int i = 0;

	spin_lock_irqsave(&port->lock, flags);
	while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
		sirfport->rx_dma_items[sirfport->rx_completed].cookie,
		&tx_state)) {
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
					SIRFSOC_RX_DMA_BUF_SIZE);
		sirfport->rx_completed++;
		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
		i++;
		if (i > SIRFSOC_RX_LOOP_BUF_CNT)
			break;
	}
	count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
		sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
		SIRFSOC_RX_DMA_BUF_SIZE);
	if (count > 0)
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
			SIRFUART_IO_MODE);
	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
	if (sirfport->rx_io_count == 4) {
		sirfport->rx_io_count = 0;
		wr_regl(port, ureg->sirfsoc_int_st_reg,
				uint_st->sirfsoc_rx_done);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
					uint_en->sirfsoc_rx_done_en);
		sirfsoc_uart_start_next_rx_dma(port);
	} else {
		wr_regl(port, ureg->sirfsoc_int_st_reg,
				uint_st->sirfsoc_rx_done);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) |
				(uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_rx_done_en);
	}
	spin_unlock_irqrestore(&port->lock, flags);
	tty_flip_buffer_push(&port->state->port);
}

static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct dma_tx_state tx_state;
	dmaengine_tx_status(sirfport->rx_dma_chan,
		sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
	dmaengine_terminate_all(sirfport->rx_dma_chan);
	sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
		SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			rd_regl(port, ureg->sirfsoc_int_en_reg) &
			~(uint_en->sirfsoc_rx_timeout_en));
	else
		wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
				uint_en->sirfsoc_rx_timeout_en);
	tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
}

static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;

	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
	if (sirfport->rx_io_count == 4) {
		sirfport->rx_io_count = 0;
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
					uint_en->sirfsoc_rx_done_en);
		wr_regl(port, ureg->sirfsoc_int_st_reg,
				uint_st->sirfsoc_rx_timeout);
		sirfsoc_uart_start_next_rx_dma(port);
	}
}

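/*
 * Top-level interrupt handler: acknowledges the pending status bits,
 * then dispatches error, CTS, RX (DMA or PIO) and TX-FIFO-empty
 * sources in turn, pushing received characters to the tty layer.
 */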
static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
{
	unsigned long intr_status;
	unsigned long cts_status;
	unsigned long flag = TTY_NORMAL;
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct uart_state *state = port->state;
	struct circ_buf *xmit = &port->state->xmit;

	spin_lock(&port->lock);
	intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
	wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
	intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
	if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(uint_st,
				sirfport->uart_reg->uart_type)))) {
		if (intr_status & uint_st->sirfsoc_rxd_brk) {
			port->icount.brk++;
			if (uart_handle_break(port))
				goto recv_char;
		}
		if (intr_status & uint_st->sirfsoc_rx_oflow) {
			port->icount.overrun++;
			flag = TTY_OVERRUN;
		}
		if (intr_status & uint_st->sirfsoc_frm_err) {
			port->icount.frame++;
			flag = TTY_FRAME;
		}
		if (intr_status & uint_st->sirfsoc_parity_err) {
			port->icount.parity++;
			flag = TTY_PARITY;
		}
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
		intr_status &= port->read_status_mask;
		uart_insert_char(port, intr_status,
					uint_en->sirfsoc_rx_oflow_en, 0, flag);
	}
recv_char:
	if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
			(intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
			!sirfport->tx_dma_state) {
		cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
					SIRFUART_AFC_CTS_STATUS;
		if (cts_status != 0)
			cts_status = 0;
		else
			cts_status = 1;
		uart_handle_cts_change(port, cts_status);
		wake_up_interruptible(&state->port.delta_msr_wait);
	}
	if (sirfport->rx_dma_chan) {
		if (intr_status & uint_st->sirfsoc_rx_timeout)
			sirfsoc_uart_handle_rx_tmo(sirfport);
		if (intr_status & uint_st->sirfsoc_rx_done)
			sirfsoc_uart_handle_rx_done(sirfport);
	} else if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st)) {
		/*
		 * The chip raises a continuous stream of RX_TIMEOUT
		 * interrupts while the RX FIFO is empty, and none while the
		 * FIFO receives data within the timeout window, so relying
		 * on RX_TIMEOUT alone generates many useless interrupts on
		 * an empty FIFO. Receiving a single byte raises RX_DONE, so
		 * use RX_DONE to wait for data to arrive in the RX FIFO,
		 * RX_THD/RX_FULL for bulk reception, and RX_TIMEOUT only
		 * for the last remaining bytes.
		 */
		if (intr_status & uint_st->sirfsoc_rx_done) {
			if (!sirfport->is_atlas7) {
				wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					& ~(uint_en->sirfsoc_rx_done_en));
				wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)
				| (uint_en->sirfsoc_rx_timeout_en));
			} else {
				wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
					uint_en->sirfsoc_rx_done_en);
				wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_rx_timeout_en);
			}
		} else {
			if (intr_status & uint_st->sirfsoc_rx_timeout) {
				if (!sirfport->is_atlas7) {
					wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					& ~(uint_en->sirfsoc_rx_timeout_en));
					wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					| (uint_en->sirfsoc_rx_done_en));
				} else {
					wr_regl(port,
						ureg->sirfsoc_int_en_clr_reg,
						uint_en->sirfsoc_rx_timeout_en);
					wr_regl(port, ureg->sirfsoc_int_en_reg,
						uint_en->sirfsoc_rx_done_en);
				}
			}
			sirfsoc_uart_pio_rx_chars(port, port->fifosize);
		}
	}
	spin_unlock(&port->lock);
	tty_flip_buffer_push(&state->port);
	spin_lock(&port->lock);
	if (intr_status & uint_st->sirfsoc_txfifo_empty) {
		if (sirfport->tx_dma_chan)
			sirfsoc_uart_tx_with_dma(sirfport);
		else {
			if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
				spin_unlock(&port->lock);
				return IRQ_HANDLED;
			} else {
				sirfsoc_uart_pio_tx_chars(sirfport,
						port->fifosize);
				if ((uart_circ_empty(xmit)) &&
				(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
				ufifo_st->ff_empty(port)))
					sirfsoc_uart_stop_tx(port);
			}
		}
	}
	spin_unlock(&port->lock);

	return IRQ_HANDLED;
}

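/*
 * RX DMA completion is split in two: the dmaengine callback below only
 * advances rx_issued and schedules this tasklet, which copies each
 * completed loop buffer into the tty and resubmits its descriptor.
 */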
static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct dma_tx_state tx_state;
	unsigned long flags;
	int i = 0;

	spin_lock_irqsave(&port->lock, flags);
	while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
		sirfport->rx_dma_items[sirfport->rx_completed].cookie,
		&tx_state)) {
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
					SIRFSOC_RX_DMA_BUF_SIZE);
		if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
				uint_en->sirfsoc_rx_timeout_en)
			sirfsoc_rx_submit_one_dma_desc(port,
					sirfport->rx_completed++);
		else
			sirfport->rx_completed++;
		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
		i++;
		if (i > SIRFSOC_RX_LOOP_BUF_CNT)
			break;
	}
	spin_unlock_irqrestore(&port->lock, flags);
	tty_flip_buffer_push(&port->state->port);
}

static void sirfsoc_uart_rx_dma_complete_callback(void *param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	unsigned long flags;

	spin_lock_irqsave(&sirfport->port.lock, flags);
	sirfport->rx_issued++;
	sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
	tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
	spin_unlock_irqrestore(&sirfport->port.lock, flags);
}

/* submit rx dma task into dmaengine */
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	int i;

	sirfport->rx_io_count = 0;
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
		~SIRFUART_IO_MODE);
	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
		sirfsoc_rx_submit_one_dma_desc(port, i);
	sirfport->rx_completed = sirfport->rx_issued = 0;
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) |
				SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type));
	else
		wr_regl(port, ureg->sirfsoc_int_en_reg,
				SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type));
}

static void sirfsoc_uart_start_rx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	sirfport->rx_io_count = 0;
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
	if (sirfport->rx_dma_chan)
		sirfsoc_uart_start_next_rx_dma(port);
	else {
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) |
				SIRFUART_RX_IO_INT_EN(uint_en,
					sirfport->uart_reg->uart_type));
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				SIRFUART_RX_IO_INT_EN(uint_en,
					sirfport->uart_reg->uart_type));
	}
}

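/*
 * Search every legal sample divisor for the ioclk divisor pair whose
 * rounding error against the requested rate is smallest; returns the
 * ioclk divisor and reports the sample divisor through *sample_reg.
 */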
static unsigned int
sirfsoc_usp_calc_sample_div(unsigned long set_rate,
		unsigned long ioclk_rate, unsigned long *sample_reg)
{
	unsigned long min_delta = ~0UL;
	unsigned short sample_div;
	unsigned long ioclk_div = 0;
	unsigned long temp_delta;

	for (sample_div = SIRF_USP_MIN_SAMPLE_DIV;
			sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
		temp_delta = ioclk_rate -
		(ioclk_rate + (set_rate * sample_div) / 2)
		/ (set_rate * sample_div) * set_rate * sample_div;

		temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
		if (temp_delta < min_delta) {
			ioclk_div = (2 * ioclk_rate /
				(set_rate * sample_div) + 1) / 2 - 1;
			if (ioclk_div > SIRF_IOCLK_DIV_MAX)
				continue;
			min_delta = temp_delta;
			*sample_reg = sample_div;
			if (!temp_delta)
				break;
		}
	}
	return ioclk_div;
}

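/*
 * For a real UART the achieved rate is
 *	baud = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1))
 * exactly as computed below, and the chosen divisor pair is packed
 * into one register value. As a worked example of the formula (not
 * necessarily the pair the loop settles on): with a 150 MHz ioclk and
 * a 115200 request, sample_div = 12 yields ioclk_div = 99, giving
 * 150000000 / (100 * 13) ~= 115385, about 0.16% high.
 */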
static unsigned int
sirfsoc_uart_calc_sample_div(unsigned long baud_rate,
			unsigned long ioclk_rate, unsigned long *set_baud)
{
	unsigned long min_delta = ~0UL;
	unsigned short sample_div;
	unsigned int regv = 0;
	unsigned long ioclk_div;
	unsigned long baud_tmp;
	int temp_delta;

	for (sample_div = SIRF_MIN_SAMPLE_DIV;
			sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
		ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
		if (ioclk_div > SIRF_IOCLK_DIV_MAX)
			continue;
		baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
		temp_delta = baud_tmp - baud_rate;
		temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
		if (temp_delta < min_delta) {
			regv = regv & (~SIRF_IOCLK_DIV_MASK);
			regv = regv | ioclk_div;
			regv = regv & (~SIRF_SAMPLE_DIV_MASK);
			regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
			min_delta = temp_delta;
			*set_baud = baud_tmp;
		}
	}
	return regv;
}

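/*
 * Apply termios settings: frame format and parity, read/ignore status
 * masks, baud divisors (table lookup for the 150 MHz clock, computed
 * otherwise), FIFO thresholds and the RX timeout for the final rate.
 */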
static void sirfsoc_uart_set_termios(struct uart_port *port,
				       struct ktermios *termios,
				       struct ktermios *old)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	unsigned long	config_reg = 0;
	unsigned long	baud_rate;
	unsigned long	set_baud;
	unsigned long	flags;
	unsigned long	ic;
	unsigned int	clk_div_reg = 0;
	unsigned long	txfifo_op_reg, ioclk_rate;
	unsigned long	rx_time_out;
	int		threshold_div;
	u32		data_bit_len, stop_bit_len, len_val;
	unsigned long	sample_div_reg = 0xf;
	ioclk_rate	= port->uartclk;

	switch (termios->c_cflag & CSIZE) {
	default:
	case CS8:
		data_bit_len = 8;
		config_reg |= SIRFUART_DATA_BIT_LEN_8;
		break;
	case CS7:
		data_bit_len = 7;
		config_reg |= SIRFUART_DATA_BIT_LEN_7;
		break;
	case CS6:
		data_bit_len = 6;
		config_reg |= SIRFUART_DATA_BIT_LEN_6;
		break;
	case CS5:
		data_bit_len = 5;
		config_reg |= SIRFUART_DATA_BIT_LEN_5;
		break;
	}
	if (termios->c_cflag & CSTOPB) {
		config_reg |= SIRFUART_STOP_BIT_LEN_2;
		stop_bit_len = 2;
	} else
		stop_bit_len = 1;

	spin_lock_irqsave(&port->lock, flags);
	port->read_status_mask = uint_en->sirfsoc_rx_oflow_en;
	port->ignore_status_mask = 0;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		if (termios->c_iflag & INPCK)
			port->read_status_mask |= uint_en->sirfsoc_frm_err_en |
				uint_en->sirfsoc_parity_err_en;
	} else {
		if (termios->c_iflag & INPCK)
			port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
	}
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |=
				uint_en->sirfsoc_frm_err_en |
				uint_en->sirfsoc_parity_err_en;
		if (termios->c_cflag & PARENB) {
			if (termios->c_cflag & CMSPAR) {
				if (termios->c_cflag & PARODD)
					config_reg |= SIRFUART_STICK_BIT_MARK;
				else
					config_reg |= SIRFUART_STICK_BIT_SPACE;
			} else {
				if (termios->c_cflag & PARODD)
					config_reg |= SIRFUART_STICK_BIT_ODD;
				else
					config_reg |= SIRFUART_STICK_BIT_EVEN;
			}
		}
	} else {
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |=
				uint_en->sirfsoc_frm_err_en;
		if (termios->c_cflag & PARENB)
			dev_warn(port->dev,
					"USP-UART does not support parity\n");
	}
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |=
			uint_en->sirfsoc_rxd_brk_en;
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |=
				uint_en->sirfsoc_rx_oflow_en;
	}
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= SIRFUART_DUMMY_READ;
	/* Hardware Flow Control Settings */
	if (UART_ENABLE_MS(port, termios->c_cflag)) {
		if (!sirfport->ms_enabled)
			sirfsoc_uart_enable_ms(port);
	} else {
		if (sirfport->ms_enabled)
			sirfsoc_uart_disable_ms(port);
	}
	baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
	if (ioclk_rate == 150000000) {
		for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
			if (baud_rate == baudrate_to_regv[ic].baud_rate)
				clk_div_reg = baudrate_to_regv[ic].reg_val;
	}
	set_baud = baud_rate;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		if (unlikely(clk_div_reg == 0))
			clk_div_reg = sirfsoc_uart_calc_sample_div(baud_rate,
					ioclk_rate, &set_baud);
		wr_regl(port, ureg->sirfsoc_divisor, clk_div_reg);
	} else {
		clk_div_reg = sirfsoc_usp_calc_sample_div(baud_rate,
				ioclk_rate, &sample_div_reg);
		sample_div_reg--;
		set_baud = ((ioclk_rate / (clk_div_reg+1) - 1) /
				(sample_div_reg + 1));
		/* setting usp mode 2 */
		len_val = ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET) |
				(1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET));
		len_val |= ((clk_div_reg & SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK)
				<< SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET);
		wr_regl(port, ureg->sirfsoc_mode2, len_val);
	}
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, set_baud, set_baud);
	/* set receive timeout && data bits len */
	rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
	rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
	txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_STOP);
	wr_regl(port, ureg->sirfsoc_tx_fifo_op,
			(txfifo_op_reg & ~SIRFUART_FIFO_START));
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		config_reg |= SIRFUART_UART_RECV_TIMEOUT(rx_time_out);
		wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg);
	} else {
		/*tx frame ctrl*/
		len_val = (data_bit_len - 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET;
		len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
				SIRFSOC_USP_TX_FRAME_LEN_OFFSET;
		len_val |= ((data_bit_len - 1) <<
				SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET);
		len_val |= (((clk_div_reg & 0xc00) >> 10) <<
				SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET);
		wr_regl(port, ureg->sirfsoc_tx_frame_ctrl, len_val);
		/*rx frame ctrl*/
		len_val = (data_bit_len - 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET;
		len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
				SIRFSOC_USP_RX_FRAME_LEN_OFFSET;
		len_val |= (data_bit_len - 1) <<
				SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET;
		len_val |= (((clk_div_reg & 0xf000) >> 12) <<
				SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET);
		wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val);
		/*async param*/
		wr_regl(port, ureg->sirfsoc_async_param_reg,
			(SIRFUART_USP_RECV_TIMEOUT(rx_time_out)) |
			(sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
			SIRFSOC_USP_ASYNC_DIV2_OFFSET);
	}
	if (sirfport->tx_dma_chan)
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
	else
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
	if (sirfport->rx_dma_chan)
		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
	else
		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
	/* Reset Rx/Tx FIFO Threshold level for proper baudrate */
	if (set_baud < 1000000)
		threshold_div = 1;
	else
		threshold_div = 2;
	wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl,
				SIRFUART_FIFO_THD(port) / threshold_div);
	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl,
				SIRFUART_FIFO_THD(port) / threshold_div);
	txfifo_op_reg |= SIRFUART_FIFO_START;
	wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
	uart_update_timeout(port, termios->c_cflag, set_baud);
	sirfsoc_uart_start_rx(port);
	wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
	spin_unlock_irqrestore(&port->lock, flags);
}

static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
			      unsigned int oldstate)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);

	if (!state)
		clk_prepare_enable(sirfport->clk);
	else
		clk_disable_unprepare(sirfport->clk);
}

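/*
 * ->startup: request the port IRQ (kept disabled until the hardware is
 * initialized), program FIFO and DMA defaults, and on USP ports with
 * flow control hook the CTS gpio as an edge-triggered interrupt.
 */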
static int sirfsoc_uart_startup(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport	= to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	unsigned int index			= port->line;
	int ret;

	set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN);
	ret = request_irq(port->irq,
				sirfsoc_uart_isr,
				0,
				SIRFUART_PORT_NAME,
				sirfport);
	if (ret != 0) {
		dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
							index, port->irq);
		goto irq_err;
	}
	/* initial hardware settings */
	wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
		SIRFUART_IO_MODE);
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
		SIRFUART_IO_MODE);
	wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
	wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
	wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
		wr_regl(port, ureg->sirfsoc_mode1,
			SIRFSOC_USP_ENDIAN_CTRL_LSBF |
			SIRFSOC_USP_EN);
	wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
	wr_regl(port, ureg->sirfsoc_tx_fifo_op, 0);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
	wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
	if (sirfport->rx_dma_chan)
		wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
			SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
			SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
			SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
	if (sirfport->tx_dma_chan) {
		sirfport->tx_dma_state = TX_DMA_IDLE;
		wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
				SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
				SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
				SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
	}
	sirfport->ms_enabled = false;
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
		sirfport->hw_flow_ctrl) {
		set_irq_flags(gpio_to_irq(sirfport->cts_gpio),
			IRQF_VALID | IRQF_NOAUTOEN);
		ret = request_irq(gpio_to_irq(sirfport->cts_gpio),
			sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING |
			IRQF_TRIGGER_RISING, "usp_cts_irq", sirfport);
		if (ret != 0) {
			dev_err(port->dev, "UART-USP:request gpio irq fail\n");
			goto init_rx_err;
		}
	}

	enable_irq(port->irq);

	return 0;
init_rx_err:
	free_irq(port->irq, sirfport);
irq_err:
	return ret;
}

static void sirfsoc_uart_shutdown(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;

	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
	else
		wr_regl(port, ureg->sirfsoc_int_en_clr_reg, ~0UL);

	free_irq(port->irq, sirfport);
	if (sirfport->ms_enabled)
		sirfsoc_uart_disable_ms(port);
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
			sirfport->hw_flow_ctrl) {
		gpio_set_value(sirfport->rts_gpio, 1);
		free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
	}
	if (sirfport->tx_dma_chan)
		sirfport->tx_dma_state = TX_DMA_IDLE;
}

static const char *sirfsoc_uart_type(struct uart_port *port)
{
	return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL;
}

static int sirfsoc_uart_request_port(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param;
	void *ret;
	ret = request_mem_region(port->mapbase,
		SIRFUART_MAP_SIZE, uart_param->port_name);
	return ret ? 0 : -EBUSY;
}

static void sirfsoc_uart_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
}

static void sirfsoc_uart_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = SIRFSOC_PORT_TYPE;
		sirfsoc_uart_request_port(port);
	}
}

static struct uart_ops sirfsoc_uart_ops = {
	.tx_empty	= sirfsoc_uart_tx_empty,
	.get_mctrl	= sirfsoc_uart_get_mctrl,
	.set_mctrl	= sirfsoc_uart_set_mctrl,
	.stop_tx	= sirfsoc_uart_stop_tx,
	.start_tx	= sirfsoc_uart_start_tx,
	.stop_rx	= sirfsoc_uart_stop_rx,
	.enable_ms	= sirfsoc_uart_enable_ms,
	.break_ctl	= sirfsoc_uart_break_ctl,
	.startup	= sirfsoc_uart_startup,
	.shutdown	= sirfsoc_uart_shutdown,
	.set_termios	= sirfsoc_uart_set_termios,
	.pm		= sirfsoc_uart_pm,
	.type		= sirfsoc_uart_type,
	.release_port	= sirfsoc_uart_release_port,
	.request_port	= sirfsoc_uart_request_port,
	.config_port	= sirfsoc_uart_config_port,
};

#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
static int __init
sirfsoc_uart_console_setup(struct console *co, char *options)
{
	unsigned int baud = 115200;
	unsigned int bits = 8;
	unsigned int parity = 'n';
	unsigned int flow = 'n';
	struct sirfsoc_uart_port *sirfport;
	struct sirfsoc_register *ureg;

	if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
		return -EINVAL;
	sirfport = sirf_ports[co->index];
	if (!sirfport)
		return -ENODEV;
	ureg = &sirfport->uart_reg->uart_reg;
	if (!sirfport->port.mapbase)
		return -ENODEV;

	/* enable usp in mode1 register */
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
		wr_regl(&sirfport->port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
				SIRFSOC_USP_ENDIAN_CTRL_LSBF);
	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	sirfport->port.cons = co;

	/* default console tx/rx transfer using io mode */
	sirfport->rx_dma_chan = NULL;
	sirfport->tx_dma_chan = NULL;
	return uart_set_options(&sirfport->port, co, baud, parity, bits, flow);
}

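/* Busy-wait until the TX FIFO has room, then write one character. */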
static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;

	while (rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
		ufifo_st->ff_full(port))
		cpu_relax();
	wr_regl(port, ureg->sirfsoc_tx_fifo_data, ch);
}

static void sirfsoc_uart_console_write(struct console *co, const char *s,
							unsigned int count)
{
	struct sirfsoc_uart_port *sirfport = sirf_ports[co->index];

	uart_console_write(&sirfport->port, s, count,
			sirfsoc_uart_console_putchar);
}

static struct console sirfsoc_uart_console = {
	.name		= SIRFSOC_UART_NAME,
	.device		= uart_console_device,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.write		= sirfsoc_uart_console_write,
	.setup		= sirfsoc_uart_console_setup,
	.data           = &sirfsoc_uart_drv,
};

static int __init sirfsoc_uart_console_init(void)
{
	register_console(&sirfsoc_uart_console);
	return 0;
}
console_initcall(sirfsoc_uart_console_init);
#endif

static struct uart_driver sirfsoc_uart_drv = {
	.owner		= THIS_MODULE,
	.driver_name	= SIRFUART_PORT_NAME,
	.nr		= SIRFSOC_UART_NR,
	.dev_name	= SIRFSOC_UART_NAME,
	.major		= SIRFSOC_UART_MAJOR,
	.minor		= SIRFSOC_UART_MINOR,
#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
	.cons			= &sirfsoc_uart_console,
#else
	.cons			= NULL,
#endif
};

static struct of_device_id sirfsoc_uart_ids[] = {
	{ .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
	{ .compatible = "sirf,atlas7-uart", .data = &sirfsoc_uart},
	{ .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
	{ .compatible = "sirf,atlas7-usp-uart", .data = &sirfsoc_usp},
	{}
};
MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);

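/*
 * Probe: pick the register layout from the OF match table, parse the
 * flow-control gpios and fifosize from the device tree, map registers
 * and the IRQ, register the port, then request optional RX/TX DMA
 * channels and their loop buffers.
 */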
static int sirfsoc_uart_probe(struct platform_device *pdev)
{
	struct sirfsoc_uart_port *sirfport;
	struct uart_port *port;
	struct resource *res;
	int ret;
	int i, j;
	struct dma_slave_config slv_cfg = {
		.src_maxburst = 2,
	};
	struct dma_slave_config tx_slv_cfg = {
		.dst_maxburst = 2,
	};
	const struct of_device_id *match;

	match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
	sirfport = devm_kzalloc(&pdev->dev, sizeof(*sirfport), GFP_KERNEL);
	if (!sirfport) {
		ret = -ENOMEM;
		goto err;
	}
	sirfport->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
	sirf_ports[sirfport->port.line] = sirfport;
	sirfport->port.iotype = UPIO_MEM;
	sirfport->port.flags = UPF_BOOT_AUTOCONF;
	port = &sirfport->port;
	port->dev = &pdev->dev;
	port->private_data = sirfport;
	sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;

	sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
		"sirf,uart-has-rtscts");
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart") ||
		of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-uart"))
		sirfport->uart_reg->uart_type = SIRF_REAL_UART;
	if (of_device_is_compatible(pdev->dev.of_node,
		"sirf,prima2-usp-uart") || of_device_is_compatible(
		pdev->dev.of_node, "sirf,atlas7-usp-uart")) {
		sirfport->uart_reg->uart_type =	SIRF_USP_UART;
		if (!sirfport->hw_flow_ctrl)
			goto usp_no_flow_control;
		if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
			sirfport->cts_gpio = of_get_named_gpio(
					pdev->dev.of_node, "cts-gpios", 0);
		else
			sirfport->cts_gpio = -1;
		if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL))
			sirfport->rts_gpio = of_get_named_gpio(
					pdev->dev.of_node, "rts-gpios", 0);
		else
			sirfport->rts_gpio = -1;

		if ((!gpio_is_valid(sirfport->cts_gpio) ||
			 !gpio_is_valid(sirfport->rts_gpio))) {
			ret = -EINVAL;
			dev_err(&pdev->dev,
				"USP flow control requires cts and rts gpios");
			goto err;
		}
		ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
				"usp-cts-gpio");
		if (ret) {
			dev_err(&pdev->dev, "Unable to request cts gpio");
			goto err;
		}
		gpio_direction_input(sirfport->cts_gpio);
		ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
				"usp-rts-gpio");
		if (ret) {
			dev_err(&pdev->dev, "Unable to request rts gpio");
			goto err;
		}
		gpio_direction_output(sirfport->rts_gpio, 1);
	}
usp_no_flow_control:
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-uart") ||
	    of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-usp-uart"))
		sirfport->is_atlas7 = true;

	if (of_property_read_u32(pdev->dev.of_node,
			"fifosize",
			&port->fifosize)) {
		dev_err(&pdev->dev,
			"Unable to find fifosize in uart node.\n");
		ret = -EFAULT;
		goto err;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto err;
	}
	tasklet_init(&sirfport->rx_dma_complete_tasklet,
			sirfsoc_uart_rx_dma_complete_tl,
			(unsigned long)sirfport);
	tasklet_init(&sirfport->rx_tmo_process_tasklet,
			sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
	port->mapbase = res->start;
	port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!port->membase) {
		dev_err(&pdev->dev, "Cannot remap resource.\n");
		ret = -ENOMEM;
		goto err;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto err;
	}
	port->irq = res->start;

	sirfport->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sirfport->clk)) {
		ret = PTR_ERR(sirfport->clk);
		goto err;
	}
	port->uartclk = clk_get_rate(sirfport->clk);

	port->ops = &sirfsoc_uart_ops;
	spin_lock_init(&port->lock);

	platform_set_drvdata(pdev, sirfport);
	ret = uart_add_one_port(&sirfsoc_uart_drv, port);
	if (ret != 0) {
		dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
		goto err;
	}

	sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
	for (i = 0; sirfport->rx_dma_chan && i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
		sirfport->rx_dma_items[i].xmit.buf =
			dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
			&sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
		if (!sirfport->rx_dma_items[i].xmit.buf) {
			dev_err(port->dev, "UART alloc buffer failed\n");
			ret = -ENOMEM;
			goto alloc_coherent_err;
		}
		sirfport->rx_dma_items[i].xmit.head =
			sirfport->rx_dma_items[i].xmit.tail = 0;
	}
	if (sirfport->rx_dma_chan)
		dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
	sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
	if (sirfport->tx_dma_chan)
		dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
	return 0;
alloc_coherent_err:
	for (j = 0; j < i; j++)
		dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
				sirfport->rx_dma_items[j].xmit.buf,
				sirfport->rx_dma_items[j].dma_addr);
	dma_release_channel(sirfport->rx_dma_chan);
err:
	return ret;
}

static int sirfsoc_uart_remove(struct platform_device *pdev)
{
	struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_remove_one_port(&sirfsoc_uart_drv, port);
	if (sirfport->rx_dma_chan) {
		int i;
		dmaengine_terminate_all(sirfport->rx_dma_chan);
		dma_release_channel(sirfport->rx_dma_chan);
		for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
			dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
					sirfport->rx_dma_items[i].xmit.buf,
					sirfport->rx_dma_items[i].dma_addr);
	}
	if (sirfport->tx_dma_chan) {
		dmaengine_terminate_all(sirfport->tx_dma_chan);
		dma_release_channel(sirfport->tx_dma_chan);
	}
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int
sirfsoc_uart_suspend(struct device *pdev)
{
	struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_suspend_port(&sirfsoc_uart_drv, port);
	return 0;
}

static int sirfsoc_uart_resume(struct device *pdev)
{
	struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_resume_port(&sirfsoc_uart_drv, port);
	return 0;
}
#endif

static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
};

static struct platform_driver sirfsoc_uart_driver = {
	.probe		= sirfsoc_uart_probe,
	.remove		= sirfsoc_uart_remove,
	.driver		= {
		.name	= SIRFUART_PORT_NAME,
		.of_match_table = sirfsoc_uart_ids,
		.pm	= &sirfsoc_uart_pm_ops,
	},
};

static int __init sirfsoc_uart_init(void)
{
	int ret = 0;

	ret = uart_register_driver(&sirfsoc_uart_drv);
	if (ret)
		goto out;

	ret = platform_driver_register(&sirfsoc_uart_driver);
	if (ret)
		uart_unregister_driver(&sirfsoc_uart_drv);
out:
	return ret;
}
module_init(sirfsoc_uart_init);

static void __exit sirfsoc_uart_exit(void)
{
	platform_driver_unregister(&sirfsoc_uart_driver);
	uart_unregister_driver(&sirfsoc_uart_drv);
}
module_exit(sirfsoc_uart_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>");
MODULE_DESCRIPTION("CSR SiRFprimaII Uart Driver");