sba_iommu.c 61.9 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1 2 3
/*
**  System Bus Adapter (SBA) I/O MMU manager
**
4 5 6
**	(c) Copyright 2000-2004 Grant Grundler <grundler @ parisc-linux x org>
**	(c) Copyright 2004 Naresh Kumar Inna <knaresh at india x hp x com>
**	(c) Copyright 2000-2004 Hewlett-Packard Company
Linus Torvalds's avatar
Linus Torvalds committed
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25
**
**	Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
**	This program is free software; you can redistribute it and/or modify
**	it under the terms of the GNU General Public License as published by
**      the Free Software Foundation; either version 2 of the License, or
**      (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on B1000/C3000/
** J5000/J7000/N-class/L-class machines and their successors.
**
** FIXME: add DMA hint support programming in both sba and lba modules.
*/

#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
Linus Torvalds's avatar
Linus Torvalds committed
26
#include <linux/slab.h>
Linus Torvalds's avatar
Linus Torvalds committed
27 28 29 30 31 32 33 34 35 36
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */

Matthew Wilcox's avatar
Matthew Wilcox committed
37
#include <asm/hardware.h>	/* for register_parisc_driver() stuff */
Linus Torvalds's avatar
Linus Torvalds committed
38 39 40

#include <linux/proc_fs.h>
#include <asm/runway.h>		/* for proc_runway_root */
Matthew Wilcox's avatar
Matthew Wilcox committed
41
#include <asm/pdc.h>		/* for PDC_MODEL_* */
Matthew Wilcox's avatar
Matthew Wilcox committed
42
#include <asm/pdcpat.h>		/* for is_pdc_pat() */
Matthew Wilcox's avatar
Matthew Wilcox committed
43
#include <asm/parisc-device.h>
Linus Torvalds's avatar
Linus Torvalds committed
44

Matthew Wilcox's avatar
Matthew Wilcox committed
45 46 47 48

/* declared in arch/parisc/kernel/setup.c */
extern struct proc_dir_entry * proc_mckinley_root;

Linus Torvalds's avatar
Linus Torvalds committed
49 50
#define MODULE_NAME "SBA"

51 52 53 54 55
#ifdef CONFIG_PROC_FS
/* depends on proc fs support. But costs CPU performance */
#undef SBA_COLLECT_STATS
#endif

Linus Torvalds's avatar
Linus Torvalds committed
56 57 58
/*
** The number of debug flags is a clue - this code is fragile.
** Don't even think about messing with it unless you have
Matthew Wilcox's avatar
Matthew Wilcox committed
59
** plenty of 710's to sacrifice to the computer gods. :^)
Linus Torvalds's avatar
Linus Torvalds committed
60
*/
Matthew Wilcox's avatar
Matthew Wilcox committed
61
#undef DEBUG_SBA_ASSERT
Linus Torvalds's avatar
Linus Torvalds committed
62 63 64 65 66 67
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
Matthew Wilcox's avatar
Matthew Wilcox committed
68
#undef DEBUG_DMB_TRAP
Linus Torvalds's avatar
Linus Torvalds committed
69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif


#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

Matthew Wilcox's avatar
Matthew Wilcox committed
95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115
#ifdef DEBUG_SBA_ASSERT
#undef ASSERT
#define ASSERT(expr) \
	if(!(expr)) { \
		printk("\n%s:%d: Assertion " #expr " failed!\n", \
				__FILE__, __LINE__); \
		panic(#expr); \
	}
#else
#define ASSERT(expr)
#endif


#if defined(__LP64__) && !defined(CONFIG_PDC_NARROW)
/* "low end" PA8800 machines use ZX1 chipset */
#define ZX1_SUPPORT
#endif

#define SBA_INLINE	__inline__


Linus Torvalds's avatar
Linus Torvalds committed
116 117 118 119 120 121 122 123 124 125 126
/*
** The number of pdir entries to "free" before issueing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT	16

#define DEFAULT_DMA_HINT_REG	0

Matthew Wilcox's avatar
Matthew Wilcox committed
127 128
#define ASTRO_RUNWAY_PORT	0x582
#define ASTRO_ROPES_PORT	0x780
Linus Torvalds's avatar
Linus Torvalds committed
129

Matthew Wilcox's avatar
Matthew Wilcox committed
130 131
#define IKE_MERCED_PORT		0x803
#define IKE_ROPES_PORT		0x781
Linus Torvalds's avatar
Linus Torvalds committed
132

Matthew Wilcox's avatar
Matthew Wilcox committed
133 134
#define REO_MERCED_PORT		0x804
#define REO_ROPES_PORT		0x782
Linus Torvalds's avatar
Linus Torvalds committed
135

Matthew Wilcox's avatar
Matthew Wilcox committed
136 137
#define REOG_MERCED_PORT	0x805
#define REOG_ROPES_PORT		0x783
Linus Torvalds's avatar
Linus Torvalds committed
138

Matthew Wilcox's avatar
Matthew Wilcox committed
139 140 141
#define PLUTO_MCKINLEY_PORT	0x880
#define PLUTO_ROPES_PORT	0x784

Linus Torvalds's avatar
Linus Torvalds committed
142 143 144
#define SBA_FUNC_ID	0x0000	/* function id */
#define SBA_FCLASS	0x0008	/* function class, bist, header, rev... */

Matthew Wilcox's avatar
Matthew Wilcox committed
145 146 147 148 149
#define IS_ASTRO(id) \
(((id)->hversion == ASTRO_RUNWAY_PORT) || ((id)->hversion == ASTRO_ROPES_PORT))

#define IS_IKE(id) \
(((id)->hversion == IKE_MERCED_PORT) || ((id)->hversion == IKE_ROPES_PORT))
Linus Torvalds's avatar
Linus Torvalds committed
150

Matthew Wilcox's avatar
Matthew Wilcox committed
151 152 153
#define IS_PLUTO(id) \
(((id)->hversion == PLUTO_MCKINLEY_PORT) || ((id)->hversion == PLUTO_ROPES_PORT))

Matthew Wilcox's avatar
Matthew Wilcox committed
154
#define SBA_FUNC_SIZE 4096   /* SBA configuration function reg set */
Linus Torvalds's avatar
Linus Torvalds committed
155 156 157

#define ASTRO_IOC_OFFSET 0x20000
/* Ike's IOC's occupy functions 2 and 3 (not 0 and 1) */
Matthew Wilcox's avatar
Matthew Wilcox committed
158
#define IKE_IOC_OFFSET(p) ((p+2)*SBA_FUNC_SIZE)
Linus Torvalds's avatar
Linus Torvalds committed
159

Matthew Wilcox's avatar
Matthew Wilcox committed
160 161
#define PLUTO_IOC_OFFSET 0x1000

Linus Torvalds's avatar
Linus Torvalds committed
162
#define IOC_CTRL          0x8	/* IOC_CTRL offset */
Matthew Wilcox's avatar
Matthew Wilcox committed
163 164 165 166 167
#define IOC_CTRL_TC       (1 << 0) /* TOC Enable */
#define IOC_CTRL_CE       (1 << 1) /* Coalesce Enable */
#define IOC_CTRL_DE       (1 << 2) /* Dillon Enable */
#define IOC_CTRL_RM       (1 << 8) /* Real Mode */
#define IOC_CTRL_NC       (1 << 9) /* Non Coherent Mode */
Linus Torvalds's avatar
Linus Torvalds committed
168

Matthew Wilcox's avatar
Matthew Wilcox committed
169
#define MAX_IOC		2	/* per Ike. Pluto/Astro only have 1. */
Linus Torvalds's avatar
Linus Torvalds committed
170

171 172
#define ROPES_PER_IOC	8	/* per Ike half or Pluto/Astro */

Linus Torvalds's avatar
Linus Torvalds committed
173 174 175 176 177

/*
** Offsets into MBIB (Function 0 on Ike and hopefully Astro)
** Firmware programs this stuff. Don't touch it.
*/
178 179 180 181 182 183 184 185
#define LMMIO_DIRECT0_BASE  0x300
#define LMMIO_DIRECT0_MASK  0x308
#define LMMIO_DIRECT0_ROUTE 0x310

#define LMMIO_DIST_BASE  0x360
#define LMMIO_DIST_MASK  0x368
#define LMMIO_DIST_ROUTE 0x370

Linus Torvalds's avatar
Linus Torvalds committed
186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214
#define IOS_DIST_BASE	0x390
#define IOS_DIST_MASK	0x398
#define IOS_DIST_ROUTE	0x3A0

#define IOS_DIRECT_BASE	0x3C0
#define IOS_DIRECT_MASK	0x3C8
#define IOS_DIRECT_ROUTE 0x3D0

/*
** Offsets into I/O TLB (Function 2 and 3 on Ike)
*/
#define ROPE0_CTL	0x200  /* "regbus pci0" */
#define ROPE1_CTL	0x208
#define ROPE2_CTL	0x210
#define ROPE3_CTL	0x218
#define ROPE4_CTL	0x220
#define ROPE5_CTL	0x228
#define ROPE6_CTL	0x230
#define ROPE7_CTL	0x238

#define HF_ENABLE	0x40


#define IOC_IBASE	0x300	/* IO TLB */
#define IOC_IMASK	0x308
#define IOC_PCOM	0x310
#define IOC_TCNFG	0x318
#define IOC_PDIR_BASE	0x320

Matthew Wilcox's avatar
Matthew Wilcox committed
215 216 217
/* AGP GART driver looks for this */
#define SBA_IOMMU_COOKIE    0x0000badbadc0ffeeUL

Linus Torvalds's avatar
Linus Torvalds committed
218 219 220 221 222 223 224 225 226 227

/*
** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
** It's safer (avoid memory corruption) to keep DMA page mappings
** equivalently sized to VM PAGE_SIZE.
**
** We really can't avoid generating a new mapping for each
** page since the Virtual Coherence Index has to be generated
** and updated for each page.
**
Matthew Wilcox's avatar
Matthew Wilcox committed
228
** PAGE_SIZE could be greater than IOVP_SIZE. But not the inverse.
Linus Torvalds's avatar
Linus Torvalds committed
229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248
*/
#define IOVP_SIZE	PAGE_SIZE
#define IOVP_SHIFT	PAGE_SHIFT
#define IOVP_MASK	PAGE_MASK

#define SBA_PERF_CFG	0x708	/* Performance Counter stuff */
#define SBA_PERF_MASK1	0x718
#define SBA_PERF_MASK2	0x730


/*
** Offsets into PCI Performance Counters (functions 12 and 13)
** Controlled by PERF registers in function 2 & 3 respectively.
*/
#define SBA_PERF_CNT1	0x200
#define SBA_PERF_CNT2	0x208
#define SBA_PERF_CNT3	0x210


/*
** Per-IOC (I/O Controller) state: one instance of the I/O MMU.
** Hot-path fields (resource map, pdir, lock) come first; rarely
** used setup-time fields are at the end.
*/
struct ioc {
	unsigned long	ioc_hpa;	/* I/O MMU base address */
	char	*res_map;	/* resource map, bit == pdir entry */
	u64	*pdir_base;	/* physical base address */
	unsigned long	ibase;	/* pdir IOV Space base - shared w/lba_pci */
	unsigned long	imask;	/* pdir IOV Space mask - shared w/lba_pci */
#ifdef ZX1_SUPPORT
	unsigned long	iovp_mask;	/* help convert IOVA to IOVP */
#endif
	unsigned long	*res_hint;	/* next avail IOVP - circular search */
	spinlock_t	res_lock;	/* protects res_map/res_hint/res_bitshift */
	unsigned int	res_bitshift;	/* from the LEFT! */
	unsigned int	res_size;	/* size of resource map in bytes */
#if SBA_HINT_SUPPORT
/* FIXME : DMA HINTs not used */
	unsigned long	hint_mask_pdir;	/* bits used for DMA hints */
	unsigned int	hint_shift_pdir;
#endif
#if DELAYED_RESOURCE_CNT > 0
	/* Batch deferred "free" operations so one PCOM (I/O TLB purge)
	** can cover several unmaps. */
	int saved_cnt;
	struct sba_dma_pair {
		dma_addr_t	iova;
		size_t		size;
	} saved[DELAYED_RESOURCE_CNT];
#endif

#ifdef SBA_COLLECT_STATS
#define SBA_SEARCH_SAMPLE	0x100
	unsigned long avg_search[SBA_SEARCH_SAMPLE];	/* search-time samples (CR16 ticks) */
	unsigned long avg_idx;	/* current index into avg_search */
	unsigned long used_pages;	/* pdir entries currently in use */
	unsigned long msingle_calls;	/* map_single call/page counters */
	unsigned long msingle_pages;
	unsigned long msg_calls;	/* map_sg call/page counters */
	unsigned long msg_pages;
	unsigned long usingle_calls;	/* unmap_single call/page counters */
	unsigned long usingle_pages;
	unsigned long usg_calls;	/* unmap_sg call/page counters */
	unsigned long usg_pages;
#endif

	/* STUFF We don't need in performance path */
	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
};

/*
** One SBA chip (Astro/Ike/Pluto...), discovered during the bus walk.
** Holds the per-chip MMIO resources and up to MAX_IOC embedded IOCs.
*/
struct sba_device {
	struct sba_device	*next;	/* list of SBA's in system */
	struct parisc_device	*dev;	/* dev found in bus walk */
	struct parisc_device_id	*iodc;	/* data about dev from firmware */
	const char 		*name;
	unsigned long		sba_hpa; /* base address */
	spinlock_t		sba_lock;	/* protects per-chip state */
	unsigned int		flags;  /* state/functionality enabled */
	unsigned int		hw_rev;  /* HW revision of chip */

	struct resource		chip_resv; /* MMIO reserved for chip */
	struct resource		iommu_resv; /* MMIO reserved for iommu */

	unsigned int		num_ioc;  /* number of on-board IOC's */
	struct ioc		ioc[MAX_IOC];	/* per-IOC state, see struct ioc */
};


static struct sba_device *sba_list;
Matthew Wilcox's avatar
Matthew Wilcox committed
312 313

static unsigned long ioc_needs_fdc = 0;
Linus Torvalds's avatar
Linus Torvalds committed
314 315

/* Ratio of Host MEM to IOV Space size */
Matthew Wilcox's avatar
Matthew Wilcox committed
316 317 318 319 320 321 322
static unsigned long sba_mem_ratio = 8;

/* global count of IOMMUs in the system */
static unsigned int global_ioc_cnt = 0;

/* PA8700 (Piranha 2.2) bug workaround */
static unsigned long piranha_bad_128k = 0;
Linus Torvalds's avatar
Linus Torvalds committed
323 324 325 326

/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))

Matthew Wilcox's avatar
Matthew Wilcox committed
327 328 329
#if SBA_AGP_SUPPORT
static int reserve_sba_gart = 1;
#endif
Linus Torvalds's avatar
Linus Torvalds committed
330 331 332 333 334 335 336 337 338

#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))


/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
Matthew Wilcox's avatar
Matthew Wilcox committed
339 340
**
** Superdome (in particular, REO) allows only 64-bit CSR accesses.
Linus Torvalds's avatar
Linus Torvalds committed
341
*/
Matthew Wilcox's avatar
Matthew Wilcox committed
342 343 344 345 346 347 348 349 350 351 352 353
#define READ_REG32(addr)	 le32_to_cpu(__raw_readl(addr))
#define READ_REG64(addr)	 le64_to_cpu(__raw_readq(addr))
#define WRITE_REG32(val, addr) __raw_writel(cpu_to_le32(val), addr)
#define WRITE_REG64(val, addr) __raw_writeq(cpu_to_le64(val), addr)

#ifdef __LP64__
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif
Linus Torvalds's avatar
Linus Torvalds committed
354 355 356

#ifdef DEBUG_SBA_INIT

Matthew Wilcox's avatar
Matthew Wilcox committed
357 358 359 360 361 362 363 364 365
/* NOTE: When __LP64__ isn't defined, READ_REG64() is two 32-bit reads */

/**
 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
 * @hpa: base address of the sba
 *
 * Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO
 * IO Adapter (aka Bus Converter).
 */
static void
sba_dump_ranges(unsigned long hpa)
{
	DBG_INIT("SBA at 0x%lx\n", hpa);
	/* distributed range: interleaves I/O port space across the ropes */
	DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	/* direct range: I/O port space routed to a single rope */
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

Matthew Wilcox's avatar
Matthew Wilcox committed
379 380 381 382 383 384
/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR (base, mask, page size
 * config and pdir base registers).
 */
static void
sba_dump_tlb(unsigned long hpa)
{
	DBG_INIT("IO TLB at 0x%lx\n", hpa);
	DBG_INIT("IOC_IBASE    : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
Matthew Wilcox's avatar
Matthew Wilcox committed
395 396 397
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
Linus Torvalds's avatar
Linus Torvalds committed
398 399 400 401 402
#endif


#ifdef ASSERT_PDIR_SANITY

Matthew Wilcox's avatar
Matthew Wilcox committed
403 404 405 406 407 408 409 410
/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print ont the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
Linus Torvalds's avatar
Linus Torvalds committed
411 412 413 414 415 416 417 418
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
	uint rcnt;

Matthew Wilcox's avatar
Matthew Wilcox committed
419
	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
Linus Torvalds's avatar
Linus Torvalds committed
420 421 422 423 424
		 msg,
		 rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
Matthew Wilcox's avatar
Matthew Wilcox committed
425
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
Linus Torvalds's avatar
Linus Torvalds committed
426 427 428 429 430 431
			(rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
			rcnt, ptr, *ptr );
		rcnt++;
		ptr++;
	}
Matthew Wilcox's avatar
Matthew Wilcox committed
432
	printk(KERN_DEBUG "%s", msg);
Linus Torvalds's avatar
Linus Torvalds committed
433 434 435
}


Matthew Wilcox's avatar
Matthew Wilcox committed
436 437 438 439 440 441 442
/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print ont the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
Linus Torvalds's avatar
Linus Torvalds committed
443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
	u32 *rptr = (u32 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u32 rval = *rptr;
		int rcnt = 32;	/* number of bits we might check */

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32) (((char *)pptr)[7])) << 24;
			if ((rval ^ pde) & 0x80000000)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval <<= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}


Matthew Wilcox's avatar
Matthew Wilcox committed
479 480 481 482 483 484 485 486
/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
Linus Torvalds's avatar
Linus Torvalds committed
487 488 489 490
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
Matthew Wilcox's avatar
Matthew Wilcox committed
491
		printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
Linus Torvalds's avatar
Linus Torvalds committed
492 493 494
				nents,
				(unsigned long) sg_dma_address(startsg),
				sg_dma_len(startsg),
Matthew Wilcox's avatar
Matthew Wilcox committed
495
				sg_virt_addr(startsg), startsg->length);
Linus Torvalds's avatar
Linus Torvalds committed
496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517
		startsg++;
	}
}

#endif /* ASSERT_PDIR_SANITY */




/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */

Matthew Wilcox's avatar
Matthew Wilcox committed
518 519 520 521 522 523 524 525 526 527
#ifdef ZX1_SUPPORT
/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* only support Astro and ancestors. Saves a few cycles in key places */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

Linus Torvalds's avatar
Linus Torvalds committed
528 529 530 531 532 533
#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)


Matthew Wilcox's avatar
Matthew Wilcox committed
534 535 536 537 538 539 540 541 542
/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 *
 * Returns the bit index of the first free entry found, or ~0UL if no
 * run of @bits_wanted free bits exists between the hint and the end of
 * the map (the caller retries once after the hint wraps to the start).
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;	/* circular search starts here */
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL;	/* ~0UL == not found */

	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
	ASSERT(res_ptr < res_end);
	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for(; res_ptr < res_end; ++res_ptr) {
			if (*res_ptr == 0) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
				pide <<= 3;	/* convert to bit address */
				break;
			}
		}
		/* point to the next word on next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
		unsigned long mask;

		/* hint ran past the current word; move to the next one */
		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

		DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
		while(res_ptr < res_end)
		{ 
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			BUG_ON(0 == mask);
			if(0 == ((*res_ptr) & mask)) {
				*res_ptr |= mask;     /* mark resources busy! */
				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
				pide <<= 3;	/* convert to bit address */
				pide += bitshiftcnt;
				break;
			}
			/* slide the mask right by the alignment and retry;
			** when it falls off the word, start the next word */
			mask >>= o;
			bitshiftcnt += o;
			if (0 == mask) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt=0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}


Matthew Wilcox's avatar
Matthew Wilcox committed
617 618 619 620 621 622 623 624
/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked and then mark those bits in the
 * resource bit map.  Panics if the map is exhausted even after the search
 * hint has wrapped around once.  Returns the pdir index of the range.
 */
static int
sba_alloc_range(struct ioc *ioc, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef SBA_COLLECT_STATS
	/* CR16 is the PA-RISC interval timer; used to time the search */
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	ASSERT(pages_needed);
	ASSERT((pages_needed * IOVP_SIZE) <= DMA_CHUNK_SIZE);
	ASSERT(pages_needed <= BITS_PER_LONG);
	ASSERT(0 == (size & ~IOVP_MASK));

	/*
	** "seek and ye shall find"...praying never hurts either...
	** ggg sacrifices another 710 to the computer gods.
	*/

	pide = sba_search_bitmap(ioc, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		/* first pass failed; the hint has wrapped, so retry once
		** from the start of the map before giving up */
		pide = sba_search_bitmap(ioc, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic("%s: I/O MMU @ %lx is out of mapping resources\n",
			      __FILE__, ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__FUNCTION__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

#ifdef SBA_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ?  -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}


Matthew Wilcox's avatar
Matthew Wilcox committed
681 682 683 684 685 686 687 688
/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes the mapping covers.
 *
 * clear bits in the ioc's resource map, making the pdir entries
 * available for reallocation.
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);	/* strip ibase bits */
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__FUNCTION__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef SBA_COLLECT_STATS
	ioc->used_pages -= bits_not_wanted;
#endif

	ASSERT(m != 0);
	ASSERT(bits_not_wanted);
	ASSERT((bits_not_wanted * IOVP_SIZE) <= DMA_CHUNK_SIZE);
	ASSERT(bits_not_wanted <= BITS_PER_LONG);
	ASSERT((*res_ptr & m) == m); /* verify same bits are set */
	*res_ptr &= ~m;	/* clear them: the range is free again */
}


/**************************************************************
*
*   "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

Matthew Wilcox's avatar
Matthew Wilcox committed
725
#if SBA_HINT_SUPPORT
Linus Torvalds's avatar
Linus Torvalds committed
726
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
Matthew Wilcox's avatar
Matthew Wilcox committed
727
#endif
Linus Torvalds's avatar
Linus Torvalds committed
728 729 730 731

typedef unsigned long space_t;
#define KERNEL_SPACE 0

Matthew Wilcox's avatar
Matthew Wilcox committed
732 733 734 735 736 737 738 739 740 741
/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @sid: process Space ID
 * @vba: Virtual CPU address of buffer to map
 * @hint: DMA hint value (currently unused — see FIXME at top of file)
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1)
 * sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
 * pdir_ptr (arg0). 
 * Using the bass-ackwards HP bit numbering, Each IO Pdir entry
 * for Astro/Ike looks like:
 *
 *
 *  0                    19                                 51   55       63
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[43:12]            | U  |   VI   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 * Pluto is basically identical, supports fewer physical address bits:
 *
 *  0                       23                              51   55       63
 * +-+------------------------+-------------------------------+----+--------+
 * |V|        U               |         PPN[39:12]            | U  |   VI   |
 * +-+------------------------+-------------------------------+----+--------+
 *
 *  V  == Valid Bit  (Most Significant Bit is bit 0)
 *  U  == Unused
 * PPN == Physical Page Number
 * VI  == Virtual Index (aka Coherent Index)
 *
 * LPA instruction output is put into PPN field.
 * LCI (Load Coherence Index) instruction provides the "VI" bits.
 *
 * We pre-swap the bytes since PCX-W is Big Endian and the
 * IOMMU uses little endian for the pdir.
 */

void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		  unsigned long hint)
{
	u64 pa; /* physical address */
	register unsigned ci; /* coherent index */

	/* We currently only support kernel addresses.
	 * fdc instr below will need to reload sr1 with KERNEL_SPACE
	 * once we try to support direct DMA to user space.
	 */
	ASSERT(sid == KERNEL_SPACE);

	pa = virt_to_phys(vba);
	pa &= IOVP_MASK;	/* keep only the page-frame (PPN) bits */

	/* load the space id into sr1 so the lci below sees it */
	mtsp(sid,1);
	asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> 12) & 0xff;  /* move CI (8 bits) into lowest byte */

	pa |= 0x8000000000000000ULL;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */

	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc) {
		asm volatile("fdc 0(%%sr1,%0)\n\tsync" : : "r" (pdir_ptr));
	}
}


Matthew Wilcox's avatar
Matthew Wilcox committed
806 807 808 809 810 811 812 813 814
/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Marking the IO PDIR entry(ies) as Invalid and invalidate
 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
 * is to purge stale entries in the IO TLB when unmapping entries.
Linus Torvalds's avatar
Linus Torvalds committed
815 816 817 818
 *
 * The PCOM register supports purging of multiple pages, with a minium
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
Matthew Wilcox's avatar
Matthew Wilcox committed
819 820 821
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
Linus Torvalds's avatar
Linus Torvalds committed
822 823 824 825 826 827
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);

	/* Even though this is a big-endian machine, the entries
Matthew Wilcox's avatar
Matthew Wilcox committed
828
	** in the iopdir are little endian. That's why we clear the byte
Linus Torvalds's avatar
Linus Torvalds committed
829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872
	** at +7 instead of at +0.
	*/
	int off = PDIR_INDEX(iovp)*sizeof(u64)+7;

	/* Must be non-zero and rounded up */
	ASSERT(byte_cnt > 0);
	ASSERT(0 == (byte_cnt & ~IOVP_MASK));

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (0x80 != (((u8 *) ioc->pdir_base)[off])) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt <= IOVP_SIZE)
	{
		ASSERT( off < ioc->pdir_size);

		iovp |= IOVP_SHIFT;     /* set "size" field for PCOM */

		/*
		** clear I/O PDIR entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
		((u8 *)(ioc->pdir_base))[off] = 0;
	} else {
		u32 t = get_order(byte_cnt) + PAGE_SHIFT;

		iovp |= t;
		ASSERT(t <= 31);   /* 2GB! Max value of "size" field */

		do {
			/* verify this pdir entry is enabled */
			ASSERT(0x80 == (((u8 *) ioc->pdir_base)[off] & 0x80));
			/* clear I/O Pdir entry "valid" bit first */
			((u8 *)(ioc->pdir_base))[off] = 0;
			off += sizeof(u64);
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > 0);
	}

Matthew Wilcox's avatar
Matthew Wilcox committed
873
	WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
Linus Torvalds's avatar
Linus Torvalds committed
874 875
}

Matthew Wilcox's avatar
Matthew Wilcox committed
876 877 878 879 880 881 882
/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask:  number of address bits this PCI device can handle
 *
 * See Documentation/DMA-mapping.txt
 */
Linus Torvalds's avatar
Linus Torvalds committed
883
static int
Matthew Wilcox's avatar
Matthew Wilcox committed
884
sba_dma_supported( struct device *dev, u64 mask)
Linus Torvalds's avatar
Linus Torvalds committed
885 886
{
	if (dev == NULL) {
Matthew Wilcox's avatar
Matthew Wilcox committed
887
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
Linus Torvalds's avatar
Linus Torvalds committed
888 889 890 891
		BUG();
		return(0);
	}

Matthew Wilcox's avatar
Matthew Wilcox committed
892
	/* only support 32-bit PCI devices - no DAC support (yet) */
Matthew Wilcox's avatar
Matthew Wilcox committed
893
	return((int) (mask == 0xffffffffUL));
Linus Torvalds's avatar
Linus Torvalds committed
894 895 896
}


Matthew Wilcox's avatar
Matthew Wilcox committed
897 898 899 900 901 902 903 904 905
/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
Linus Torvalds's avatar
Linus Torvalds committed
906
static dma_addr_t
Matthew Wilcox's avatar
Matthew Wilcox committed
907 908
sba_map_single(struct device *dev, void *addr, size_t size,
	       enum dma_data_direction direction)
Linus Torvalds's avatar
Linus Torvalds committed
909
{
Matthew Wilcox's avatar
Matthew Wilcox committed
910
	struct ioc *ioc;
Linus Torvalds's avatar
Linus Torvalds committed
911 912 913 914 915 916 917
	unsigned long flags; 
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;

	ASSERT(size > 0);
Matthew Wilcox's avatar
Matthew Wilcox committed
918 919 920 921
	ASSERT(size <= DMA_CHUNK_SIZE);

	ioc = GET_IOC(dev);
	ASSERT(ioc);
Linus Torvalds's avatar
Linus Torvalds committed
922 923

	/* save offset bits */
Matthew Wilcox's avatar
Matthew Wilcox committed
924
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
Linus Torvalds's avatar
Linus Torvalds committed
925 926 927 928 929 930 931 932 933

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check before sba_map_single()");
#endif

934
#ifdef SBA_COLLECT_STATS
Linus Torvalds's avatar
Linus Torvalds committed
935 936 937 938 939 940
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

Matthew Wilcox's avatar
Matthew Wilcox committed
941
	DBG_RUN("%s() 0x%p -> 0x%lx\n",
Matthew Wilcox's avatar
Matthew Wilcox committed
942
		__FUNCTION__, addr, (long) iovp | offset);
Linus Torvalds's avatar
Linus Torvalds committed
943 944 945 946 947

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
Matthew Wilcox's avatar
Matthew Wilcox committed
948
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
Linus Torvalds's avatar
Linus Torvalds committed
949

Matthew Wilcox's avatar
Matthew Wilcox committed
950
		DBG_RUN("	pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
Linus Torvalds's avatar
Linus Torvalds committed
951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}
	/* form complete address */
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}


Matthew Wilcox's avatar
Matthew Wilcox committed
975 976 977 978 979 980 981 982 983
/**
 * sba_unmap_single - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
Linus Torvalds's avatar
Linus Torvalds committed
984
static void
Matthew Wilcox's avatar
Matthew Wilcox committed
985 986
sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
		 enum dma_data_direction direction)
Linus Torvalds's avatar
Linus Torvalds committed
987
{
Matthew Wilcox's avatar
Matthew Wilcox committed
988 989 990
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
Linus Torvalds's avatar
Linus Torvalds committed
991 992 993
#endif
	unsigned long flags; 
	dma_addr_t offset;
Matthew Wilcox's avatar
Matthew Wilcox committed
994 995 996 997

	ioc = GET_IOC(dev);
	ASSERT(ioc);

Linus Torvalds's avatar
Linus Torvalds committed
998 999
	offset = iova & ~IOVP_MASK;

Matthew Wilcox's avatar
Matthew Wilcox committed
1000 1001
	DBG_RUN("%s() iovp 0x%lx/%x\n",
		__FUNCTION__, (long) iova, size);
Linus Torvalds's avatar
Linus Torvalds committed
1002 1003 1004 1005 1006 1007

	iova ^= offset;        /* clear offset bits */
	size += offset;
	size = ROUNDUP(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);
Matthew Wilcox's avatar
Matthew Wilcox committed
1008

1009
#ifdef SBA_COLLECT_STATS
Linus Torvalds's avatar
Linus Torvalds committed
1010 1011 1012 1013
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

Matthew Wilcox's avatar
Matthew Wilcox committed
1014 1015
	sba_mark_invalid(ioc, iova, size);

Matthew Wilcox's avatar
Matthew Wilcox committed
1016
#if DELAYED_RESOURCE_CNT > 0
Matthew Wilcox's avatar
Matthew Wilcox committed
1017 1018 1019
	/* Delaying when we re-use a IO Pdir entry reduces the number
	 * of MMIO reads needed to flush writes to the PCOM register.
	 */
Matthew Wilcox's avatar
Matthew Wilcox committed
1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;
		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
Linus Torvalds's avatar
Linus Torvalds committed
1031
	}
Matthew Wilcox's avatar
Matthew Wilcox committed
1032 1033 1034 1035
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_free_range(ioc, iova, size);
	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */
Linus Torvalds's avatar
Linus Torvalds committed
1036
	spin_unlock_irqrestore(&ioc->res_lock, flags);
Matthew Wilcox's avatar
Matthew Wilcox committed
1037 1038 1039 1040 1041 1042 1043 1044 1045

	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
	** For Astro based systems this isn't a big deal WRT performance.
	** As long as 2.4 kernels copyin/copyout data from/to userspace,
	** we don't need the syncdma. The issue here is I/O MMU cachelines
	** are *not* coherent in all cases.  May be hwrev dependent.
	** Need to investigate more.
	asm volatile("syncdma");	
	*/
Linus Torvalds's avatar
Linus Torvalds committed
1046 1047 1048
}


Matthew Wilcox's avatar
Matthew Wilcox committed
1049 1050 1051 1052 1053 1054 1055 1056
/**
 * sba_alloc_consistent - allocate/map shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
1057 1058
static void *sba_alloc_consistent(struct device *hwdev, size_t size,
					dma_addr_t *dma_handle, int gfp)
Linus Torvalds's avatar
Linus Torvalds committed
1059 1060 1061 1062 1063 1064 1065 1066 1067
{
	void *ret;

	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return 0;
	}

1068
        ret = (void *) __get_free_pages(gfp, get_order(size));
Linus Torvalds's avatar
Linus Torvalds committed
1069 1070 1071 1072 1073 1074 1075 1076 1077 1078

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = sba_map_single(hwdev, ret, size, 0);
	}

	return ret;
}


Matthew Wilcox's avatar
Matthew Wilcox committed
1079 1080 1081 1082 1083 1084 1085 1086 1087
/**
 * sba_free_consistent - free/unmap shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address IOVA of "consistent" buffer.
 * @dma_handler:  IO virtual address of "consistent" buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
Linus Torvalds's avatar
Linus Torvalds committed
1088
static void
Matthew Wilcox's avatar
Matthew Wilcox committed
1089 1090
sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
		    dma_addr_t dma_handle)
Linus Torvalds's avatar
Linus Torvalds committed
1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103
{
	sba_unmap_single(hwdev, dma_handle, size, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

1104
#ifdef SBA_COLLECT_STATS
Matthew Wilcox's avatar
Matthew Wilcox committed
1105
#define IOMMU_MAP_STATS
Linus Torvalds's avatar
Linus Torvalds committed
1106
#endif
Matthew Wilcox's avatar
Matthew Wilcox committed
1107
#include "iommu-helpers.h"
Linus Torvalds's avatar
Linus Torvalds committed
1108 1109

#ifdef DEBUG_LARGE_SG_ENTRIES
Matthew Wilcox's avatar
Matthew Wilcox committed
1110
int dump_run_sg = 0;
Linus Torvalds's avatar
Linus Torvalds committed
1111
#endif
Matthew Wilcox's avatar
Matthew Wilcox committed
1112

Linus Torvalds's avatar
Linus Torvalds committed
1113

Matthew Wilcox's avatar
Matthew Wilcox committed
1114 1115 1116 1117 1118 1119 1120 1121 1122
/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
Linus Torvalds's avatar
Linus Torvalds committed
1123
static int
Matthew Wilcox's avatar
Matthew Wilcox committed
1124 1125
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
Linus Torvalds's avatar
Linus Torvalds committed
1126
{
Matthew Wilcox's avatar
Matthew Wilcox committed
1127
	struct ioc *ioc;
Linus Torvalds's avatar
Linus Torvalds committed
1128 1129 1130 1131 1132
	int coalesced, filled = 0;
	unsigned long flags;

	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);

Matthew Wilcox's avatar
Matthew Wilcox committed
1133 1134 1135
	ioc = GET_IOC(dev);
	ASSERT(ioc);

Linus Torvalds's avatar
Linus Torvalds committed
1136 1137
	/* Fast path single entry scatterlists. */
	if (nents == 1) {
Matthew Wilcox's avatar
Matthew Wilcox committed
1138 1139
		sg_dma_address(sglist) = sba_map_single(dev,
						(void *)sg_virt_addr(sglist),
Linus Torvalds's avatar
Linus Torvalds committed
1140
						sglist->length, direction);
Matthew Wilcox's avatar
Matthew Wilcox committed
1141
		sg_dma_len(sglist)     = sglist->length;
Linus Torvalds's avatar
Linus Torvalds committed
1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

1155
#ifdef SBA_COLLECT_STATS
Linus Torvalds's avatar
Linus Torvalds committed
1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
Matthew Wilcox's avatar
Matthew Wilcox committed
1167
	coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range);
Linus Torvalds's avatar
Linus Torvalds committed
1168 1169 1170 1171 1172 1173 1174 1175 1176

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map 
	** o address contains the virtual address.
	*/
Matthew Wilcox's avatar
Matthew Wilcox committed
1177
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
Linus Torvalds's avatar
Linus Torvalds committed
1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	ASSERT(coalesced == filled);
	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);

	return filled;
}


Matthew Wilcox's avatar
Matthew Wilcox committed
1196 1197 1198 1199 1200 1201 1202 1203 1204
/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
Linus Torvalds's avatar
Linus Torvalds committed
1205
static void 
Matthew Wilcox's avatar
Matthew Wilcox committed
1206 1207
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	     enum dma_data_direction direction)
Linus Torvalds's avatar
Linus Torvalds committed
1208
{
Matthew Wilcox's avatar
Matthew Wilcox committed
1209
	struct ioc *ioc;
Linus Torvalds's avatar
Linus Torvalds committed
1210 1211 1212 1213 1214
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
Matthew Wilcox's avatar
Matthew Wilcox committed
1215 1216 1217 1218
		__FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);

	ioc = GET_IOC(dev);
	ASSERT(ioc);
Linus Torvalds's avatar
Linus Torvalds committed
1219

1220
#ifdef SBA_COLLECT_STATS
Linus Torvalds's avatar
Linus Torvalds committed
1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (sg_dma_len(sglist) && nents--) {

Matthew Wilcox's avatar
Matthew Wilcox committed
1232
		sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
1233
#ifdef SBA_COLLECT_STATS
Matthew Wilcox's avatar
Matthew Wilcox committed
1234 1235
		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */
Linus Torvalds's avatar
Linus Torvalds committed
1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249
#endif
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__,  nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

}

Matthew Wilcox's avatar
Matthew Wilcox committed
1250 1251 1252 1253 1254 1255 1256 1257 1258
static struct hppa_dma_ops sba_ops = {
	.dma_supported =	sba_dma_supported,
	.alloc_consistent =	sba_alloc_consistent,
	.alloc_noncoherent =	sba_alloc_consistent,
	.free_consistent =	sba_free_consistent,
	.map_single =		sba_map_single,
	.unmap_single =		sba_unmap_single,
	.map_sg =		sba_map_sg,
	.unmap_sg =		sba_unmap_sg,
1259 1260 1261 1262
	.dma_sync_single_for_cpu =	NULL,
	.dma_sync_single_for_device =	NULL,
	.dma_sync_sg_for_cpu =		NULL,
	.dma_sync_sg_for_device =	NULL,
Linus Torvalds's avatar
Linus Torvalds committed
1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299
};


/**************************************************************************
**
**   SBA PAT PDC support
**
**   o call pdc_pat_cell_module()
**   o store ranges in PCI "resource" structures
**
**************************************************************************/

/*
 * sba_get_pat_resources - placeholder for PAT PDC directed-range setup.
 * @sba_dev: SBA instance whose resource ranges would be queried.
 *
 * Currently a no-op: the #if 0 section only sketches the PDC_PAT cell
 * module fields that would need decoding.  As the comment below notes,
 * this work may belong in the LBA code instead.
 */
static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
#if 0
/*
** TODO/REVISIT/FIXME: support for directed ranges requires calls to
**      PAT PDC to program the SBA/LBA directed range registers...this
**      burden may fall on the LBA code since it directly supports the
**      PCI subsystem. It's not clear yet. - ggg
*/
PAT_MOD(mod)->mod_info.mod_pages   = PAT_GET_MOD_PAGES(temp);
	FIXME : ???
PAT_MOD(mod)->mod_info.dvi         = PAT_GET_DVI(temp);
	Tells where the dvi bits are located in the address.
PAT_MOD(mod)->mod_info.ioc         = PAT_GET_IOC(temp);
	FIXME : ???
#endif
}


/**************************************************************
*
*   Initialization and claim
*
***************************************************************/
Matthew Wilcox's avatar
Matthew Wilcox committed
1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395
#define PIRANHA_ADDR_MASK	0x00160000UL /* bit 17,18,20 */
#define PIRANHA_ADDR_VAL	0x00060000UL /* bit 17,18 on */
/*
 * sba_alloc_pdir - allocate the IO pdir, dodging the PA8700 erratum.
 * @pdir_size: size of the IO pdir in bytes (caller guarantees power of 2).
 *
 * Allocates page-table storage with __get_free_pages().  On PA8700
 * (PCX-W2) CPUs at rev <= 2.2 that need VINDEX bits from the SBA, the
 * pdir is re-allocated so its physical placement avoids the pattern
 * (little-endian bits 17+18 set, bit 20 clear) that triggers silent
 * data corruption with Ike/Astro.  For a 2MB pdir this may set
 * piranha_bad_128k and shrink the usable pdir by 128KB.  Panics on
 * allocation failure; returns the zeroed pdir base address.
 */
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
        unsigned long pdir_base;
	unsigned long pdir_order = get_order(pdir_size);

	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
	if (NULL == (void *) pdir_base)
		panic("sba_ioc_init() could not allocate I/O Page Table\n");

	/* If this is not PA8700 (PCX-W2)
	**	OR newer than ver 2.2
	**	OR in a system that doesn't need VINDEX bits from SBA,
	**
	** then we aren't exposed to the HW bug.
	*/
	if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
			|| (boot_cpu_data.pdc.versions > 0x202)
			|| (boot_cpu_data.pdc.capabilities & 0x08L) )
		return (void *) pdir_base;

	/*
	 * PA8700 (PCX-W2, aka piranha) silent data corruption fix
	 *
	 * An interaction between PA8700 CPU (Ver 2.2 or older) and
	 * Ike/Astro can cause silent data corruption. This is only
	 * a problem if the I/O PDIR is located in memory such that
	 * (little-endian)  bits 17 and 18 are on and bit 20 is off.
	 *
	 * Since the max IO Pdir size is 2MB, by cleverly allocating the
	 * right physical address, we can either avoid (IOPDIR <= 1MB)
	 * or minimize (2MB IO Pdir) the problem if we restrict the
	 * IO Pdir to a maximum size of 2MB-128K (1902K).
	 *
	 * Because we always allocate 2^N sized IO pdirs, either of the
	 * "bad" regions will be the last 128K if at all. That's easy
	 * to test for.
	 * 
	 */
	if (pdir_order <= (19-12)) {
		/* pdir <= 512KB: only re-allocate if it ends in a bad region */
		if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
			/* allocate a new one on 512k alignment */
			unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
			/* release original */
			free_pages(pdir_base, pdir_order);

			pdir_base = new_pdir;

			/* release excess */
			while (pdir_order < (19-12)) {
				new_pdir += pdir_size;
				free_pages(new_pdir, pdir_order);
				pdir_order +=1;
				pdir_size <<=1;
			}
		}
	} else {
		/*
		** 1MB or 2MB Pdir
		** Needs to be aligned on an "odd" 1MB boundary.
		*/
		unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */

		/* release original */
		free_pages( pdir_base, pdir_order);

		/* release first 1MB */
		free_pages(new_pdir, 20-12);

		pdir_base = new_pdir + 1024*1024;

		if (pdir_order > (20-12)) {
			/*
			** 2MB Pdir.
			**
			** Flag tells init_bitmap() to mark bad 128k as used
			** and to reduce the size by 128k.
			*/
			piranha_bad_128k = 1;

			new_pdir += 3*1024*1024;
			/* release last 1MB */
			free_pages(new_pdir, 20-12);

			/* release unusable 128KB */
			free_pages(new_pdir - 128*1024 , 17-12);

			pdir_size -= 128*1024;
		}
	}

	memset((void *) pdir_base, 0, pdir_size);
	return (void *) pdir_base;
}
Linus Torvalds's avatar
Linus Torvalds committed
1396

Matthew Wilcox's avatar
Matthew Wilcox committed
1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532
/*
 * sba_ioc_init_pluto - initialize one IOC on a Pluto/Mercury SBA.
 * @sba: the SBA device owning this IOC.
 * @ioc: per-IOC state structure to fill in.
 * @ioc_num: index of this IOC (selects which ropes/LBAs belong to it).
 *
 * Reads the firmware-programmed IBASE/IMASK "safe IOVA space",
 * allocates and registers the IO pdir, propagates ibase/imask to the
 * child LBAs on this IOC's ropes, programs the pdir page size, enables
 * IOVA translation, and purges the IO TLB.
 */
static void
sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
        /* lba_set_iregs() is in arch/parisc/kernel/lba_pci.c */
        extern void lba_set_iregs(struct parisc_device *, u32, u32);

	u32 iova_space_mask;
	u32 iova_space_size;
	int iov_order, tcnfg;
	struct parisc_device *lba;
#if SBA_AGP_SUPPORT
	int agp_found = 0;
#endif
	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

	/* keep the IOV space clear of the 0xfee00000 message area */
	if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
		printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
		iova_space_size /= 2;
	}

	/*
	** iov_order is always based on a 1GB IOVA space since we want to
	** turn on the other half for AGP GART.
	*/
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits)\n",
		__FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20,
		iov_order + PAGE_SHIFT);

	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic("Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__FUNCTION__, ioc->pdir_base, ioc->pdir_size);

#if SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
		ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	/* hand the (page-aligned) pdir to the hardware */
	ASSERT((((unsigned long) ioc->pdir_base) & PAGE_MASK) == (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask =  0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);
	ioc->imask = iova_space_mask;
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
	sba_dump_tlb(ioc->ioc_hpa);

	/*
	** setup Mercury IBASE/IMASK registers as well.
	*/
	for (lba = sba->child; lba; lba = lba->sibling) {
		/* NOTE(review): rope number presumed encoded in hpa bits
		** 13-16; upper bit selects the owning IOC - confirm against
		** the Pluto ERS */
		int rope_num = (lba->hpa >> 13) & 0xf;
		if (rope_num >> 3 == ioc_num)
			lba_set_iregs(lba, ioc->ibase, ioc->imask);
	}

	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

#ifdef __LP64__
	/*
	** Setting the upper bits makes checking for bypass addresses
	** a little faster later on.
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;
#endif

	/* Set I/O PDIR Page size to system page size */
	switch (PAGE_SHIFT) {
		case 12: tcnfg = 0; break;	/*  4K */
		case 13: tcnfg = 1; break;	/*  8K */
		case 14: tcnfg = 2; break;	/* 16K */
		case 16: tcnfg = 3; break;	/* 64K */
		default:
			panic(__FILE__ "Unsupported system page size %d",
				1 << PAGE_SHIFT);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	** Program the IOC's ibase and enable IOVA translation
	** Bit zero == enable bit.
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);

#if SBA_AGP_SUPPORT
	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA.  Unfortunately we can't know ahead of time
	** whether GART support will actually be used, for now we
	** can just key on any AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	/* NOTE(review): 'device' is not declared anywhere visible and
	** 'agp_found' is never set; this compiled-out branch likely
	** doesn't build as-is - verify before enabling SBA_AGP_SUPPORT */
	device=NULL;
	for (lba = sba->child; lba; lba = lba->sibling) {
		if (IS_QUICKSILVER(lba))
			break;
	}

	if (lba) {
		DBG_INIT("%s: Reserving half of IOVA space for AGP GART support\n", __FUNCTION__);
		ioc->pdir_size /= 2;
		((u64 *)ioc->pdir_base)[PDIR_INDEX(iova_space_size/2)] = SBA_IOMMU_COOKIE;
	} else {
		DBG_INIT("%s: No GART needed - no AGP controller found\n", __FUNCTION__);
	}
#endif /* SBA_AGP_SUPPORT */

}
Linus Torvalds's avatar
Linus Torvalds committed
1533 1534

/**
 * sba_ioc_init - size and program one IOC's IOVA space and I/O Pdir.
 * @sba: parisc device of the SBA; its child LBAs get matching IBASE/IMASK.
 * @ioc: the IOC to initialize; ioc->ioc_hpa must already point at its regs.
 * @ioc_num: index of this IOC within the SBA (selects which ropes it owns).
 *
 * Derives the IOVA space size from physical memory, allocates the I/O Pdir,
 * then programs PDIR_BASE, IBASE, IMASK and TCNFG and purges the I/O TLB.
 * Used for Astro/Ike; Pluto has its own init path (sba_ioc_init_pluto).
 */
static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	/* lba_set_iregs() is in arch/parisc/kernel/lba_pci.c */
	extern void lba_set_iregs(struct parisc_device *, u32, u32);

	u32 iova_space_size, iova_space_mask;
	int pdir_size, iov_order;
	unsigned long physmem;
	struct parisc_device *lba;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own.  Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class. Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	**
	** While we have 32-bits "IOVA" space, top two 2 bits are used
	** for DMA hints - ergo only 30 bits max.
	*/

	physmem = num_physpages << PAGE_SHIFT;
	/* share IOVA space proportionally among all IOCs in the system */
	iova_space_size = (u32) (physmem/(sba_mem_ratio*global_ioc_cnt));

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < 1024*1024) {
		iova_space_size = 1024*1024;
	}
#ifdef __LP64__
	else if (iova_space_size > 512*1024*1024) {
		iova_space_size = 512*1024*1024;
	}
#endif

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced)
	*/
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT-PAGE_SHIFT));
	ASSERT(iov_order <= (30 - IOVP_SHIFT));   /* iova_space_size <= 1GB */
	ASSERT(iov_order >= (20 - IOVP_SHIFT));   /* iova_space_size >= 1MB */
	/* round the requested size down to the log2 boundary actually used */
	iova_space_size = 1 << (iov_order + IOVP_SHIFT);

	/* one 64-bit Pdir entry per IOVP-sized page of IOVA space */
	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);

	ASSERT(pdir_size < 4*1024*1024);   /* max pdir size == 2MB */

	/* Verify it's a power of two */
	ASSERT((1 << get_order(pdir_size)) == (pdir_size >> PAGE_SHIFT));

	DBG_INIT("%s() hpa 0x%lx mem %dMB IOV %dMB (%d bits) PDIR size 0x%0x\n",
		__FUNCTION__, ioc->ioc_hpa, (int) (physmem>>20),
		iova_space_size>>20, iov_order + PAGE_SHIFT, pdir_size);

	ioc->pdir_base = sba_alloc_pdir(pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__FUNCTION__, ioc->pdir_base, pdir_size);

#if SBA_HINT_SUPPORT
	/* FIXME : DMA HINTs not used */
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
			ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	/* hardware requires the Pdir to be page-aligned */
	ASSERT((((unsigned long) ioc->pdir_base) & PAGE_MASK) == (unsigned long) ioc->pdir_base);
	WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask =  0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);

	/*
	** On C3000 w/512MB mem, HP-UX 10.20 reports:
	**     ibase=0, imask=0xFE000000, size=0x2000000.
	*/
	ioc->ibase = 0;
	ioc->imask = iova_space_mask;	/* save it */
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif

	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
		__FUNCTION__, ioc->ibase, ioc->imask);

	/*
	** FIXME: Hint registers are programmed with default hint
	** values during boot, so hints should be sane even if we
	** can't reprogram them the way drivers want.
	*/

	/*
	** setup Elroy IBASE/IMASK registers as well.
	*/
	for (lba = sba->child; lba; lba = lba->sibling) {
		/* NOTE(review): hpa bits 13..16 appear to encode the rope
		** number, with bit 3 of that selecting the IOC — TODO confirm
		** against the Elroy/rope address map. */
		int rope_num = (lba->hpa >> 13) & 0xf;
		if (rope_num >> 3 == ioc_num)
			lba_set_iregs(lba, ioc->ibase, ioc->imask);
	}

	/*
	** Program the IOC's ibase and enable IOVA translation
	** (low bit of IBASE is the translation-enable bit).
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
	WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);

	/* Set I/O PDIR Page size to 4K */
	WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);

	ioc->ibase = 0; /* used by SBA_IOVA and related macros */	

	DBG_INIT("%s() DONE\n", __FUNCTION__);
}



/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o initialize SBA chip modes (HardFail)
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/

/**
 * sba_hw_init - chip-level bring-up of the SBA and its IOC(s).
 * @sba_dev: the SBA instance; iodc/sba_hpa must already be filled in.
 *
 * Quiesces firmware-started DMA (USB), programs the global IOC control
 * register (non-Pluto), determines the per-variant IOC register offsets
 * and count, reserves chip MMIO ranges, enables rope HardFail, and then
 * runs the per-IOC init (sba_ioc_init / sba_ioc_init_pluto).
 */
static void
sba_hw_init(struct sba_device *sba_dev)
{
	int i;
	int num_ioc;
	u64 ioc_ctl;

	if (!is_pdc_pat()) {
		/* Shutdown the USB controller on Astro-based workstations.
		** Once we reprogram the IOMMU, the next DMA performed by
		** USB will HPMC the box.
		*/
		pdc_io_reset_devices();

		/*
		** XXX May need something more sophisticated to deal
		**     with DMA from LAN. Maybe use page zero boot device
		**     as a handle to talk to PDC about which device to
		**     shutdown. This also needs to work for is_pdc_pat(). 
		*/
	}

	if (!IS_PLUTO(sba_dev->iodc)) {
		/* clear error-mode bits, enable TOC (Astro firmware default) */
		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
			__FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
		ioc_ctl |= IOC_CTRL_TC;	/* Astro: firmware enables this */

		WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

#ifdef DEBUG_SBA_INIT
		/* read back so the DBG line shows what actually stuck */
		ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
	} /* if !PLUTO */

	if (IS_ASTRO(sba_dev->iodc)) {
		int err;
		/* PAT_PDC (L-class) also reports the same goofy base */
		sba_dev->ioc[0].ioc_hpa = ASTRO_IOC_OFFSET;
		num_ioc = 1;

		/* keep anyone else from claiming Astro's Intr-Ack window */
		sba_dev->chip_resv.name = "Astro Intr Ack";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
		sba_dev->chip_resv.end   = PCI_F_EXTEND | (0xff000000UL - 1) ;
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		if (err < 0) {
			BUG();
		}

	} else if (IS_PLUTO(sba_dev->iodc)) {
		int err;

		/* We use a negative value for IOC HPA so it gets 
                 * corrected when we add it with IKE's IOC offset.
		 * Doesnt look clean, but fewer code. 
                 */
		sba_dev->ioc[0].ioc_hpa = -PLUTO_IOC_OFFSET;
		num_ioc = 1;

		sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
		sba_dev->chip_resv.end   = PCI_F_EXTEND | (0xff200000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		BUG_ON(err < 0);

		/* reserve the IOVA window so MMIO allocation avoids it */
		sba_dev->iommu_resv.name = "IOVA Space";
		sba_dev->iommu_resv.start = 0x40000000UL;
		sba_dev->iommu_resv.end   = 0x50000000UL - 1;
		err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
		BUG_ON(err < 0);

	} else {
		/* IS_IKE (ie N-class, L3000, L1500) */
		sba_dev->ioc[0].ioc_hpa = sba_dev->ioc[1].ioc_hpa = 0;
		num_ioc = 2;

		/* TODO - LOOKUP Ike/Stretch chipset mem map */
	}

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		/* convert the per-variant offset into an absolute HPA */
		sba_dev->ioc[i].ioc_hpa += sba_dev->sba_hpa + IKE_IOC_OFFSET(i);

		/*
		** Make sure the box crashes if we get any errors on a rope.
		*/
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE0_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE1_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE2_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE3_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE4_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE5_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE6_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		/* flush out the writes */
		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		if (IS_PLUTO(sba_dev->iodc)) {
			sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
		} else {
			sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
		}
	}
}

/**
 * sba_common_init - software-side IOC setup shared by all SBA variants.
 * @sba_dev: the SBA instance; sba_hw_init() must have run (num_ioc,
 *	     pdir_size and pdir_base are consumed here).
 *
 * Links the device into the global sba_list, allocates and initializes
 * each IOC's allocation bitmap (res_map), applies parts 2 and 3 of the
 * PIRANHA 128k workaround, and records whether IO-PDIR writes need
 * FDC/SYNC flushing (non-coherent PDIR).
 */
static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* add this one to the head of the list (order doesn't matter)
	** This will be useful for debugging - especially if we get coredumps
	*/
	sba_dev->next = sba_list;
	sba_list = sba_dev;

	for(i=0; i< sba_dev->num_ioc; i++) {
		int res_size;
#ifdef DEBUG_DMB_TRAP
		extern void iterate_pages(unsigned long , unsigned long ,
					  void (*)(pte_t * , unsigned long),
					  unsigned long );
		void set_data_memory_break(pte_t * , unsigned long);
#endif
		/* resource map size dictated by pdir_size */
		res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */

		/* Second part of PIRANHA BUG */
		if (piranha_bad_128k) {
			res_size -= (128*1024)/sizeof(u64);
		}

		res_size >>= 3;  /* convert bit count to byte count */
		DBG_INIT("%s() res_size 0x%x\n",
			__FUNCTION__, res_size);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
#endif

		/* no fallback possible this early in boot — just die */
		if (NULL == sba_dev->ioc[i].res_map)
		{
			panic("%s:%s() could not allocate resource map\n",
			      __FILE__, __FUNCTION__ );
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);
		/* next available IOVP - circular search */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

#ifdef ASSERT_PDIR_SANITY
		/* Mark first bit busy - ie no IOVA 0 */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif

		/* Third (and last) part of PIRANHA BUG */
		if (piranha_bad_128k) {
			/* region from +1408K to +1536 is un-usable. */

			int idx_start = (1408*1024/sizeof(u64)) >> 3;
			int idx_end   = (1536*1024/sizeof(u64)) >> 3;
			long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
			long *p_end   = (long *) &(sba_dev->ioc[i].res_map[idx_end]);

			/* mark that part of the io pdir busy */
			while (p_start < p_end)
				*p_start++ = -1;
				
		}

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
		iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
				set_data_memory_break, 0);
#endif

		DBG_INIT("%s() %d res_map %x %p\n",
			__FUNCTION__, i, res_size, sba_dev->ioc[i].res_map);
	}

	sba_dev->sba_lock = SPIN_LOCK_UNLOCKED;
	/* non-coherent IO-PDIR means every PDIR update needs FDC/SYNC */
	ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;

#ifdef DEBUG_SBA_INIT
	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
#endif
}

#ifdef CONFIG_PROC_FS
/**
 * sba_proc_info - /proc read handler reporting SBA/IOC configuration.
 * @buf: page-sized output buffer supplied by procfs.
 * @start/@offset/@len: procfs paging arguments (unused; output fits one page).
 *
 * Returns the number of bytes written into @buf.
 *
 * Fixed vs. the original: the old code grew the buffer with
 * sprintf(buf, "%s...", buf, ...), which passes overlapping source and
 * destination to sprintf — undefined behavior.  We now append at buf+n.
 * The stats averages are also guarded against division by zero, which
 * the original hit if this file was read before any DMA was mapped.
 */
static int sba_proc_info(char *buf, char **start, off_t offset, int len)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
	int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
	unsigned long i;
	int n = 0;	/* bytes emitted so far */
#ifdef SBA_COLLECT_STATS
	unsigned long avg = 0, min, max;
#endif

	n += sprintf(buf + n, "%s rev %d.%d\n",
		sba_dev->name,
		(sba_dev->hw_rev & 0x7) + 1,
		(sba_dev->hw_rev & 0x18) >> 3);
	n += sprintf(buf + n, "IO PDIR size    : %d bytes (%d entries)\n",
		(int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
		total_pages);

	n += sprintf(buf + n, "Resource bitmap : %d bytes (%d pages)\n",
		ioc->res_size, ioc->res_size << 3);   /* 8 bits per byte */

	n += sprintf(buf + n, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));

	/* the four directed LMMIO range register sets, 0x18 bytes apart */
	for (i=0; i<4; i++)
		n += sprintf(buf + n, "DIR%ld_BASE/MASK/ROUTE %08x %08x %08x\n",
			i,
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE  + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK  + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));

#ifdef SBA_COLLECT_STATS
	n += sprintf(buf + n, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
		total_pages - ioc->used_pages, ioc->used_pages,
		(int) (ioc->used_pages * 100 / total_pages));

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	n += sprintf(buf + n, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
		min, avg, max);

	/* guard every average: the counters are zero until first use */
	n += sprintf(buf + n, "pci_map_single(): %12ld calls  %12ld pages (avg %d/1000)\n",
		ioc->msingle_calls, ioc->msingle_pages,
		(int) (ioc->msingle_calls ?
			(ioc->msingle_pages * 1000)/ioc->msingle_calls : 0));

	/* KLUGE - unmap_sg calls unmap_single for each mapped page */
	min = ioc->usingle_calls;
	max = ioc->usingle_pages - ioc->usg_pages;
	n += sprintf(buf + n, "pci_unmap_single: %12ld calls  %12ld pages (avg %d/1000)\n",
		min, max,
		(int) (min ? (max * 1000)/min : 0));

	n += sprintf(buf + n, "pci_map_sg()    : %12ld calls  %12ld pages (avg %d/1000)\n",
		ioc->msg_calls, ioc->msg_pages,
		(int) (ioc->msg_calls ?
			(ioc->msg_pages * 1000)/ioc->msg_calls : 0));

	n += sprintf(buf + n, "pci_unmap_sg()  : %12ld calls  %12ld pages (avg %d/1000)\n",
		ioc->usg_calls, ioc->usg_pages,
		(int) (ioc->usg_calls ?
			(ioc->usg_pages * 1000)/ioc->usg_calls : 0));
#endif

	return n;
}

Matthew Wilcox's avatar
Matthew Wilcox committed
1958 1959
#if 0
/* XXX too much output - exceeds 4k limit and needs to be re-written */
/**
 * sba_resource_map - /proc dump of IOC 0's allocation bitmap (disabled).
 * @buf: procfs output buffer; @start/@offset/@len unused.
 *
 * Returns bytes written.  Kept under #if 0 until it is reworked to honor
 * the 4k procfs page limit.  Fixed while here: the original appended with
 * sprintf(buf, "%s...", buf, ...) — overlapping src/dst is undefined
 * behavior — so output is now appended at buf+n.
 */
static int
sba_resource_map(char *buf, char **start, off_t offset, int len)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
	unsigned int *res_ptr = (unsigned int *)ioc->res_map;
	int i;
	int n = 0;	/* bytes emitted so far */

	/* dump the bitmap as hex words, eight per line */
	for(i = 0; i < (ioc->res_size / sizeof(unsigned int)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			n += sprintf(buf + n, "\n   ");
		n += sprintf(buf + n, " %08x", *res_ptr);
	}
	n += sprintf(buf + n, "\n");

	return n;
}
#endif /* 0 */
#endif /* CONFIG_PROC_FS */

/* Hardware IDs of the SBA variants this driver binds to.  Astro and
** Pluto appear as I/O adapters (HPHW_IOA); Ike/REO appear as bus
** converter ports (HPHW_BCPORT). */
static struct parisc_device_id sba_tbl[] = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },	/* Astro */
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },	/* Ike */
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },	/* REO */
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },	/* REO-G */
	{ HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },	/* Pluto */
/* These two entries commented out because we don't find them in a
 * buswalk yet.  If/when we do, they would cause us to think we had
 * many more SBAs then we really do.
 *	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, ASTRO_ROPES_PORT, 0xc },
 *	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_ROPES_PORT, 0xc },
 */
/* We shall also comment out Pluto Ropes Port since bus walk doesnt
 * report it yet. 
 *	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, PLUTO_ROPES_PORT, 0xc },
 */
	{ 0, }		/* list terminator */
};

/* probe routine, defined below; declared here so sba_driver can use it */
int sba_driver_callback(struct parisc_device *);

/* Registration record binding the sba_tbl IDs to the probe routine. */
static struct parisc_driver sba_driver = {
	.name =		MODULE_NAME,
	.id_table =	sba_tbl,
	.probe =	sba_driver_callback,
};
Linus Torvalds's avatar
Linus Torvalds committed
2007 2008

/*
2009
** Determine if sba should claim this chip (return 0) or not (return 1).
Linus Torvalds's avatar
Linus Torvalds committed
2010 2011 2012 2013
** If so, initialize the chip and tell other partners in crime they
** have work to do.
*/
int
Matthew Wilcox's avatar
Matthew Wilcox committed
2014
sba_driver_callback(struct parisc_device *dev)
Linus Torvalds's avatar
Linus Torvalds committed
2015 2016 2017 2018
{
	struct sba_device *sba_dev;
	u32 func_class;
	int i;
Matthew Wilcox's avatar
Matthew Wilcox committed
2019
	char *version;
Linus Torvalds's avatar
Linus Torvalds committed
2020

Matthew Wilcox's avatar
Matthew Wilcox committed
2021 2022 2023 2024 2025 2026 2027
	sba_dump_ranges(dev->hpa);

	/* Read HW Rev First */
	func_class = READ_REG(dev->hpa + SBA_FCLASS);

	if (IS_ASTRO(&dev->id)) {
		unsigned long fclass;
Linus Torvalds's avatar
Linus Torvalds committed
2028 2029
		static char astro_rev[]="Astro ?.?";

Matthew Wilcox's avatar
Matthew Wilcox committed
2030 2031
		/* Astro is broken...Read HW Rev First */
		fclass = READ_REG(dev->hpa);
Linus Torvalds's avatar
Linus Torvalds committed
2032

Matthew Wilcox's avatar
Matthew Wilcox committed
2033 2034 2035
		astro_rev[6] = '1' + (char) (fclass & 0x7);
		astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
		version = astro_rev;
Linus Torvalds's avatar
Linus Torvalds committed
2036

Matthew Wilcox's avatar
Matthew Wilcox committed
2037
	} else if (IS_IKE(&dev->id)) {
Matthew Wilcox's avatar
Matthew Wilcox committed
2038
		static char ike_rev[] = "Ike rev ?";
Linus Torvalds's avatar
Linus Torvalds committed
2039
		ike_rev[8] = '0' + (char) (func_class & 0xff);
Matthew Wilcox's avatar
Matthew Wilcox committed
2040
		version = ike_rev;
Matthew Wilcox's avatar
Matthew Wilcox committed
2041 2042 2043 2044 2045
	} else if (IS_PLUTO(&dev->id)) {
		static char pluto_rev[]="Pluto ?.?";
		pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4); 
		pluto_rev[8] = '0' + (char) (func_class & 0x0f); 
		version = pluto_rev;
Matthew Wilcox's avatar
Matthew Wilcox committed
2046
	} else {
Matthew Wilcox's avatar
Matthew Wilcox committed
2047
		static char reo_rev[] = "REO rev ?";
Matthew Wilcox's avatar
Matthew Wilcox committed
2048 2049 2050 2051 2052 2053 2054
		reo_rev[8] = '0' + (char) (func_class & 0xff);
		version = reo_rev;
	}

	if (!global_ioc_cnt) {
		global_ioc_cnt = count_parisc_driver(&sba_driver);

Matthew Wilcox's avatar
Matthew Wilcox committed
2055 2056
		/* Astro and Pluto have one IOC per SBA */
		if ((!IS_ASTRO(&dev->id)) || (!IS_PLUTO(&dev->id)))
Matthew Wilcox's avatar
Matthew Wilcox committed
2057
			global_ioc_cnt *= 2;
Linus Torvalds's avatar
Linus Torvalds committed
2058 2059
	}

Matthew Wilcox's avatar
Matthew Wilcox committed
2060 2061 2062
	printk(KERN_INFO "%s found %s at 0x%lx\n",
		MODULE_NAME, version, dev->hpa);

Linus Torvalds's avatar
Linus Torvalds committed
2063
	sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL);
Matthew Wilcox's avatar
Matthew Wilcox committed
2064 2065
	if (NULL == sba_dev) {
		printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
Linus Torvalds's avatar
Linus Torvalds committed
2066 2067
		return(1);
	}
Matthew Wilcox's avatar
Matthew Wilcox committed
2068 2069

	dev->sysdata = (void *) sba_dev;
Linus Torvalds's avatar
Linus Torvalds committed
2070
	memset(sba_dev, 0, sizeof(struct sba_device));
Matthew Wilcox's avatar
Matthew Wilcox committed
2071

Linus Torvalds's avatar
Linus Torvalds committed
2072 2073 2074
	for(i=0; i<MAX_IOC; i++)
		spin_lock_init(&(sba_dev->ioc[i].res_lock));

Matthew Wilcox's avatar
Matthew Wilcox committed
2075
	sba_dev->dev = dev;
Linus Torvalds's avatar
Linus Torvalds committed
2076
	sba_dev->hw_rev = func_class;
Matthew Wilcox's avatar
Matthew Wilcox committed
2077 2078 2079
	sba_dev->iodc = &dev->id;
	sba_dev->name = dev->name;
	sba_dev->sba_hpa = dev->hpa;  /* faster access */
Linus Torvalds's avatar
Linus Torvalds committed
2080 2081 2082 2083 2084 2085 2086 2087

	sba_get_pat_resources(sba_dev);
	sba_hw_init(sba_dev);
	sba_common_init(sba_dev);

	hppa_dma_ops = &sba_ops;

#ifdef CONFIG_PROC_FS
Matthew Wilcox's avatar
Matthew Wilcox committed
2088
	if (IS_ASTRO(&dev->id)) {
Linus Torvalds's avatar
Linus Torvalds committed
2089
		create_proc_info_entry("Astro", 0, proc_runway_root, sba_proc_info);
Matthew Wilcox's avatar
Matthew Wilcox committed
2090
	} else if (IS_IKE(&dev->id)) {
Linus Torvalds's avatar
Linus Torvalds committed
2091
		create_proc_info_entry("Ike", 0, proc_runway_root, sba_proc_info);
Matthew Wilcox's avatar
Matthew Wilcox committed
2092 2093
	} else if (IS_PLUTO(&dev->id)) {
		create_proc_info_entry("Pluto", 0, proc_mckinley_root, sba_proc_info);
Matthew Wilcox's avatar
Matthew Wilcox committed
2094 2095
	} else {
		create_proc_info_entry("Reo", 0, proc_runway_root, sba_proc_info);
Linus Torvalds's avatar
Linus Torvalds committed
2096
	}
Matthew Wilcox's avatar
Matthew Wilcox committed
2097
#if 0
Linus Torvalds's avatar
Linus Torvalds committed
2098
	create_proc_info_entry("bitmap", 0, proc_runway_root, sba_resource_map);
Matthew Wilcox's avatar
Matthew Wilcox committed
2099
#endif
Linus Torvalds's avatar
Linus Torvalds committed
2100
#endif
Matthew Wilcox's avatar
Matthew Wilcox committed
2101 2102 2103
	parisc_vmerge_boundary = IOVP_SIZE;
	parisc_vmerge_max_size = IOVP_SIZE * BITS_PER_LONG;

Linus Torvalds's avatar
Linus Torvalds committed
2104 2105
	return 0;
}
Matthew Wilcox's avatar
Matthew Wilcox committed
2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121

/*
** One time initialization to let the world know the SBA was found.
** This is the only routine which is NOT static.
** Must be called exactly once before pci_init().
**
** Registration triggers sba_driver_callback() for every device
** matching sba_tbl found during the bus walk.
*/
void __init sba_init(void)
{
	register_parisc_driver(&sba_driver);
}


/**
 * sba_get_iommu - Assign the iommu pointer for the pci bus controller.
 * @pci_hba: The parisc device (a PCI host bus adapter / LBA).
 *
 * Returns the appropriate IOMMU data for the given parisc PCI controller.
 * This is cached and used later for PCI DMA Mapping.
 */
void * sba_get_iommu(struct parisc_device *pci_hba)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = sba_dev->dev.driver_data;
	char t = sba_dev->id.hw_type;
	/* NOTE(review): hw_path low 3 bits select the rope within an IOC,
	** so >> 3 yields the IOC index — confirm against ROPES_PER_IOC. */
	int iocnum = (pci_hba->hw_path >> 3);	/* rope # */

	/* parent must be an SBA (I/O adapter or bus-converter port) */
	BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	return &(sba->ioc[iocnum]);
}


/**
 * sba_directed_lmmio - return first directed LMMIO range routed to rope
 * @pci_hba: The parisc device (PCI host controller).
 * @r: resource PCI host controller wants start/end fields assigned.
 *
 * For the given parisc PCI controller, determine if any direct ranges
 * are routed down the corresponding rope.  If none match, @r is left
 * with start == end == 0.
 */
void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = sba_dev->dev.driver_data;
	char t = sba_dev->id.hw_type;
	int i;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));  /* rope # */

	if ((t!=HPHW_IOA) && (t!=HPHW_BCPORT))
		BUG();

	r->start = r->end = 0;

	/* Astro has 4 directed ranges. Not sure about Ike/Pluto/et al */
	for (i=0; i<4; i++) {
		int base, size;
		unsigned long reg = sba->sba_hpa + i*0x18;

		/* BASE low bit == range enable */
		base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
		if ((base & 1) == 0)
			continue;	/* not enabled */

		/* ROUTE low bits name the rope this range is directed to */
		size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);

		if ((size & (ROPES_PER_IOC-1)) != rope)
			continue;	/* directed down different rope */
		
		r->start = (base & ~1UL) | PCI_F_EXTEND;
		/* MASK register holds ~(size-1); invert to get the extent */
		size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
		r->end = r->start + size;
	}
}


/**
 * sba_distributed_lmmio - return portion of distributed LMMIO range
 * @pci_hba: The parisc device (PCI host controller).
 * @r: resource PCI host controller wants start/end fields assigned.
 *
 * For the given parisc PCI controller, return portion of distributed LMMIO
 * range. The distributed LMMIO is always present and it's just a question
 * of the base address and size of the range.
 */
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = sba_dev->dev.driver_data;
	char t = sba_dev->id.hw_type;
	int base, size;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));  /* rope # */

	if ((t!=HPHW_IOA) && (t!=HPHW_BCPORT))
		BUG();

	r->start = r->end = 0;

	/* BASE low bit == range enable; distributed range must be on */
	base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
	if ((base & 1) == 0) {
		BUG();	/* Gah! Distr Range wasn't enabled! */
		return;
	}

	r->start = (base & ~1UL) | PCI_F_EXTEND;

	/* split the whole distributed window evenly across the ropes */
	size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
	r->start += rope * (size + 1);	/* adjust base for this rope */
	r->end = r->start + size;
}