Commit 8b6f08ea authored by Paul Mundt

Merge branch 'sh/alphaproject' into sh-latest

parents 4ae26f46 8a453cac
......@@ -457,6 +457,9 @@ ChangeLog
Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
2.1.30:
- Fix writev() (it kept writing the first segment over and over again
  instead of moving on to subsequent segments; see the sketch after this hunk).
2.1.29:
- Fix a deadlock when mounting read-write.
2.1.28:
......
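Aside: the writev() fix above comes down to advancing the iovec cursor as each segment is consumed. A minimal sketch of the corrected loop shape — illustrative only, not the actual fs/ntfs code:

#include <sys/uio.h>
#include <stddef.h>

/* Illustrative only: visit each segment exactly once instead of
 * re-consuming iov[0] on every iteration (the 2.1.30 bug). */
static size_t total_copied(const struct iovec *iov, int iovcnt)
{
	size_t done = 0;
	int seg;

	for (seg = 0; seg < iovcnt; seg++)
		done += iov[seg].iov_len;	/* consume this segment, then move on */

	return done;
}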
......@@ -4383,11 +4383,11 @@ F: Documentation/scsi/NinjaSCSI.txt
F: drivers/scsi/nsp32*
NTFS FILESYSTEM
M: Anton Altaparmakov <aia21@cantab.net>
M: Anton Altaparmakov <anton@tuxera.com>
L: linux-ntfs-dev@lists.sourceforge.net
W: http://www.linux-ntfs.org/
W: http://www.tuxera.com/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git
S: Maintained
S: Supported
F: Documentation/filesystems/ntfs.txt
F: fs/ntfs/
......
......@@ -3,6 +3,9 @@ menu "Board support"
config SOLUTION_ENGINE
bool
config SH_ALPHA_BOARD
bool
config SH_SOLUTION_ENGINE
bool "SolutionEngine"
select SOLUTION_ENGINE
......@@ -320,6 +323,21 @@ config SH_SH2007
Compact Flash socket, two serial ports and PC-104 bus.
More information at <http://sh2000.sh-linux.org>.
config SH_APSH4A3A
bool "AP-SH4A-3A"
select SH_ALPHA_BOARD
depends on CPU_SUBTYPE_SH7785
help
Select AP-SH4A-3A if configuring for an ALPHAPROJECT AP-SH4A-3A board.
config SH_APSH4AD0A
bool "AP-SH4AD-0A"
select SH_ALPHA_BOARD
select SYS_SUPPORTS_PCI
depends on CPU_SUBTYPE_SH7786
help
Select AP-SH4AD-0A if configuring for an ALPHAPROJECT AP-SH4AD-0A board.
endmenu
source "arch/sh/boards/mach-r2d/Kconfig"
......
......@@ -13,3 +13,5 @@ obj-$(CONFIG_SH_ESPT) += board-espt.o
obj-$(CONFIG_SH_POLARIS) += board-polaris.o
obj-$(CONFIG_SH_TITAN) += board-titan.o
obj-$(CONFIG_SH_SH7757LCR) += board-sh7757lcr.o
obj-$(CONFIG_SH_APSH4A3A) += board-apsh4a3a.o
obj-$(CONFIG_SH_APSH4AD0A) += board-apsh4ad0a.o
/*
* ALPHAPROJECT AP-SH4A-3A Support.
*
* Copyright (C) 2010 ALPHAPROJECT Co.,Ltd.
* Copyright (C) 2008 Yoshihiro Shimoda
* Copyright (C) 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mtd/physmap.h>
#include <linux/smsc911x.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <asm/machvec.h>
#include <asm/sizes.h>
#include <asm/clock.h>
static struct mtd_partition nor_flash_partitions[] = {
{
.name = "loader",
.offset = 0x00000000,
.size = 512 * 1024,
},
{
.name = "bootenv",
.offset = MTDPART_OFS_APPEND,
.size = 512 * 1024,
},
{
.name = "kernel",
.offset = MTDPART_OFS_APPEND,
.size = 4 * 1024 * 1024,
},
{
.name = "data",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
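/* physmap bus width is given in bytes: 4 selects a 32-bit NOR interface */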
static struct physmap_flash_data nor_flash_data = {
.width = 4,
.parts = nor_flash_partitions,
.nr_parts = ARRAY_SIZE(nor_flash_partitions),
};
static struct resource nor_flash_resources[] = {
[0] = {
.start = 0x00000000,
.end = 0x01000000 - 1,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device nor_flash_device = {
.name = "physmap-flash",
.dev = {
.platform_data = &nor_flash_data,
},
.num_resources = ARRAY_SIZE(nor_flash_resources),
.resource = nor_flash_resources,
};
static struct resource smsc911x_resources[] = {
[0] = {
.name = "smsc911x-memory",
.start = 0xA4000000,
.end = 0xA4000000 + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
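/* evt2irq() translates the SH INTEVT vector 0x200 into its Linux IRQ number */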
.name = "smsc911x-irq",
.start = evt2irq(0x200),
.end = evt2irq(0x200),
.flags = IORESOURCE_IRQ,
},
};
static struct smsc911x_platform_config smsc911x_config = {
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
.flags = SMSC911X_USE_16BIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device smsc911x_device = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources,
.dev = {
.platform_data = &smsc911x_config,
},
};
static struct platform_device *apsh4a3a_devices[] __initdata = {
&nor_flash_device,
&smsc911x_device,
};
static int __init apsh4a3a_devices_setup(void)
{
return platform_add_devices(apsh4a3a_devices,
ARRAY_SIZE(apsh4a3a_devices));
}
device_initcall(apsh4a3a_devices_setup);
static int apsh4a3a_clk_init(void)
{
struct clk *clk;
int ret;
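/* "extal" is the external crystal input; this board fits a 33.333 MHz part */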
clk = clk_get(NULL, "extal");
if (!clk || IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333000);
clk_put(clk);
return ret;
}
/* Initialize the board */
static void __init apsh4a3a_setup(char **cmdline_p)
{
printk(KERN_INFO "Alpha Project AP-SH4A-3A support:\n");
}
static void __init apsh4a3a_init_irq(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ7654);
}
/* Return the board specific boot mode pin configuration */
static int apsh4a3a_mode_pins(void)
{
int value = 0;
/* These are the factory default settings of SW1 and SW2.
* If you change these DIP switches then you will need to
* adjust the values below as well.
*/
value &= ~MODE_PIN0; /* Clock Mode 16 */
value &= ~MODE_PIN1;
value &= ~MODE_PIN2;
value &= ~MODE_PIN3;
value |= MODE_PIN4;
value &= ~MODE_PIN5; /* 16-bit Area0 bus width */
value |= MODE_PIN6; /* Area 0 SRAM interface */
value |= MODE_PIN7;
value |= MODE_PIN8; /* Little Endian */
value |= MODE_PIN9; /* Master Mode */
value |= MODE_PIN10; /* Crystal resonator */
value |= MODE_PIN11; /* Display Unit */
value |= MODE_PIN12;
value &= ~MODE_PIN13; /* 29-bit address mode */
value |= MODE_PIN14; /* No PLL step-up */
return value;
}
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_apsh4a3a __initmv = {
.mv_name = "AP-SH4A-3A",
.mv_setup = apsh4a3a_setup,
.mv_clk_init = apsh4a3a_clk_init,
.mv_init_irq = apsh4a3a_init_irq,
.mv_mode_pins = apsh4a3a_mode_pins,
};
/*
* ALPHAPROJECT AP-SH4AD-0A Support.
*
* Copyright (C) 2010 ALPHAPROJECT Co.,Ltd.
* Copyright (C) 2010 Matt Fleming
* Copyright (C) 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/smsc911x.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <asm/machvec.h>
#include <asm/sizes.h>
static struct resource smsc911x_resources[] = {
[0] = {
.name = "smsc911x-memory",
.start = 0xA4000000,
.end = 0xA4000000 + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "smsc911x-irq",
.start = evt2irq(0x200),
.end = evt2irq(0x200),
.flags = IORESOURCE_IRQ,
},
};
static struct smsc911x_platform_config smsc911x_config = {
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
.flags = SMSC911X_USE_16BIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device smsc911x_device = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources,
.dev = {
.platform_data = &smsc911x_config,
},
};
static struct platform_device *apsh4ad0a_devices[] __initdata = {
&smsc911x_device,
};
static int __init apsh4ad0a_devices_setup(void)
{
return platform_add_devices(apsh4ad0a_devices,
ARRAY_SIZE(apsh4ad0a_devices));
}
device_initcall(apsh4ad0a_devices_setup);
static int apsh4ad0a_mode_pins(void)
{
int value = 0;
/* These are the factory default settings of SW1 and SW2.
* If you change these DIP switches then you will need to
* adjust the values below as well.
*/
value |= MODE_PIN0; /* Clock Mode 3 */
value |= MODE_PIN1;
value &= ~MODE_PIN2;
value &= ~MODE_PIN3;
value &= ~MODE_PIN4; /* 16-bit Area0 bus width */
value |= MODE_PIN5;
value |= MODE_PIN6;
value |= MODE_PIN7; /* Normal mode */
value |= MODE_PIN8; /* Little Endian */
value |= MODE_PIN9; /* Crystal resonator */
value &= ~MODE_PIN10; /* 29-bit address mode */
value &= ~MODE_PIN11; /* PCI-E Root port */
value &= ~MODE_PIN12; /* 4 lane + 1 lane */
value |= MODE_PIN13; /* AUD Enable */
value &= ~MODE_PIN14; /* Normal Operation */
return value;
}
static int apsh4ad0a_clk_init(void)
{
struct clk *clk;
int ret;
clk = clk_get(NULL, "extal");
if (!clk || IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333000);
clk_put(clk);
return ret;
}
/* Initialize the board */
static void __init apsh4ad0a_setup(char **cmdline_p)
{
pr_info("Alpha Project AP-SH4AD-0A support:\n");
}
static void __init apsh4ad0a_init_irq(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ3210);
}
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_apsh4ad0a __initmv = {
.mv_name = "AP-SH4AD-0A",
.mv_setup = apsh4ad0a_setup,
.mv_mode_pins = apsh4ad0a_mode_pins,
.mv_clk_init = apsh4ad0a_clk_init,
.mv_init_irq = apsh4ad0a_init_irq,
};
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7785=y
CONFIG_MEMORY_START=0x0C000000
CONFIG_FLATMEM_MANUAL=y
CONFIG_SH_STORE_QUEUES=y
CONFIG_SH_APSH4A3A=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_KEXEC=y
CONFIG_PREEMPT=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_SMSC911X=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_HW_RANDOM=y
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_SH7785FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
CONFIG_LOGO=y
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_CIFS=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_FTRACE is not set
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_RCU_TRACE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CGROUPS=y
CONFIG_CGROUP_NS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEM_RES_CTLR=y
CONFIG_BLK_CGROUP=y
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_CPU_SUBTYPE_SH7786=y
CONFIG_MEMORY_SIZE=0x10000000
CONFIG_HUGETLB_PAGE_SIZE_1MB=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_SH_STORE_QUEUES=y
CONFIG_SH_APSH4AD0A=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_USERSPACE=m
CONFIG_CPU_FREQ_GOV_ONDEMAND=m
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
CONFIG_SH_CPU_FREQ=y
CONFIG_KEXEC=y
CONFIG_SECCOMP=y
CONFIG_PREEMPT=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
CONFIG_PM=y
CONFIG_PM_DEBUG=y
CONFIG_PM_VERBOSE=y
CONFIG_PM_RUNTIME=y
CONFIG_CPU_IDLE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_NET_KEY=y
CONFIG_INET=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CFI=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
CONFIG_MDIO_BITBANG=y
CONFIG_NET_ETHERNET=y
CONFIG_SMSC911X=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FB_SH7785FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
CONFIG_LOGO=y
CONFIG_USB=y
CONFIG_USB_DEBUG=y
CONFIG_USB_MON=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_CIFS=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_VM=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_DWARF_UNWINDER=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
......@@ -9,6 +9,7 @@ SE SH_SOLUTION_ENGINE
HIGHLANDER SH_HIGHLANDER
RTS7751R2D SH_RTS7751R2D
RSK SH_RSK
ALPHA_BOARD SH_ALPHA_BOARD
#
# List of companion chips / MFDs.
......@@ -61,3 +62,5 @@ ESPT SH_ESPT
POLARIS SH_POLARIS
KFR2R09 SH_KFR2R09
ECOVEC SH_ECOVEC
APSH4A3A SH_APSH4A3A
APSH4AD0A SH_APSH4AD0A
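For context: this two-column table is consumed by the arch/sh mach-types generator, which turns each entry into a mach_is_*() predicate keyed on the Kconfig symbol in the second column. A rough sketch of the generated shape — assumed for illustration, not the generator's literal output:

#ifdef CONFIG_SH_APSH4A3A
# define MACH_APSH4A3A	1
#else
# define MACH_APSH4A3A	0
#endif
#define mach_is_apsh4a3a()	(MACH_APSH4A3A)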
......@@ -464,6 +464,7 @@ config XEN_BLKDEV_FRONTEND
tristate "Xen virtual block device support"
depends on XEN
default y
select XEN_XENBUS_FRONTEND
help
This driver implements the front-end of the Xen virtual
block device driver. It communicates with a back-end driver
......
......@@ -94,6 +94,8 @@
#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
#define G4x_GMCH_SIZE_VT_2M (0xc << 8)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
#define I810_DRAM_ROW_0_SDRAM 0x00000001
......
......@@ -688,14 +688,14 @@ static int intel_gtt_init(void)
intel_private.base.stolen_size = intel_gtt_stolen_size();
intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
ret = intel_gtt_setup_scratch_page();
if (ret != 0) {
intel_gtt_cleanup();
return ret;
}
intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
return 0;
}
......@@ -814,6 +814,12 @@ static bool intel_enable_gtt(void)
}
}
/* On the resume path we may be adjusting the PGTBL value, so
* be paranoid and flush all chipset write buffers...
*/
if (INTEL_GTT_GEN >= 3)
writel(0, intel_private.registers+GFX_FLSH_CNTL);
reg = intel_private.registers+I810_PGETBL_CTL;
writel(intel_private.PGETBL_save, reg);
if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
......@@ -823,6 +829,9 @@ static bool intel_enable_gtt(void)
return false;
}
if (INTEL_GTT_GEN >= 3)
writel(0, intel_private.registers+GFX_FLSH_CNTL);
return true;
}
......@@ -991,14 +1000,14 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
if (mem->page_count == 0)
return 0;
intel_gtt_clear_range(pg_start, mem->page_count);
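/* scrub the GTT entries while the DMA mappings behind them are still live */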
if (intel_private.base.needs_dmar) {
intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
mem->sg_list = NULL;
mem->num_sg = 0;
}
intel_gtt_clear_range(pg_start, mem->page_count);
return 0;
}
......
......@@ -106,10 +106,19 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
}
}
static const char *agp_type_str(int type)
{
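/* callers pass a 0/1 cache flag: 1 for snooped (cached), 0 for uncached */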
switch (type) {
case 0: return " uncached";
case 1: return " snooped";
default: return "";
}
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s",
seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
......@@ -118,6 +127,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.write_domain,
obj->last_rendering_seqno,
obj->last_fenced_seqno,
agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
......@@ -276,6 +286,37 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
return 0;
}
static int i915_gem_gtt_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
int count, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
seq_printf(m, " ");
describe_obj(m, obj);
seq_printf(m, "\n");
total_obj_size += obj->base.size;
total_gtt_size += obj->gtt_space->size;
count++;
}
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
}
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
......@@ -456,8 +497,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
for (i = 0; i < I915_NUM_RINGS; i++)
for (i = 0; i < I915_NUM_RINGS; i++) {
if (IS_GEN6(dev)) {
seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
dev_priv->ring[i].name,
I915_READ_IMR(&dev_priv->ring[i]));
}
i915_ring_seqno_info(m, &dev_priv->ring[i]);
}
mutex_unlock(&dev->struct_mutex);
return 0;
......@@ -656,7 +703,7 @@ static void print_error_buffers(struct seq_file *m,
seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s",
seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
......@@ -666,7 +713,8 @@ static void print_error_buffers(struct seq_file *m,
tiling_flag(err->tiling),
dirty_flag(err->dirty),
purgeable_flag(err->purgeable),
ring_str(err->ring));
ring_str(err->ring),
agp_type_str(err->agp_type));
if (err->name)
seq_printf(m, " (name: %d)", err->name);
......@@ -744,7 +792,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
if (error->batchbuffer[i]) {
struct drm_i915_error_object *obj = error->batchbuffer[i];
seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
offset = 0;
for (page = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
......@@ -890,7 +940,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
u32 rstdbyctl = I915_READ(RSTDBYCTL);
u16 crstandvid = I915_READ16(CRSTANDVID);
seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
......@@ -913,6 +963,30 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
seq_printf(m, "Render standby enabled: %s\n",
(rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
seq_printf(m, "Current RS state: ");
switch (rstdbyctl & RSX_STATUS_MASK) {
case RSX_STATUS_ON:
seq_printf(m, "on\n");
break;
case RSX_STATUS_RC1:
seq_printf(m, "RC1\n");
break;
case RSX_STATUS_RC1E:
seq_printf(m, "RC1E\n");
break;
case RSX_STATUS_RS1:
seq_printf(m, "RS1\n");
break;
case RSX_STATUS_RS2:
seq_printf(m, "RS2 (RC6)\n");
break;
case RSX_STATUS_RS3:
seq_printf(m, "RC3 (RC6+)\n");
break;
default:
seq_printf(m, "unknown\n");
break;
}
return 0;
}
......@@ -1187,6 +1261,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
static struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
......
......@@ -1962,13 +1962,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* enable GEM by default */
dev_priv->has_gem = 1;
if (dev_priv->has_gem == 0 &&
drm_core_check_feature(dev, DRIVER_MODESET)) {
DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
ret = -ENODEV;
goto out_workqueue_free;
}
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
......@@ -2055,7 +2048,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
out_workqueue_free:
destroy_workqueue(dev_priv->wq);
out_iomapfree:
io_mapping_free(dev_priv->mm.gtt_mapping);
......
......@@ -49,6 +49,9 @@ module_param_named(powersave, i915_powersave, int, 0600);
unsigned int i915_lvds_downclock = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
bool i915_try_reset = true;
module_param_named(reset, i915_try_reset, bool, 0600);
static struct drm_driver driver;
extern int intel_agp_enabled;
......@@ -352,6 +355,9 @@ static int i915_drm_thaw(struct drm_device *dev)
/* Resume the modeset for every activated CRTC */
drm_helper_resume_force_mode(dev);
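/* RC6 can only be re-enabled once both the render and power contexts exist */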
if (dev_priv->renderctx && dev_priv->pwrctx)
ironlake_enable_rc6(dev);
}
intel_opregion_init(dev);
......@@ -475,6 +481,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
bool need_display = true;
int ret;
if (!i915_try_reset)
return 0;
if (!mutex_trylock(&dev->struct_mutex))
return -EBUSY;
......
......@@ -172,20 +172,21 @@ struct drm_i915_error_state {
int page_count;
u32 gtt_offset;
u32 *pages[0];
} *ringbuffer, *batchbuffer[2];
} *ringbuffer, *batchbuffer[I915_NUM_RINGS];
struct drm_i915_error_buffer {
size_t size;
u32 size;
u32 name;
u32 seqno;
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
u32 fence_reg;
s32 fence_reg:5;
s32 pinned:2;
u32 tiling:2;
u32 dirty:1;
u32 purgeable:1;
u32 ring:4;
u32 agp_type:1;
} *active_bo, *pinned_bo;
u32 active_bo_count, pinned_bo_count;
struct intel_overlay_error_state *overlay;
......@@ -332,6 +333,7 @@ typedef struct drm_i915_private {
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
struct drm_display_mode *panel_fixed_mode;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
......@@ -794,6 +796,7 @@ struct drm_i915_gem_object {
*/
struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
/**
* Current offset of the object in GTT space.
......@@ -1006,12 +1009,6 @@ extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
u32 mask);
extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
u32 mask);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
......@@ -1091,10 +1088,10 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_flush_ring(struct drm_device *dev,
struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains);
int __must_check i915_gem_flush_ring(struct drm_device *dev,
struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
......@@ -1265,6 +1262,7 @@ extern void intel_disable_fbc(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
......
......@@ -35,18 +35,18 @@
#include <linux/swap.h>
#include <linux/pci.h>
static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
bool write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
uint64_t offset,
uint64_t size);
static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
bool write);
static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
uint64_t offset,
uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
bool map_and_fenceable);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
......@@ -1935,6 +1935,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
struct drm_device *dev;
bool idle;
int i;
dev_priv = container_of(work, drm_i915_private_t,
mm.retire_work.work);
......@@ -1948,11 +1950,31 @@ i915_gem_retire_work_handler(struct work_struct *work)
i915_gem_retire_requests(dev);
if (!dev_priv->mm.suspended &&
(!list_empty(&dev_priv->ring[RCS].request_list) ||
!list_empty(&dev_priv->ring[VCS].request_list) ||
!list_empty(&dev_priv->ring[BCS].request_list)))
/* Send a periodic flush down the ring so we don't hold onto GEM
* objects indefinitely.
*/
idle = true;
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_ring_buffer *ring = &dev_priv->ring[i];
if (!list_empty(&ring->gpu_write_list)) {
struct drm_i915_gem_request *request;
int ret;
ret = i915_gem_flush_ring(dev, ring, 0,
I915_GEM_GPU_DOMAINS);
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (ret || request == NULL ||
i915_add_request(dev, NULL, request, ring))
kfree(request);
}
idle &= list_empty(&ring->request_list);
}
if (!dev_priv->mm.suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
mutex_unlock(&dev->struct_mutex);
}
......@@ -2142,25 +2164,37 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return ret;
}
void
int
i915_gem_flush_ring(struct drm_device *dev,
struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
ring->flush(ring, invalidate_domains, flush_domains);
int ret;
ret = ring->flush(ring, invalidate_domains, flush_domains);
if (ret)
return ret;
i915_gem_process_flushing_list(dev, flush_domains, ring);
return 0;
}
static int i915_ring_idle(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
int ret;
if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
return 0;
if (!list_empty(&ring->gpu_write_list))
i915_gem_flush_ring(dev, ring,
if (!list_empty(&ring->gpu_write_list)) {
ret = i915_gem_flush_ring(dev, ring,
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
return i915_wait_request(dev,
i915_gem_next_request_seqno(dev, ring),
ring);
......@@ -2370,10 +2404,13 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
int ret;
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
i915_gem_flush_ring(obj->base.dev,
obj->last_fenced_ring,
0, obj->base.write_domain);
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->base.dev,
obj->last_fenced_ring,
0, obj->base.write_domain);
if (ret)
return ret;
}
obj->fenced_gpu_access = false;
}
......@@ -2393,6 +2430,12 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
obj->last_fenced_ring = NULL;
}
/* Ensure that all CPU reads are completed before installing a fence
* and all writes before removing the fence.
*/
if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
mb();
return 0;
}
......@@ -2523,9 +2566,12 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
return ret;
} else if (obj->tiling_changed) {
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
i915_gem_flush_ring(obj->base.dev, obj->ring,
0, obj->base.write_domain);
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
0, obj->base.write_domain);
if (ret)
return ret;
}
obj->fenced_gpu_access = false;
}
......@@ -2736,10 +2782,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
obj->gtt_space = NULL;
if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */
ret = i915_gem_evict_something(dev, size,
alignment,
map_and_fenceable);
/* first try to reclaim some memory by clearing the GTT */
ret = i915_gem_evict_everything(dev, false);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
......@@ -2747,7 +2791,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
goto search_free;
}
return ret;
return -ENOMEM;
}
goto search_free;
......@@ -2762,9 +2806,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
ret = i915_gem_evict_something(dev, size,
alignment, map_and_fenceable);
if (ret)
if (i915_gem_evict_everything(dev, false))
return ret;
goto search_free;
......@@ -2811,17 +2853,16 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
}
/** Flushes any GPU write domain for the object if it's dirty. */
static void
static int
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
return;
return 0;
/* Queue the GPU write cache flushing we need. */
i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
BUG_ON(obj->base.write_domain);
return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
......@@ -2833,10 +2874,16 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
return;
/* No actual flushing is required for the GTT write domain. Writes
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*
* However, we do have to enforce the order so that all writes through
* the GTT land before any writes to the device, such as updates to
* the GATT itself.
*/
wmb();
i915_gem_release_mmap(obj);
old_write_domain = obj->base.write_domain;
......@@ -2882,7 +2929,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->gtt_space == NULL)
return -EINVAL;
i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
if (obj->pending_gpu_write || write) {
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
......@@ -2927,7 +2977,10 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
if (obj->gtt_space == NULL)
return -EINVAL;
i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
/* Currently, we are always called from a non-interruptible context. */
if (pipelined != obj->ring) {
......@@ -2952,12 +3005,17 @@ int
i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
bool interruptible)
{
int ret;
if (!obj->active)
return 0;
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
i915_gem_flush_ring(obj->base.dev, obj->ring,
0, obj->base.write_domain);
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
0, obj->base.write_domain);
if (ret)
return ret;
}
return i915_gem_object_wait_rendering(obj, interruptible);
}
......@@ -2974,7 +3032,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
......@@ -3069,7 +3130,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
if (offset == 0 && size == obj->base.size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
......@@ -3362,8 +3426,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* flush earlier is beneficial.
*/
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
i915_gem_flush_ring(dev, obj->ring,
0, obj->base.write_domain);
ret = i915_gem_flush_ring(dev, obj->ring,
0, obj->base.write_domain);
} else if (obj->ring->outstanding_lazy_request ==
obj->last_rendering_seqno) {
struct drm_i915_gem_request *request;
......
......@@ -127,9 +127,15 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
}
/* Nothing found, clean up and bail out! */
list_for_each_entry(obj, &unwind_list, exec_list) {
while (!list_empty(&unwind_list)) {
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
ret = drm_mm_scan_remove_block(obj->gtt_space);
BUG_ON(ret);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
......@@ -162,6 +168,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
exec_list);
if (ret == 0)
ret = i915_gem_object_unbind(obj);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
......
......@@ -268,7 +268,6 @@ eb_destroy(struct eb_objects *eb)
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *entry,
struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_device *dev = obj->base.dev;
......@@ -411,10 +410,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *entry)
struct eb_objects *eb)
{
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
......@@ -426,7 +425,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
sizeof(reloc)))
return -EFAULT;
ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc);
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
if (ret)
return ret;
......@@ -442,13 +441,13 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *entry,
struct drm_i915_gem_relocation_entry *relocs)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]);
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
if (ret)
return ret;
}
......@@ -459,8 +458,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
struct eb_objects *eb,
struct list_head *objects,
struct drm_i915_gem_exec_object2 *exec)
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
int ret;
......@@ -468,7 +466,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
list_for_each_entry(obj, objects, exec_list) {
obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0;
ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++);
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
return ret;
}
......@@ -479,13 +477,36 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects,
struct drm_i915_gem_exec_object2 *exec)
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
struct drm_i915_gem_exec_object2 *entry;
int ret, retry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
struct list_head ordered_objects;
INIT_LIST_HEAD(&ordered_objects);
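/* Bind-order pass: objects that need mappable GTT space move to the head of the list */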
while (!list_empty(objects)) {
struct drm_i915_gem_exec_object2 *entry;
bool need_fence, need_mappable;
obj = list_first_entry(objects,
struct drm_i915_gem_object,
exec_list);
entry = obj->exec_entry;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
if (need_mappable)
list_move(&obj->exec_list, &ordered_objects);
else
list_move_tail(&obj->exec_list, &ordered_objects);
}
list_splice(&ordered_objects, objects);
/* Attempt to pin all of the buffers into the GTT.
* This is done in 3 phases:
......@@ -504,14 +525,11 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
ret = 0;
/* Unbind any ill-fitting objects or pin. */
entry = exec;
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
if (!obj->gtt_space) {
entry++;
if (!obj->gtt_space)
continue;
}
need_fence =
has_fenced_gpu_access &&
......@@ -534,8 +552,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
}
/* Bind fresh objects */
entry = exec;
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence;
need_fence =
......@@ -570,7 +588,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
}
entry->offset = obj->gtt_offset;
entry++;
}
/* Decrement pin count for bound objects */
......@@ -622,7 +639,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
int i, total, ret;
/* We may process another execbuffer during the unlock... */
while (list_empty(objects)) {
while (!list_empty(objects)) {
obj = list_first_entry(objects,
struct drm_i915_gem_object,
exec_list);
......@@ -665,7 +682,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
}
/* reacquire the objects */
INIT_LIST_HEAD(objects);
eb_reset(eb);
for (i = 0; i < count; i++) {
struct drm_i915_gem_object *obj;
......@@ -681,10 +697,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
list_add_tail(&obj->exec_list, objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
ret = i915_gem_execbuffer_reserve(ring, file, objects, exec);
ret = i915_gem_execbuffer_reserve(ring, file, objects);
if (ret)
goto err;
......@@ -693,7 +710,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
exec,
reloc + total);
if (ret)
goto err;
......@@ -713,25 +729,34 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
return ret;
}
static void
static int
i915_gem_execbuffer_flush(struct drm_device *dev,
uint32_t invalidate_domains,
uint32_t flush_domains,
uint32_t flush_rings)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
int i, ret;
if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
for (i = 0; i < I915_NUM_RINGS; i++)
if (flush_rings & (1 << i))
i915_gem_flush_ring(dev, &dev_priv->ring[i],
invalidate_domains,
flush_domains);
if (flush_rings & (1 << i)) {
ret = i915_gem_flush_ring(dev,
&dev_priv->ring[i],
invalidate_domains,
flush_domains);
if (ret)
return ret;
}
}
return 0;
}
static int
......@@ -795,10 +820,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
cd.invalidate_domains,
cd.flush_domains);
#endif
i915_gem_execbuffer_flush(ring->dev,
cd.invalidate_domains,
cd.flush_domains,
cd.flush_rings);
ret = i915_gem_execbuffer_flush(ring->dev,
cd.invalidate_domains,
cd.flush_domains,
cd.flush_rings);
if (ret)
return ret;
}
list_for_each_entry(obj, objects, exec_list) {
......@@ -921,7 +948,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_request *request;
u32 flush_domains;
u32 invalidate;
/*
* Ensure that the commands in the batch buffer are
......@@ -929,11 +956,13 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
*
* The sampler always gets flushed on i965 (sigh).
*/
flush_domains = 0;
invalidate = I915_GEM_DOMAIN_COMMAND;
if (INTEL_INFO(dev)->gen >= 4)
flush_domains |= I915_GEM_DOMAIN_SAMPLER;
ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
invalidate |= I915_GEM_DOMAIN_SAMPLER;
if (ring->flush(ring, invalidate, 0)) {
i915_gem_next_request_seqno(dev, ring);
return;
}
/* Add a breadcrumb for the completion of the batch buffer */
request = kzalloc(sizeof(*request), GFP_KERNEL);
......@@ -1098,16 +1127,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
list_add_tail(&obj->exec_list, &objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
/* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(objects.prev,
struct drm_i915_gem_object,
exec_list);
/* Move the objects en-masse into the GTT, evicting if necessary. */
ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec);
ret = i915_gem_execbuffer_reserve(ring, file, &objects);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec);
ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
......@@ -1121,9 +1156,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Set the pending read domains for the batch buffer to COMMAND */
batch_obj = list_entry(objects.prev,
struct drm_i915_gem_object,
exec_list);
if (batch_obj->base.pending_write_domain) {
DRM_ERROR("Attempting to use self-modifying batch buffer\n");
ret = -EINVAL;
......@@ -1340,4 +1372,3 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
drm_free_large(exec2_list);
return ret;
}
......@@ -85,15 +85,11 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
if (dev_priv->mm.gtt->needs_dmar) {
if (obj->sg_list) {
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
obj->num_sg = 0;
}
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
}
......@@ -145,6 +145,8 @@
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
......@@ -159,6 +161,7 @@
#define MI_MM_SPACE_PHYSICAL (0<<8)
#define MI_SAVE_EXT_STATE_EN (1<<3)
#define MI_RESTORE_EXT_STATE_EN (1<<2)
#define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
......@@ -288,6 +291,7 @@
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
#define RING_ACTHD(base) ((base)+0x74)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
......@@ -1130,9 +1134,50 @@
#define RCBMINAVG 0x111a0
#define RCUPEI 0x111b0
#define RCDNEI 0x111b4
#define MCHBAR_RENDER_STANDBY 0x111b8
#define RCX_SW_EXIT (1<<23)
#define RSX_STATUS_MASK 0x00700000
#define RSTDBYCTL 0x111b8
#define RS1EN (1<<31)
#define RS2EN (1<<30)
#define RS3EN (1<<29)
#define D3RS3EN (1<<28) /* Display D3 implies RS3 */
#define SWPROMORSX (1<<27) /* RSx promotion timers ignored */
#define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */
#define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */
#define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */
#define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */
#define RSX_STATUS_MASK (7<<20)
#define RSX_STATUS_ON (0<<20)
#define RSX_STATUS_RC1 (1<<20)
#define RSX_STATUS_RC1E (2<<20)
#define RSX_STATUS_RS1 (3<<20)
#define RSX_STATUS_RS2 (4<<20) /* aka rc6 */
#define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */
#define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */
#define RSX_STATUS_RSVD2 (7<<20)
#define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */
#define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */
#define JRSC (1<<17) /* rsx coupled to cpu c-state */
#define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */
#define RS1CONTSAV_MASK (3<<14)
#define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */
#define RS1CONTSAV_RSVD (1<<14)
#define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */
#define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */
#define NORMSLEXLAT_MASK (3<<12)
#define SLOW_RS123 (0<<12)
#define SLOW_RS23 (1<<12)
#define SLOW_RS3 (2<<12)
#define NORMAL_RS123 (3<<12)
#define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */
#define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
#define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */
#define STATELOCK (1<<7) /* locked to rs_cstate if 0 */
#define RS_CSTATE_MASK (3<<4)
#define RS_CSTATE_C367_RS1 (0<<4)
#define RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
#define RS_CSTATE_RSVD (2<<4)
#define RS_CSTATE_C367_RS2 (3<<4)
#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
#define VIDCTL 0x111c0
#define VIDSTS 0x111c8
#define VIDSTART 0x111cc /* 8 bits */
......@@ -2345,8 +2390,13 @@
/* Memory latency timer register */
#define MLTR_ILK 0x11222
#define MLTR_WM1_SHIFT 0
#define MLTR_WM2_SHIFT 8
/* the unit of memory self-refresh latency time is 0.5us */
#define ILK_SRLT_MASK 0x3f
#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
/* define the fifo size on Ironlake */
#define ILK_DISPLAY_FIFO 128
......@@ -2728,12 +2778,41 @@
/* PCH */
/* south display engine interrupt */
#define SDE_AUDIO_POWER_D (1 << 27)
#define SDE_AUDIO_POWER_C (1 << 26)
#define SDE_AUDIO_POWER_B (1 << 25)
#define SDE_AUDIO_POWER_SHIFT (25)
#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
#define SDE_GMBUS (1 << 24)
#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
#define SDE_AUDIO_HDCP_MASK (3 << 22)
#define SDE_AUDIO_TRANSB (1 << 21)
#define SDE_AUDIO_TRANSA (1 << 20)
#define SDE_AUDIO_TRANS_MASK (3 << 20)
#define SDE_POISON (1 << 19)
/* 18 reserved */
#define SDE_FDI_RXB (1 << 17)
#define SDE_FDI_RXA (1 << 16)
#define SDE_FDI_MASK (3 << 16)
#define SDE_AUXD (1 << 15)
#define SDE_AUXC (1 << 14)
#define SDE_AUXB (1 << 13)
#define SDE_AUX_MASK (7 << 13)
/* 12 reserved */
#define SDE_CRT_HOTPLUG (1 << 11)
#define SDE_PORTD_HOTPLUG (1 << 10)
#define SDE_PORTC_HOTPLUG (1 << 9)
#define SDE_PORTB_HOTPLUG (1 << 8)
#define SDE_SDVOB_HOTPLUG (1 << 6)
#define SDE_HOTPLUG_MASK (0xf << 8)
#define SDE_TRANSB_CRC_DONE (1 << 5)
#define SDE_TRANSB_CRC_ERR (1 << 4)
#define SDE_TRANSB_FIFO_UNDER (1 << 3)
#define SDE_TRANSA_CRC_DONE (1 << 2)
#define SDE_TRANSA_CRC_ERR (1 << 1)
#define SDE_TRANSA_FIFO_UNDER (1 << 0)
#define SDE_TRANS_MASK (0x3f)
/* CPT */
#define SDE_CRT_HOTPLUG_CPT (1 << 19)
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
......@@ -3174,10 +3253,11 @@
#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
/* SNB B-stepping */
#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22)
#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22)
#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22)
#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22)
#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
#define FORCEWAKE 0xA18C
......@@ -3239,6 +3319,7 @@
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_DATA 0x138128
......
......@@ -740,7 +740,7 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
I915_WRITE(MCHBAR_RENDER_STANDBY,
I915_WRITE(RSTDBYCTL,
dev_priv->saveMCHBAR_RENDER_STANDBY);
} else {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
......@@ -811,7 +811,7 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
dev_priv->saveMCHBAR_RENDER_STANDBY =
I915_READ(MCHBAR_RENDER_STANDBY);
I915_READ(RSTDBYCTL);
} else {
dev_priv->saveIER = I915_READ(IER);
dev_priv->saveIMR = I915_READ(IMR);
......@@ -822,10 +822,6 @@ int i915_save_state(struct drm_device *dev)
if (IS_GEN6(dev))
gen6_disable_rps(dev);
/* XXX disabling the clock gating breaks suspend on gm45
intel_disable_clock_gating(dev);
*/
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
......
......@@ -30,6 +30,7 @@
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
......@@ -287,8 +288,9 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
}
static bool intel_crt_detect_ddc(struct intel_crt *crt)
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
/* CRT should always be at 0, but check anyway */
......@@ -301,8 +303,26 @@ static bool intel_crt_detect_ddc(struct intel_crt *crt)
}
if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
return true;
struct edid *edid;
bool is_digital = false;
edid = drm_get_edid(connector,
&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
/*
* This may be a DVI-I connector with a shared DDC
* link between analog and digital outputs, so we
* have to check the EDID input spec of the attached device.
*/
if (edid != NULL) {
is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
connector->display_info.raw_edid = NULL;
kfree(edid);
}
if (!is_digital) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
return true;
}
}
return false;
......@@ -458,7 +478,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
}
}
if (intel_crt_detect_ddc(crt))
if (intel_crt_detect_ddc(connector))
return connector_status_connected;
if (!force)
......@@ -472,7 +492,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
crtc = intel_get_load_detect_pipe(&crt->base, connector,
NULL, &dpms_mode);
if (crtc) {
if (intel_crt_detect_ddc(crt))
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else
status = intel_crt_load_detect(crtc, crt);
......
......@@ -1153,18 +1153,27 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
"0x%x\n", signal_levels);
return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
}
}
......@@ -1334,17 +1343,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool channel_eq = false;
int tries;
int tries, cr_tries;
u32 reg;
uint32_t DP = intel_dp->DP;
/* channel equalization */
tries = 0;
cr_tries = 0;
channel_eq = false;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
intel_dp_link_down(intel_dp);
break;
}
if (IS_GEN6(dev) && is_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
......@@ -1367,14 +1383,26 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
if (!intel_dp_get_link_status(intel_dp))
break;
/* Make sure clock is still ok */
if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
cr_tries++;
continue;
}
if (intel_channel_eq_ok(intel_dp)) {
channel_eq = true;
break;
}
/* Try 5 times */
if (tries > 5)
break;
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
intel_dp_link_down(intel_dp);
intel_dp_start_link_train(intel_dp);
tries = 0;
cr_tries++;
continue;
}
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp);
......
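The new cr_tries counter bounds the whole retraining loop: up to five channel-equalization attempts per pass, a full restart (link down plus fresh clock recovery) when those are exhausted or the clock drops out, and a hard abort after five such restarts. Below is a toy model of that policy with simulated hardware responses; every name in it is illustrative, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the bounded retrain policy: the "hardware" answers are
 * simulated by countdown fields so the control flow can be exercised. */
struct link { int cr_fails, eq_fails; };

static bool clock_recovery_ok(struct link *l) { return l->cr_fails-- <= 0; }
static bool channel_eq_ok(struct link *l)     { return l->eq_fails-- <= 0; }

static bool train_link(struct link *l)
{
	int eq_tries = 0, cr_tries = 0;

	for (;;) {
		if (cr_tries > 5)
			return false;        /* too many full retrains: abort */
		if (!clock_recovery_ok(l)) { /* clock lost: retrain from scratch */
			cr_tries++;
			continue;
		}
		if (channel_eq_ok(l))
			return true;         /* link fully trained */
		if (++eq_tries > 5) {        /* EQ keeps failing: full restart */
			eq_tries = 0;
			cr_tries++;
		}
		/* otherwise: adjust drive levels as the sink requested, retry */
	}
}

int main(void)
{
	struct link l = { .cr_fails = 2, .eq_fails = 8 };
	printf("trained: %s\n", train_link(&l) ? "yes" : "no");
	return 0;
}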
......@@ -257,6 +257,9 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern u32 intel_panel_get_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern void intel_panel_setup_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev);
extern void intel_panel_disable_backlight(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
......
......@@ -62,6 +62,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = ifbdev->helper.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd mode_cmd;
......@@ -77,7 +78,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mode_cmd.height = sizes->surface_height;
mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
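The pitch fix above swaps (bpp + 1) / 8 for the usual round-up-to-whole-bytes idiom (bpp + 7) / 8. The two agree for byte-aligned depths but diverge for depths such as 30 bpp, where the old form under-allocates. A quick stand-alone check:

#include <stdio.h>

/* Compare the old and new bytes-per-pixel computations. */
int main(void)
{
	int bpps[] = { 8, 15, 16, 24, 30, 32 };
	for (size_t i = 0; i < sizeof(bpps) / sizeof(bpps[0]); i++) {
		int bpp = bpps[i];
		printf("bpp=%2d  old=(bpp+1)/8=%d  new=(bpp+7)/8=%d\n",
		       bpp, (bpp + 1) / 8, (bpp + 7) / 8);
	}
	return 0;
}

For bpp = 30 the old expression yields 3 bytes while 4 are needed, so the framebuffer pitch, and with it the allocation size, came out short.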
......@@ -120,6 +121,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
ret = -ENOMEM;
goto out_unpin;
}
/* setup aperture base/size for vesafb takeover */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
......@@ -127,10 +133,8 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
if (!IS_GEN2(dev))
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
else
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
info->apertures->ranges[0].size =
dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;
......@@ -140,12 +144,6 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
ret = -ENOSPC;
goto out_unpin;
}
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
ret = -ENOMEM;
goto out_unpin;
}
info->screen_size = size;
// memset(info->screen_base, 0, size);
......
......@@ -106,7 +106,7 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
POSTING_READ(lvds_reg);
intel_panel_set_backlight(dev, dev_priv->backlight_level);
intel_panel_enable_backlight(dev);
}
static void intel_lvds_disable(struct intel_lvds *intel_lvds)
......@@ -123,8 +123,7 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
lvds_reg = LVDS;
}
dev_priv->backlight_level = intel_panel_get_backlight(dev);
intel_panel_set_backlight(dev, 0);
intel_panel_disable_backlight(dev);
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
......@@ -375,6 +374,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
pfit_control = 0;
pfit_pgm_ratios = 0;
}
if (pfit_control != intel_lvds->pfit_control ||
pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
intel_lvds->pfit_control = pfit_control;
......@@ -398,8 +401,6 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
dev_priv->backlight_level = intel_panel_get_backlight(dev);
/* We try to do the minimum that is necessary in order to unlock
* the registers for mode setting.
*
......@@ -430,9 +431,6 @@ static void intel_lvds_commit(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
if (dev_priv->backlight_level == 0)
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
/* Undo any unlocking done in prepare to prevent accidental
* adjustment of the registers.
*/
......
......@@ -250,3 +250,34 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
void intel_panel_disable_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->backlight_enabled) {
dev_priv->backlight_level = intel_panel_get_backlight(dev);
dev_priv->backlight_enabled = false;
}
intel_panel_set_backlight(dev, 0);
}
void intel_panel_enable_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->backlight_level == 0)
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
intel_panel_set_backlight(dev, dev_priv->backlight_level);
dev_priv->backlight_enabled = true;
}
void intel_panel_setup_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
}
......@@ -16,21 +16,24 @@ struct intel_hw_status_page {
#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base))
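The reworked macros parenthesize the ring argument, standard macro hygiene: without the parentheses, an argument that is itself an expression binds to the member access instead of the whole macro. A stand-alone illustration (names invented for the demo):

#include <stdio.h>

struct ring { unsigned mmio_base; };

#define BASE_UNSAFE(ring)  ring->mmio_base     /* breaks for expressions */
#define BASE_SAFE(ring)    ((ring)->mmio_base) /* always binds correctly */

int main(void)
{
	struct ring rings[2] = { { 0x100 }, { 0x200 } };
	struct ring *r = rings;

	/* BASE_UNSAFE(r + 1) expands to r + 1->mmio_base and fails to
	 * compile, since the member access binds to the literal 1. */
	printf("0x%x\n", BASE_SAFE(r + 1)); /* prints 0x200 */
	return 0;
}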
struct intel_ring_buffer {
const char *name;
......@@ -49,12 +52,15 @@ struct intel_ring_buffer {
u32 tail;
int space;
int size;
int effective_size;
struct intel_hw_status_page status_page;
spinlock_t irq_lock;
u32 irq_refcount;
u32 irq_mask;
u32 irq_seqno; /* last seq seem at irq time */
u32 waiting_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
atomic_t irq_refcount;
bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
void (*irq_put)(struct intel_ring_buffer *ring);
......@@ -62,9 +68,9 @@ struct intel_ring_buffer {
void (*write_tail)(struct intel_ring_buffer *ring,
u32 value);
void (*flush)(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains);
int __must_check (*flush)(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains);
int (*add_request)(struct intel_ring_buffer *ring,
u32 *seqno);
u32 (*get_seqno)(struct intel_ring_buffer *ring);
......
......@@ -1024,9 +1024,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
if (intel_sdvo->has_hdmi_monitor &&
!intel_sdvo_set_avi_infoframe(intel_sdvo))
return;
if (intel_sdvo->has_hdmi_monitor) {
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
intel_sdvo_set_avi_infoframe(intel_sdvo);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
if (intel_sdvo->is_tv &&
!intel_sdvo_set_tv_format(intel_sdvo))
......@@ -1398,6 +1402,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
intel_sdvo->attached_output = response;
intel_sdvo->has_hdmi_monitor = false;
intel_sdvo->has_hdmi_audio = false;
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
else if (response & SDVO_TMDS_MASK)
......@@ -1922,20 +1929,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
static bool
intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
{
int is_hdmi;
if (!intel_sdvo_check_supp_encode(intel_sdvo))
return false;
if (!intel_sdvo_set_target_output(intel_sdvo,
device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
return false;
is_hdmi = 0;
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
return false;
return !!is_hdmi;
return intel_sdvo_check_supp_encode(intel_sdvo);
}
static u8
......@@ -2037,12 +2031,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
/* enable hdmi encoding mode if supported */
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo->is_hdmi = true;
}
intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
......
......@@ -2963,6 +2963,7 @@ config TILE_NET
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
select XEN_XENBUS_FRONTEND
default y
help
The network device frontend driver allows the kernel to
......
......@@ -45,6 +45,7 @@ config XEN_PCIDEV_FRONTEND
depends on PCI && X86 && XEN
select HOTPLUG
select PCI_XEN
select XEN_XENBUS_FRONTEND
default y
help
The PCI device frontend driver allows the kernel to import arbitrary
......
......@@ -29,6 +29,14 @@ config XEN_DEV_EVTCHN
firing.
If in doubt, say yes.
config XEN_BACKEND
bool "Backend driver support"
depends on XEN_DOM0
default y
help
Support for backend device drivers that provide I/O services
to other virtual machines.
config XENFS
tristate "Xen filesystem"
default y
......@@ -62,6 +70,9 @@ config XEN_SYS_HYPERVISOR
virtual environment, /sys/hypervisor will still be present,
but will have no xen contents.
config XEN_XENBUS_FRONTEND
tristate
config XEN_PLATFORM_PCI
tristate "xen platform pci device driver"
depends on XEN_PVHVM
......
......@@ -5,3 +5,8 @@ xenbus-objs += xenbus_client.o
xenbus-objs += xenbus_comms.o
xenbus-objs += xenbus_xs.o
xenbus-objs += xenbus_probe.o
xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
xenbus-objs += $(xenbus-be-objs-y)
obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o
......@@ -36,26 +36,15 @@
#define XEN_BUS_ID_SIZE 20
#ifdef CONFIG_XEN_BACKEND
extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
extern void xenbus_backend_probe_and_watch(void);
extern int xenbus_backend_bus_register(void);
extern void xenbus_backend_bus_unregister(void);
#else
static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
static inline void xenbus_backend_probe_and_watch(void) {}
static inline int xenbus_backend_bus_register(void) { return 0; }
static inline void xenbus_backend_bus_unregister(void) {}
#endif
struct xen_bus_type
{
char *root;
unsigned int levels;
int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
int (*probe)(const char *type, const char *dir);
int (*probe)(struct xen_bus_type *bus, const char *type,
const char *dir);
void (*otherend_changed)(struct xenbus_watch *watch, const char **vec,
unsigned int len);
struct bus_type bus;
};
......@@ -73,4 +62,16 @@ extern int xenbus_probe_devices(struct xen_bus_type *bus);
extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
extern void xenbus_dev_shutdown(struct device *_dev);
extern int xenbus_dev_suspend(struct device *dev, pm_message_t state);
extern int xenbus_dev_resume(struct device *dev);
extern void xenbus_otherend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len,
int ignore_on_shutdown);
extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
char *id_node, char *path_node);
#endif
/******************************************************************************
* Talks to Xen Store to figure out what devices we have (backend half).
*
* Copyright (C) 2005 Rusty Russell, IBM Corporation
* Copyright (C) 2005 Mike Wray, Hewlett-Packard
* Copyright (C) 2005, 2006 XenSource Ltd
* Copyright (C) 2007 Solarflare Communications, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#define DPRINTK(fmt, args...) \
pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
__func__, __LINE__, ##args)
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <asm/hypervisor.h>
#include <xen/xenbus.h>
#include <xen/features.h>
#include "xenbus_comms.h"
#include "xenbus_probe.h"
/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
{
int domid, err;
const char *devid, *type, *frontend;
unsigned int typelen;
type = strchr(nodename, '/');
if (!type)
return -EINVAL;
type++;
typelen = strcspn(type, "/");
if (!typelen || type[typelen] != '/')
return -EINVAL;
devid = strrchr(nodename, '/') + 1;
err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid,
"frontend", NULL, &frontend,
NULL);
if (err)
return err;
if (strlen(frontend) == 0)
err = -ERANGE;
if (!err && !xenbus_exists(XBT_NIL, frontend, ""))
err = -ENOENT;
kfree(frontend);
if (err)
return err;
if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s",
typelen, type, domid, devid) >= XEN_BUS_ID_SIZE)
return -ENOSPC;
return 0;
}
static int xenbus_uevent_backend(struct device *dev,
struct kobj_uevent_env *env)
{
struct xenbus_device *xdev;
struct xenbus_driver *drv;
struct xen_bus_type *bus;
DPRINTK("");
if (dev == NULL)
return -ENODEV;
xdev = to_xenbus_device(dev);
if (xdev == NULL)
return -ENODEV;
bus = container_of(xdev->dev.bus, struct xen_bus_type, bus);
/* stuff we want to pass to /sbin/hotplug */
if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype))
return -ENOMEM;
if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename))
return -ENOMEM;
if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root))
return -ENOMEM;
if (dev->driver) {
drv = to_xenbus_driver(dev->driver);
if (drv && drv->uevent)
return drv->uevent(xdev, env);
}
return 0;
}
/* backend/<typename>/<frontend-uuid>/<name> */
static int xenbus_probe_backend_unit(struct xen_bus_type *bus,
const char *dir,
const char *type,
const char *name)
{
char *nodename;
int err;
nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
if (!nodename)
return -ENOMEM;
DPRINTK("%s\n", nodename);
err = xenbus_probe_node(bus, type, nodename);
kfree(nodename);
return err;
}
/* backend/<typename>/<frontend-domid> */
static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type,
const char *domid)
{
char *nodename;
int err = 0;
char **dir;
unsigned int i, dir_n = 0;
DPRINTK("");
nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid);
if (!nodename)
return -ENOMEM;
dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n);
if (IS_ERR(dir)) {
kfree(nodename);
return PTR_ERR(dir);
}
for (i = 0; i < dir_n; i++) {
err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]);
if (err)
break;
}
kfree(dir);
kfree(nodename);
return err;
}
static void frontend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
xenbus_otherend_changed(watch, vec, len, 0);
}
static struct device_attribute xenbus_backend_dev_attrs[] = {
__ATTR_NULL
};
static struct xen_bus_type xenbus_backend = {
.root = "backend",
.levels = 3, /* backend/type/<frontend>/<id> */
.get_bus_id = backend_bus_id,
.probe = xenbus_probe_backend,
.otherend_changed = frontend_changed,
.bus = {
.name = "xen-backend",
.match = xenbus_match,
.uevent = xenbus_uevent_backend,
.probe = xenbus_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
.dev_attrs = xenbus_backend_dev_attrs,
},
};
static void backend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
DPRINTK("");
xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
}
static struct xenbus_watch be_watch = {
.node = "backend",
.callback = backend_changed,
};
static int read_frontend_details(struct xenbus_device *xendev)
{
return xenbus_read_otherend_details(xendev, "frontend-id", "frontend");
}
int xenbus_dev_is_online(struct xenbus_device *dev)
{
int rc, val;
rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
if (rc != 1)
val = 0; /* no online node present */
return val;
}
EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
int __xenbus_register_backend(struct xenbus_driver *drv,
struct module *owner, const char *mod_name)
{
drv->read_otherend_details = read_frontend_details;
return xenbus_register_driver_common(drv, &xenbus_backend,
owner, mod_name);
}
EXPORT_SYMBOL_GPL(__xenbus_register_backend);
static int backend_probe_and_watch(struct notifier_block *notifier,
unsigned long event,
void *data)
{
/* Enumerate devices in xenstore and watch for changes. */
xenbus_probe_devices(&xenbus_backend);
register_xenbus_watch(&be_watch);
return NOTIFY_DONE;
}
static int __init xenbus_probe_backend_init(void)
{
static struct notifier_block xenstore_notifier = {
.notifier_call = backend_probe_and_watch
};
int err;
DPRINTK("");
/* Register ourselves with the kernel bus subsystem */
err = bus_register(&xenbus_backend.bus);
if (err)
return err;
register_xenstore_notifier(&xenstore_notifier);
return 0;
}
subsys_initcall(xenbus_probe_backend_init);
#define DPRINTK(fmt, args...) \
pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
__func__, __LINE__, ##args)
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include "xenbus_comms.h"
#include "xenbus_probe.h"
/* device/<type>/<id> => <type>-<id> */
static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
{
nodename = strchr(nodename, '/');
if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
return -EINVAL;
}
strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
if (!strchr(bus_id, '/')) {
printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
return -EINVAL;
}
*strchr(bus_id, '/') = '-';
return 0;
}
/* device/<typename>/<name> */
static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type,
const char *name)
{
char *nodename;
int err;
nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name);
if (!nodename)
return -ENOMEM;
DPRINTK("%s", nodename);
err = xenbus_probe_node(bus, type, nodename);
kfree(nodename);
return err;
}
static int xenbus_uevent_frontend(struct device *_dev,
struct kobj_uevent_env *env)
{
struct xenbus_device *dev = to_xenbus_device(_dev);
if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
return -ENOMEM;
return 0;
}
static void backend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
xenbus_otherend_changed(watch, vec, len, 1);
}
static struct device_attribute xenbus_frontend_dev_attrs[] = {
__ATTR_NULL
};
static struct xen_bus_type xenbus_frontend = {
.root = "device",
.levels = 2, /* device/type/<id> */
.get_bus_id = frontend_bus_id,
.probe = xenbus_probe_frontend,
.otherend_changed = backend_changed,
.bus = {
.name = "xen",
.match = xenbus_match,
.uevent = xenbus_uevent_frontend,
.probe = xenbus_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
.dev_attrs = xenbus_frontend_dev_attrs,
.suspend = xenbus_dev_suspend,
.resume = xenbus_dev_resume,
},
};
static void frontend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
DPRINTK("");
xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
}
/* We watch for devices appearing and vanishing. */
static struct xenbus_watch fe_watch = {
.node = "device",
.callback = frontend_changed,
};
static int read_backend_details(struct xenbus_device *xendev)
{
return xenbus_read_otherend_details(xendev, "backend-id", "backend");
}
static int is_device_connecting(struct device *dev, void *data)
{
struct xenbus_device *xendev = to_xenbus_device(dev);
struct device_driver *drv = data;
struct xenbus_driver *xendrv;
/*
* A device with no driver will never connect. We care only about
* devices which should currently be in the process of connecting.
*/
if (!dev->driver)
return 0;
/* Is this search limited to a particular driver? */
if (drv && (dev->driver != drv))
return 0;
xendrv = to_xenbus_driver(dev->driver);
return (xendev->state < XenbusStateConnected ||
(xendev->state == XenbusStateConnected &&
xendrv->is_ready && !xendrv->is_ready(xendev)));
}
static int exists_connecting_device(struct device_driver *drv)
{
return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
is_device_connecting);
}
static int print_device_status(struct device *dev, void *data)
{
struct xenbus_device *xendev = to_xenbus_device(dev);
struct device_driver *drv = data;
/* Is this operation limited to a particular driver? */
if (drv && (dev->driver != drv))
return 0;
if (!dev->driver) {
/* Information only: is this too noisy? */
printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
xendev->nodename);
} else if (xendev->state < XenbusStateConnected) {
enum xenbus_state rstate = XenbusStateUnknown;
if (xendev->otherend)
rstate = xenbus_read_driver_state(xendev->otherend);
printk(KERN_WARNING "XENBUS: Timeout connecting "
"to device: %s (local state %d, remote state %d)\n",
xendev->nodename, xendev->state, rstate);
}
return 0;
}
/* We only wait for device setup after most initcalls have run. */
static int ready_to_wait_for_devices;
/*
* On a 5-minute timeout, wait for all devices currently configured. We need
* to do this to guarantee that the filesystems and / or network devices
* needed for boot are available, before we can allow the boot to proceed.
*
* This needs to be on a late_initcall, to happen after the frontend device
* drivers have been initialised, but before the root fs is mounted.
*
* A possible improvement here would be to have the tools add a per-device
* flag to the store entry, indicating whether it is needed at boot time.
* This would allow people who knew what they were doing to accelerate their
* boot slightly, but of course needs tools or manual intervention to set up
* those flags correctly.
*/
static void wait_for_devices(struct xenbus_driver *xendrv)
{
unsigned long start = jiffies;
struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
unsigned int seconds_waited = 0;
if (!ready_to_wait_for_devices || !xen_domain())
return;
while (exists_connecting_device(drv)) {
if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
if (!seconds_waited)
printk(KERN_WARNING "XENBUS: Waiting for "
"devices to initialise: ");
seconds_waited += 5;
printk("%us...", 300 - seconds_waited);
if (seconds_waited == 300)
break;
}
schedule_timeout_interruptible(HZ/10);
}
if (seconds_waited)
printk("\n");
bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
print_device_status);
}
int __xenbus_register_frontend(struct xenbus_driver *drv,
struct module *owner, const char *mod_name)
{
int ret;
drv->read_otherend_details = read_backend_details;
ret = xenbus_register_driver_common(drv, &xenbus_frontend,
owner, mod_name);
if (ret)
return ret;
/* If this driver is loaded as a module wait for devices to attach. */
wait_for_devices(drv);
return 0;
}
EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
static int frontend_probe_and_watch(struct notifier_block *notifier,
unsigned long event,
void *data)
{
/* Enumerate devices in xenstore and watch for changes. */
xenbus_probe_devices(&xenbus_frontend);
register_xenbus_watch(&fe_watch);
return NOTIFY_DONE;
}
static int __init xenbus_probe_frontend_init(void)
{
static struct notifier_block xenstore_notifier = {
.notifier_call = frontend_probe_and_watch
};
int err;
DPRINTK("");
/* Register ourselves with the kernel bus subsystem */
err = bus_register(&xenbus_frontend.bus);
if (err)
return err;
register_xenstore_notifier(&xenstore_notifier);
return 0;
}
subsys_initcall(xenbus_probe_frontend_init);
#ifndef MODULE
static int __init boot_wait_for_devices(void)
{
if (xen_hvm_domain() && !xen_platform_pci_unplug)
return -ENODEV;
ready_to_wait_for_devices = 1;
wait_for_devices(NULL);
return 0;
}
late_initcall(boot_wait_for_devices);
#endif
MODULE_LICENSE("GPL");
......@@ -6,7 +6,7 @@ ntfs-objs := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
unistr.o upcase.o
EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.29\"
EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.30\"
ifeq ($(CONFIG_NTFS_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
......
/*
* file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
......@@ -1380,15 +1380,14 @@ static inline void ntfs_set_next_iovec(const struct iovec **iovp,
* pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
* single-segment behaviour.
*
* We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
* when atomic and when not atomic. This is ok because
* __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
* and it is ok to call this when non-atomic.
* Infact, the only difference between __copy_from_user_inatomic() and
* We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when
* atomic and when not atomic. This is ok because it calls
* __copy_from_user_inatomic() and it is ok to call this when non-atomic. In
* fact, the only difference between __copy_from_user_inatomic() and
* __copy_from_user() is that the latter calls might_sleep() and the former
* should not zero the tail of the buffer on error. And on many
* architectures __copy_from_user_inatomic() is just defined to
* __copy_from_user() so it makes no difference at all on those architectures.
* should not zero the tail of the buffer on error. And on many architectures
* __copy_from_user_inatomic() is just defined to __copy_from_user() so it
* makes no difference at all on those architectures.
*/
static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
unsigned nr_pages, unsigned ofs, const struct iovec **iov,
......@@ -1409,28 +1408,28 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
if (unlikely(copied != len)) {
/* Do it the slow way. */
addr = kmap(*pages);
copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
*iov, *iov_ofs, len);
/*
* Zero the rest of the target like __copy_from_user().
*/
memset(addr + ofs + copied, 0, len - copied);
kunmap(*pages);
copied = __ntfs_copy_from_user_iovec_inatomic(addr +
ofs, *iov, *iov_ofs, len);
if (unlikely(copied != len))
goto err_out;
kunmap(*pages);
}
total += len;
ntfs_set_next_iovec(iov, iov_ofs, len);
bytes -= len;
if (!bytes)
break;
ntfs_set_next_iovec(iov, iov_ofs, len);
ofs = 0;
} while (++pages < last_page);
out:
return total;
err_out:
total += copied;
BUG_ON(copied > len);
/* Zero the rest of the target like __copy_from_user(). */
memset(addr + ofs + copied, 0, len - copied);
kunmap(*pages);
total += copied;
ntfs_set_next_iovec(iov, iov_ofs, copied);
while (++pages < last_page) {
bytes -= len;
if (!bytes)
......
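The reworked error path above zeroes the uncopied tail exactly once and, crucially, advances the iovec cursor by copied, the bytes actually consumed, rather than by the full requested length. A userspace analogue of that advance-by-consumed pattern (structure and names are illustrative only):

#include <stdio.h>
#include <string.h>

struct seg { const char *base; size_t len; };

/* Copy from a segment list into dst, advancing the cursor only by the
 * bytes actually consumed; advancing by anything else would make the
 * caller reprocess the same segment forever. */
static size_t copy_segments(char *dst, size_t space, struct seg *segs,
			    size_t nsegs, size_t *seg_idx, size_t *seg_ofs)
{
	size_t total = 0;
	while (space && *seg_idx < nsegs) {
		struct seg *s = &segs[*seg_idx];
		size_t len = s->len - *seg_ofs;
		if (len > space)
			len = space;
		memcpy(dst + total, s->base + *seg_ofs, len);
		total += len;
		space -= len;
		*seg_ofs += len;          /* advance past consumed bytes */
		if (*seg_ofs == s->len) { /* segment exhausted: move on */
			(*seg_idx)++;
			*seg_ofs = 0;
		}
	}
	return total;
}

int main(void)
{
	struct seg segs[] = { { "hello ", 6 }, { "world", 5 } };
	char buf[16];
	size_t idx = 0, ofs = 0;
	size_t n = copy_segments(buf, sizeof(buf) - 1, segs, 2, &idx, &ofs);
	buf[n] = '\0';
	printf("%s\n", buf); /* hello world */
	return 0;
}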
/*
* super.c - NTFS kernel super block handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
* Copyright (c) 2001,2002 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
......@@ -3193,8 +3193,8 @@ static void __exit exit_ntfs_fs(void)
ntfs_sysctl(0);
}
MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>");
MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2007 Anton Altaparmakov");
MODULE_AUTHOR("Anton Altaparmakov <anton@tuxera.com>");
MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.");
MODULE_VERSION(NTFS_VERSION);
MODULE_LICENSE("GPL");
#ifdef DEBUG
......
......@@ -94,7 +94,7 @@ struct xenbus_driver {
int (*remove)(struct xenbus_device *dev);
int (*suspend)(struct xenbus_device *dev, pm_message_t state);
int (*resume)(struct xenbus_device *dev);
int (*uevent)(struct xenbus_device *, char **, int, char *, int);
int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *);
struct device_driver driver;
int (*read_otherend_details)(struct xenbus_device *dev);
int (*is_ready)(struct xenbus_device *dev);
......
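With the signature change, a driver-side uevent hook now receives the kobj_uevent_env and appends variables through add_uevent_var(), the same way the bus-level callbacks above do. A minimal sketch of a driver hook under the new signature; the function name and the exported variable are illustrative, not an existing driver's:

/* Sketch only: a xenbus_driver uevent hook under the new signature. */
static int example_uevent(struct xenbus_device *xdev,
			  struct kobj_uevent_env *env)
{
	/* Export the device type to userspace hotplug handlers. */
	if (add_uevent_var(env, "EXAMPLE_XENBUS_TYPE=%s", xdev->devicetype))
		return -ENOMEM;
	return 0;
}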
turbostat : turbostat.c
clean :
rm -f turbostat
install :
install turbostat /usr/bin/turbostat
install turbostat.8 /usr/share/man/man8
.TH TURBOSTAT 8
.SH NAME
turbostat \- Report processor frequency and idle statistics
.SH SYNOPSIS
.ft B
.B turbostat
.RB [ "\-v" ]
.RB [ "\-M MSR#" ]
.RB command
.br
.B turbostat
.RB [ "\-v" ]
.RB [ "\-M MSR#" ]
.RB [ "\-i interval_sec" ]
.SH DESCRIPTION
\fBturbostat \fP reports processor topology, frequency
and idle power state statistics on modern X86 processors.
Either \fBcommand\fP is forked and statistics are printed
upon its completion, or statistics are printed periodically.
\fBturbostat \fP
requires that the processor
supports an "invariant" TSC, plus the APERF and MPERF MSRs.
\fBturbostat \fP will report idle cpu power state residency
on processors that additionally support C-state residency counters.
.SS Options
The \fB-v\fP option increases verbosity.
.PP
The \fB-M MSR#\fP option dumps the specified MSR,
in addition to the usual frequency and idle statistics.
.PP
The \fB-i interval_sec\fP option prints statistics every \fIinterval_sec\fP seconds.
The default is 5 seconds.
.PP
The \fBcommand\fP parameter forks \fBcommand\fP and upon its exit,
displays the statistics gathered since it was forked.
.PP
.SH FIELD DESCRIPTIONS
.nf
\fBpkg\fP processor package number.
\fBcore\fP processor core number.
\fBCPU\fP Linux CPU (logical processor) number.
\fB%c0\fP percent of the interval that the CPU retired instructions.
\fBGHz\fP average clock rate while the CPU was in c0 state.
\fBTSC\fP average GHz that the TSC ran during the entire interval.
\fB%c1, %c3, %c6\fP show the percentage residency in hardware core idle states.
\fB%pc3, %pc6\fP percentage residency in hardware package idle states.
.fi
.PP
.SH EXAMPLE
Without any parameters, turbostat prints out counters every 5 seconds
(override the interval with the "-i sec" option, or specify a command
for turbostat to fork).
The first row of statistics reflects the average for the entire system.
Subsequent rows show per-CPU statistics.
.nf
[root@x980]# ./turbostat
core CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
0.04 1.62 3.38 0.11 0.00 99.85 0.00 95.07
0 0 0.04 1.62 3.38 0.06 0.00 99.90 0.00 95.07
0 6 0.02 1.62 3.38 0.08 0.00 99.90 0.00 95.07
1 2 0.10 1.62 3.38 0.29 0.00 99.61 0.00 95.07
1 8 0.11 1.62 3.38 0.28 0.00 99.61 0.00 95.07
2 4 0.01 1.62 3.38 0.01 0.00 99.98 0.00 95.07
2 10 0.01 1.61 3.38 0.02 0.00 99.98 0.00 95.07
8 1 0.07 1.62 3.38 0.15 0.00 99.78 0.00 95.07
8 7 0.03 1.62 3.38 0.19 0.00 99.78 0.00 95.07
9 3 0.01 1.62 3.38 0.02 0.00 99.98 0.00 95.07
9 9 0.01 1.62 3.38 0.02 0.00 99.98 0.00 95.07
10 5 0.01 1.62 3.38 0.13 0.00 99.86 0.00 95.07
10 11 0.08 1.62 3.38 0.05 0.00 99.86 0.00 95.07
.fi
.SH VERBOSE EXAMPLE
The "-v" option adds verbosity to the output:
.nf
GenuineIntel 11 CPUID levels; family:model:stepping 0x6:2c:2 (6:44:2)
12 * 133 = 1600 MHz max efficiency
25 * 133 = 3333 MHz TSC frequency
26 * 133 = 3467 MHz max turbo 4 active cores
26 * 133 = 3467 MHz max turbo 3 active cores
27 * 133 = 3600 MHz max turbo 2 active cores
27 * 133 = 3600 MHz max turbo 1 active cores
.fi
The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency
available at the minimum package voltage. The \fBTSC frequency\fP is the nominal
maximum frequency of the processor if turbo-mode were not available. This frequency
should be sustainable on all CPUs indefinitely, given nominal power and cooling.
The remaining rows show what maximum turbo frequency is possible
depending on the number of idle cores. Note that this information is
not available on all processors.
.SH FORK EXAMPLE
If turbostat is invoked with a command, it will fork that command
and output the statistics gathered when the command exits.
e.g., here a cycle soaker is run on one CPU (see %c0) for a few seconds,
until ^C, while the other CPUs are mostly idle:
.nf
[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
^Ccore CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
8.49 3.63 3.38 16.23 0.66 74.63 0.00 0.00
0 0 1.22 3.62 3.38 32.18 0.00 66.60 0.00 0.00
0 6 0.40 3.61 3.38 33.00 0.00 66.60 0.00 0.00
1 2 0.11 3.14 3.38 0.19 3.95 95.75 0.00 0.00
1 8 0.05 2.88 3.38 0.25 3.95 95.75 0.00 0.00
2 4 0.00 3.13 3.38 0.02 0.00 99.98 0.00 0.00
2 10 0.00 3.09 3.38 0.02 0.00 99.98 0.00 0.00
8 1 0.04 3.50 3.38 14.43 0.00 85.54 0.00 0.00
8 7 0.03 2.98 3.38 14.43 0.00 85.54 0.00 0.00
9 3 0.00 3.16 3.38 100.00 0.00 0.00 0.00 0.00
9 9 99.93 3.63 3.38 0.06 0.00 0.00 0.00 0.00
10 5 0.01 2.82 3.38 0.08 0.00 99.91 0.00 0.00
10 11 0.02 3.36 3.38 0.06 0.00 99.91 0.00 0.00
6.950866 sec
.fi
Above, the cycle soaker drives cpu9 up to the 3.6 GHz turbo limit,
while the other processors are generally in various states of idle.
Note that cpu3 is an HT sibling sharing core 9
with cpu9, and is thus unable to reach an idle state
deeper than c1 while cpu9 is busy.
Note that turbostat reports an average of 3.61 GHz, while
the arithmetic average of the GHz column above is 3.24.
This is a weighted average, where the weight is %c0, i.e. it is the
total number of un-halted cycles elapsed per unit time, divided by the number of CPUs.
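In other words, approximately:
.nf
	average GHz = sum(%c0[i] * GHz[i]) / sum(%c0[i])
.fi
In the run above the sum is dominated by cpu9 (99.93% c0 at 3.63 GHz),
so the weighted average lands near 3.6, while the unweighted column
average is dragged down by the mostly-idle CPUs.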
.SH NOTES
.B "turbostat "
must be run as root.
.B "turbostat "
reads hardware counters, but doesn't write them.
So it will not interfere with the OS or other programs, including
multiple invocations of itself.
\fBturbostat \fP
may work poorly on Linux-2.6.20 through 2.6.29,
as \fBacpi-cpufreq \fPperiodically cleared the APERF and MPERF MSRs
in those kernels.
The APERF, MPERF MSRs are defined to count non-halted cycles.
Although it is not guaranteed by the architecture, turbostat assumes
that they count at TSC rate, which is true on all processors tested to date.
.SH REFERENCES
"Intel® Turbo Boost Technology
in Intel® Core™ Microarchitecture (Nehalem) Based Processors"
http://download.intel.com/design/processor/applnots/320354.pdf
"Intel® 64 and IA-32 Architectures Software Developer's Manual
Volume 3B: System Programming Guide"
http://www.intel.com/products/processor/manuals/
.SH FILES
.ta
.nf
/dev/cpu/*/msr
.fi
.SH "SEE ALSO"
msr(4), vmstat(8)
.PP
.SH AUTHORS
.nf
Written by Len Brown <len.brown@intel.com>
x86_energy_perf_policy : x86_energy_perf_policy.c
clean :
rm -f x86_energy_perf_policy
install :
install x86_energy_perf_policy /usr/bin/
install x86_energy_perf_policy.8 /usr/share/man/man8/