Commit f9915bfe authored by Ingo Molnar

Merge branches 'tracing/ftrace' and 'tracing/urgent' into tracing/core

parents b91facc3 acd89579
@@ -36,10 +36,10 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
...
@@ -51,6 +51,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
return (((counters >> 14) - counters) & 0x1fff) > 1;
}
#define __raw_spin_is_contended __raw_spin_is_contended
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
...
@@ -1402,6 +1402,7 @@ static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended
static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
...
@@ -245,6 +245,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended
static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
...
@@ -939,10 +939,25 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
free_cpumask_var(data->acpi_data.shared_cpu_map);
}
static int get_transition_latency(struct powernow_k8_data *data)
{
int max_latency = 0;
int i;
for (i = 0; i < data->acpi_data.state_count; i++) {
int cur_latency = data->acpi_data.states[i].transition_latency
+ data->acpi_data.states[i].bus_master_latency;
if (cur_latency > max_latency)
max_latency = cur_latency;
}
/* value in usecs, needs to be in nanoseconds */
return 1000 * max_latency;
}
#else
static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
static int get_transition_latency(struct powernow_k8_data *data) { return 0; }
#endif /* CONFIG_X86_POWERNOW_K8_ACPI */
/* Take a frequency, and issue the fid/vid transition command */
@@ -1173,7 +1188,13 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
if (rc) {
goto err_out;
}
}
/* Take a crude guess here.
* That guess was in microseconds, so multiply with 1000 */
pol->cpuinfo.transition_latency = (
((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
((1 << data->irt) * 30)) * 1000;
} else /* ACPI _PSS objects available */
pol->cpuinfo.transition_latency = get_transition_latency(data);
/* only run on specific CPU from here on */
oldmask = current->cpus_allowed;
@@ -1204,11 +1225,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
data->available_cores = pol->cpus;
/* Take a crude guess here.
* That guess was in microseconds, so multiply with 1000 */
pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
+ (3 * (1 << data->irt) * 10)) * 1000;
if (cpu_family == CPU_HW_PSTATE)
pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
else
...
@@ -149,6 +149,9 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
if (q == alg)
goto err;
if (crypto_is_moribund(q))
continue;
if (crypto_is_larval(q)) {
if (!strcmp(alg->cra_driver_name, q->cra_driver_name))
goto err;
@@ -197,7 +200,7 @@ void crypto_alg_tested(const char *name, int err)
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (!crypto_is_larval(q))
if (crypto_is_moribund(q) || !crypto_is_larval(q))
continue;
test = (struct crypto_larval *)q;
@@ -210,6 +213,7 @@ void crypto_alg_tested(const char *name, int err)
goto unlock;
found:
q->cra_flags |= CRYPTO_ALG_DEAD;
alg = test->adult;
if (err || list_empty(&alg->cra_list))
goto complete;
...
@@ -557,34 +557,34 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
/*
* crypto_free_tfm - Free crypto transform
* crypto_destroy_tfm - Free crypto transform
* @mem: Start of tfm slab
* @tfm: Transform to free
*
* crypto_free_tfm() frees up the transform and any associated resources,
* This function frees up the transform and any associated resources,
* then drops the refcount on the associated algorithm.
*/
void crypto_free_tfm(struct crypto_tfm *tfm)
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
struct crypto_alg *alg;
int size;
if (unlikely(!tfm))
if (unlikely(!mem))
return;
alg = tfm->__crt_alg;
size = sizeof(*tfm) + alg->cra_ctxsize;
size = ksize(mem);
if (!tfm->exit && alg->cra_exit)
alg->cra_exit(tfm);
crypto_exit_ops(tfm);
crypto_mod_put(alg);
memset(tfm, 0, size);
memset(mem, 0, size);
kfree(tfm);
kfree(mem);
}
EXPORT_SYMBOL_GPL(crypto_destroy_tfm);
EXPORT_SYMBOL_GPL(crypto_free_tfm);
int crypto_has_alg(const char *name, u32 type, u32 mask)
{
...
@@ -54,7 +54,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
struct page *page;
page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
flush_dcache_page(page);
if (!PageSlab(page))
flush_dcache_page(page);
}
if (more) {
...
@@ -388,10 +388,15 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
struct shash_desc *desc = crypto_tfm_ctx(tfm);
struct crypto_shash *shash;
if (!crypto_mod_get(calg))
return -EAGAIN;
shash = __crypto_shash_cast(crypto_create_tfm(
calg, &crypto_shash_type));
if (IS_ERR(shash))
if (IS_ERR(shash)) {
crypto_mod_put(calg);
return PTR_ERR(shash);
}
desc->tfm = shash;
tfm->exit = crypto_exit_shash_ops_compat;
...
@@ -117,11 +117,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
if (!dbs_tuners_ins.ignore_nice) {
busy_time = cputime64_add(busy_time,
kstat_cpu(cpu).cpustat.nice);
}
idle_time = cputime64_sub(cur_wall_time, busy_time);
if (wall)
@@ -137,23 +133,6 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
if (idle_time == -1ULL)
return get_cpu_idle_time_jiffy(cpu, wall);
if (dbs_tuners_ins.ignore_nice) {
cputime64_t cur_nice;
unsigned long cur_nice_jiffies;
struct cpu_dbs_info_s *dbs_info;
dbs_info = &per_cpu(cpu_dbs_info, cpu);
cur_nice = cputime64_sub(kstat_cpu(cpu).cpustat.nice,
dbs_info->prev_cpu_nice);
/*
* Assumption: nice time between sampling periods will be
* less than 2^32 jiffies for 32 bit sys
*/
cur_nice_jiffies = (unsigned long)
cputime64_to_jiffies64(cur_nice);
dbs_info->prev_cpu_nice = kstat_cpu(cpu).cpustat.nice;
return idle_time + jiffies_to_usecs(cur_nice_jiffies);
}
return idle_time;
}
@@ -319,6 +298,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
dbs_info = &per_cpu(cpu_dbs_info, j);
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice)
dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
}
mutex_unlock(&dbs_mutex);
@@ -419,6 +401,23 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time;
if (dbs_tuners_ins.ignore_nice) {
cputime64_t cur_nice;
unsigned long cur_nice_jiffies;
cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
j_dbs_info->prev_cpu_nice);
/*
* Assumption: nice time between sampling periods will
* be less than 2^32 jiffies for 32 bit sys
*/
cur_nice_jiffies = (unsigned long)
cputime64_to_jiffies64(cur_nice);
j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
idle_time += jiffies_to_usecs(cur_nice_jiffies);
}
if (unlikely(!wall_time || wall_time < idle_time))
continue;
@@ -575,6 +574,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&j_dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice) {
j_dbs_info->prev_cpu_nice =
kstat_cpu(j).cpustat.nice;
}
}
this_dbs_info->cpu = cpu;
/*
...
@@ -70,7 +70,7 @@ config DRM_I915
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
depends on FB
select FB
tristate "i915 driver"
help
Choose this option if you have a system that has Intel 830M, 845G,
...
@@ -276,6 +276,7 @@ int drm_irq_uninstall(struct drm_device * dev)
for (i = 0; i < dev->num_crtcs; i++) {
DRM_WAKEUP(&dev->vbl_queue[i]);
dev->vblank_enabled[i] = 0;
dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
...
@@ -171,9 +171,14 @@ EXPORT_SYMBOL(drm_core_ioremap);
void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev)
{
map->handle = ioremap_wc(map->offset, map->size);
if (drm_core_has_AGP(dev) &&
dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap_wc(map->offset, map->size);
}
EXPORT_SYMBOL(drm_core_ioremap_wc);
void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
{
if (!map->handle || !map->size)
...
@@ -731,8 +731,11 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_GEM:
value = dev_priv->has_gem;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
break;
default:
DRM_ERROR("Unknown parameter %d\n", param->param);
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
}
@@ -764,8 +767,15 @@ static int i915_setparam(struct drm_device *dev, void *data,
case I915_SETPARAM_ALLOW_BATCHBUFFER:
dev_priv->allow_batchbuffer = param->value;
break;
case I915_SETPARAM_NUM_USED_FENCES:
if (param->value > dev_priv->num_fence_regs ||
param->value < 0)
return -EINVAL;
/* Userspace can use first N regs */
dev_priv->fence_reg_start = param->value;
break;
default:
DRM_ERROR("unknown parameter %d\n", param->param);
DRM_DEBUG("unknown parameter %d\n", param->param);
return -EINVAL;
}
@@ -966,10 +976,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto kfree_devname;
dev_priv->mm.gtt_mapping =
io_mapping_create_wc(dev->agp->base,
dev->agp->agp_info.aper_size * 1024*1024);
/* Allow hardware batchbuffers unless told otherwise.
*/
dev_priv->allow_batchbuffer = 1;
@@ -1081,6 +1087,23 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
dev_priv->mm.gtt_mapping =
io_mapping_create_wc(dev->agp->base,
dev->agp->agp_info.aper_size * 1024*1024);
/* Set up a WC MTRR for non-PAT systems. This is more common than
* one would think, because the kernel disables PAT on first
* generation Core chips because WC PAT gets overridden by a UC
* MTRR if present. Even if a UC MTRR isn't present.
*/
dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
dev->agp->agp_info.aper_size *
1024 * 1024,
MTRR_TYPE_WRCOMB, 1);
if (dev_priv->mm.gtt_mtrr < 0) {
DRM_INFO("MTRR allocation failed\n. Graphics "
"performance may suffer.\n");
}
#ifdef CONFIG_HIGHMEM64G
/* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
dev_priv->has_gem = 0;
@@ -1089,6 +1112,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->has_gem = 1;
#endif
dev->driver->get_vblank_counter = i915_get_vblank_counter;
if (IS_GM45(dev))
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
i915_gem_load(dev);
/* Init HWS */
@@ -1145,8 +1172,14 @@ int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
io_mapping_free(dev_priv->mm.gtt_mapping);
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
dev->agp->agp_info.aper_size * 1024 * 1024);
dev_priv->mm.gtt_mtrr = -1;
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
io_mapping_free(dev_priv->mm.gtt_mapping);
drm_irq_uninstall(dev);
}
...
@@ -112,7 +112,6 @@ static struct drm_driver driver = {
.suspend = i915_suspend,
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
.irq_preinstall = i915_driver_irq_preinstall,
...
@@ -284,6 +284,7 @@ typedef struct drm_i915_private {
struct drm_mm gtt_space;
struct io_mapping *gtt_mapping;
int gtt_mtrr;
/**
* List of objects currently involved in rendering from the
@@ -534,6 +535,7 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
extern int i915_enable_vblank(struct drm_device *dev, int crtc);
extern void i915_disable_vblank(struct drm_device *dev, int crtc);
extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
@@ -601,6 +603,7 @@ int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
void i915_gem_retire_requests(struct drm_device *dev);
@@ -784,6 +787,11 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
...
@@ -52,7 +52,7 @@ static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
@@ -567,6 +567,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
@@ -585,8 +586,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Need a new fence register? */
if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
obj_priv->tiling_mode != I915_TILING_NONE)
obj_priv->tiling_mode != I915_TILING_NONE) {
i915_gem_object_get_fence_reg(obj);
ret = i915_gem_object_get_fence_reg(obj, write);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return VM_FAULT_SIGBUS;
}
}
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
page_offset;
@@ -1211,7 +1217,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
/**
* Unbinds an object from the GTT aperture.
*/
static int
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
@@ -1445,21 +1451,26 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int regnum = obj_priv->fence_reg;
int tile_width;
uint32_t val;
uint32_t pitch_val;
if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
(obj_priv->gtt_offset & (obj->size - 1))) {
WARN(1, "%s: object not 1M or size aligned\n", __func__);
WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
__func__, obj_priv->gtt_offset, obj->size);
return;
}
if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
if (obj_priv->tiling_mode == I915_TILING_Y &&
IS_I945GM(dev) ||
HAS_128_BYTE_Y_TILING(dev))
IS_G33(dev)))
tile_width = 128;
pitch_val = (obj_priv->stride / 128) - 1;
else
pitch_val = (obj_priv->stride / 512) - 1;
tile_width = 512;
/* Note: pitch better be a power of two tile widths */
pitch_val = obj_priv->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
@@ -1483,7 +1494,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
(obj_priv->gtt_offset & (obj->size - 1))) {
WARN(1, "%s: object not 1M or size aligned\n", __func__);
WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
__func__, obj_priv->gtt_offset);
return;
}
@@ -1503,6 +1515,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
/**
* i915_gem_object_get_fence_reg - set up a fence reg for an object
* @obj: object to map through a fence reg
* @write: object is about to be written
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
@@ -1513,8 +1526,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
* It then sets up the reg based on the object's properties: address, pitch
* and tiling format.
*/
static void
static int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
{
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1527,12 +1540,18 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
WARN(1, "allocating a fence for non-tiled object?\n");
break;
case I915_TILING_X:
WARN(obj_priv->stride & (512 - 1),
if (!obj_priv->stride)
"object is X tiled but has non-512B pitch\n");
return -EINVAL;
WARN((obj_priv->stride & (512 - 1)),
"object 0x%08x is X tiled but has non-512B pitch\n",
obj_priv->gtt_offset);
break;
case I915_TILING_Y:
WARN(obj_priv->stride & (128 - 1),
if (!obj_priv->stride)
"object is Y tiled but has non-128B pitch\n");
return -EINVAL;
WARN((obj_priv->stride & (128 - 1)),
"object 0x%08x is Y tiled but has non-128B pitch\n",
obj_priv->gtt_offset);
break;
}
@@ -1563,10 +1582,11 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
* objects to finish before trying again.
*/
if (i == dev_priv->num_fence_regs) {
ret = i915_gem_object_wait_rendering(reg->obj);
ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0);
if (ret) {
WARN(ret, "wait_rendering failed: %d\n", ret);
WARN(ret != -ERESTARTSYS,
return;
"switch to GTT domain failed: %d\n", ret);
return ret;
}
goto try_again;
}
@@ -1591,6 +1611,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
i915_write_fence_reg(reg);
else
i830_write_fence_reg(reg);
return 0;
}
/**
@@ -1631,7 +1653,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
if (dev_priv->mm.suspended)
return -EBUSY;
if (alignment == 0)
alignment = PAGE_SIZE;
alignment = i915_gem_get_gtt_alignment(obj);
if (alignment & (PAGE_SIZE - 1)) {
DRM_ERROR("Invalid object alignment requested %u\n", alignment);
return -EINVAL;
@@ -2652,6 +2674,14 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
DRM_ERROR("Failure to bind: %d", ret);
return ret;
}
/*
* Pre-965 chips need a fence register set up in order to
* properly handle tiled surfaces.
*/
if (!IS_I965G(dev) &&
obj_priv->fence_reg == I915_FENCE_REG_NONE &&
obj_priv->tiling_mode != I915_TILING_NONE)
i915_gem_object_get_fence_reg(obj, true);
}
obj_priv->pin_count++;
@@ -3229,10 +3259,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
dev_priv->mm.wedged = 0;
}
dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
dev->agp->agp_info.aper_size
* 1024 * 1024);
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
@@ -3255,7 +3281,6 @@ int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -3264,7 +3289,6 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_idle(dev);
drm_irq_uninstall(dev);
io_mapping_free(dev_priv->mm.gtt_mapping);
return ret;
}
@@ -3273,6 +3297,9 @@ i915_gem_lastclose(struct drm_device *dev)
{
int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
ret = i915_gem_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
@@ -3294,7 +3321,7 @@ i915_gem_load(struct drm_device *dev)
/* Old X drivers will take 0-2 for front, back, depth buffers */
dev_priv->fence_reg_start = 3;
if (IS_I965G(dev))
if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16;
else
dev_priv->num_fence_regs = 8;
...
@@ -173,6 +173,73 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
/**
* Returns the size of the fence for a tiled object of the given size.
*/
static int
i915_get_fence_size(struct drm_device *dev, int size)
{
int i;
int start;
if (IS_I965G(dev)) {
/* The 965 can have fences at any page boundary. */
return ALIGN(size, 4096);
} else {
/* Align the size to a power of two greater than the smallest
* fence size.
*/
if (IS_I9XX(dev))
start = 1024 * 1024;
else
start = 512 * 1024;
for (i = start; i < size; i <<= 1)
;
return i;
}
}
/* Check pitch constriants for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
/* Linear is always fine */
if (tiling_mode == I915_TILING_NONE)
return true;
if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
else
tile_width = 512;
/* 965+ just needs multiples of tile width */
if (IS_I965G(dev)) {
if (stride & (tile_width - 1))
return false;
return true;
}
/* Pre-965 needs power of two tile widths */
if (stride < tile_width)
return false;
if (stride & (stride - 1))
return false;
/* We don't handle the aperture area covered by the fence being bigger
* than the object size.
*/
if (i915_get_fence_size(dev, size) != size)
return false;
return true;
}
/**
* Sets the tiling mode of an object, returning the required swizzling of
* bit 6 of addresses in the object.
@@ -191,6 +258,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL;
obj_priv = obj->driver_private;
if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
drm_gem_object_unreference(obj);
return -EINVAL;
}
mutex_lock(&dev->struct_mutex);
if (args->tiling_mode == I915_TILING_NONE) {
@@ -207,7 +279,24 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
}
}
obj_priv->tiling_mode = args->tiling_mode;
if (args->tiling_mode != obj_priv->tiling_mode) {
int ret;
/* Unbind the object, as switching tiling means we're
* switching the cache organization due to fencing, probably.
*/
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
WARN(ret != -ERESTARTSYS,
"failed to unbind object for tiling switch");
args->tiling_mode = obj_priv->tiling_mode;
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
return ret;
}
obj_priv->tiling_mode = args->tiling_mode;
}
obj_priv->stride = args->stride;
mutex_unlock(&dev->struct_mutex);
...
@@ -174,6 +174,19 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
return count;
}
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
if (!i915_pipe_enabled(dev, pipe)) {
DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
return 0;
}
return I915_READ(reg);
}
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
...
@@ -186,12 +186,12 @@
#define FENCE_REG_830_0 0x2000
#define I830_FENCE_START_MASK 0x07f80000
#define I830_FENCE_TILING_Y_SHIFT 12
#define I830_FENCE_SIZE_BITS(size) ((get_order(size >> 19) - 1) << 8)
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
#define I830_FENCE_REG_VALID (1<<0)
#define I915_FENCE_START_MASK 0x0ff00000
#define I915_FENCE_SIZE_BITS(size) ((get_order(size >> 20) - 1) << 8)
#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
#define FENCE_REG_965_0 0x03000
#define I965_FENCE_PITCH_SHIFT 2
@@ -1371,6 +1371,9 @@
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
/* GM45+ just has to be different */
#define PIPEA_FRMCOUNT_GM45 0x70040
#define PIPEA_FLIPCOUNT_GM45 0x70044
/* Cursor A & B regs */
#define CURACNTR 0x70080
@@ -1439,6 +1442,9 @@
#define PIPEBSTAT 0x71024
#define PIPEBFRAMEHIGH 0x71040
#define PIPEBFRAMEPIXEL 0x71044
#define PIPEB_FRMCOUNT_GM45 0x71040
#define PIPEB_FLIPCOUNT_GM45 0x71044
/* Display B control */
#define DSPBCNTR 0x71180
...
@@ -755,6 +755,8 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (intel_output->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_DVO:
is_dvo = true;
@@ -1452,6 +1454,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
intel_crt_init(dev);
@@ -1463,13 +1466,16 @@ static void intel_setup_outputs(struct drm_device *dev)
if (IS_I9XX(dev)) {
int found;
found = intel_sdvo_init(dev, SDVOB);
if (I915_READ(SDVOB) & SDVO_DETECTED) {
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
found = intel_sdvo_init(dev, SDVOB);
intel_hdmi_init(dev, SDVOB);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOB);
found = intel_sdvo_init(dev, SDVOC);
}
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) {
intel_hdmi_init(dev, SDVOC);
found = intel_sdvo_init(dev, SDVOC);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOC);
}
} else
intel_dvo_init(dev);
...
@@ -82,6 +82,7 @@ struct intel_output {
struct intel_i2c_chan *i2c_bus; /* for control functions */
struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
bool load_detect_temp;
bool needs_tv_clock;
void *dev_priv;
};
...
@@ -27,6 +27,7 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <linux/dmi.h>
#include <linux/i2c.h>
#include "drmP.h"
#include "drm.h"
@@ -311,10 +312,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mutex_lock(&dev->mode_config.mutex);
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
drm_mode_probed_add(connector, mode);
mutex_unlock(&dev->mode_config.mutex);
return 1;
}
@@ -405,6 +404,16 @@ void intel_lvds_init(struct drm_device *dev)
u32 lvds;
int pipe;
/* Blacklist machines that we know falsely report LVDS. */
/* FIXME: add a check for the Aopen Mini PC */
/* Apple Mac Mini Core Duo and Mac Mini Core 2 Duo */
if(dmi_match(DMI_PRODUCT_NAME, "Macmini1,1") ||
dmi_match(DMI_PRODUCT_NAME, "Macmini2,1")) {
DRM_DEBUG("Skipping LVDS initialization for Apple Mac Mini\n");
return;
}
intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
if (!intel_output) {
return;
@@ -458,7 +467,7 @@ void intel_lvds_init(struct drm_device *dev)
dev_priv->panel_fixed_mode =
drm_mode_duplicate(dev, scan);
mutex_unlock(&dev->mode_config.mutex);
goto out; /* FIXME: check for quirks */
goto out;
}
mutex_unlock(&dev->mode_config.mutex);
}
@@ -492,7 +501,7 @@ void intel_lvds_init(struct drm_device *dev)
if (dev_priv->panel_fixed_mode) {
dev_priv->panel_fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
goto out; /* FIXME: check for quirks */
goto out;
}
}
@@ -500,38 +509,6 @@ void intel_lvds_init(struct drm_device *dev)
if (!dev_priv->panel_fixed_mode)
goto failed;
/* FIXME: detect aopen & mac mini type stuff automatically? */
/*
* Blacklist machines with BIOSes that list an LVDS panel without
* actually having one.
*/
if (IS_I945GM(dev)) {
/* aopen mini pc */
if (dev->pdev->subsystem_vendor == 0xa0a0)
goto failed;
if ((dev->pdev->subsystem_vendor == 0x8086) &&
(dev->pdev->subsystem_device == 0x7270)) {
/* It's a Mac Mini or Macbook Pro.
*
* Apple hardware is out to get us. The macbook pro
* has a real LVDS panel, but the mac mini does not,
* and they have the same device IDs. We'll
* distinguish by panel size, on the assumption
* that Apple isn't about to make any machines with an
* 800x600 display.
*/
if (dev_priv->panel_fixed_mode != NULL &&
dev_priv->panel_fixed_mode->hdisplay == 800 &&
dev_priv->panel_fixed_mode->vdisplay == 600) {
DRM_DEBUG("Suspected Mac Mini, ignoring the LVDS\n");
goto failed;
}
}
}
out:
drm_sysfs_connector_add(connector);
return;
...
@@ -1039,9 +1039,9 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
drm_core_ioremap(dev_priv->cp_ring, dev);
drm_core_ioremap_wc(dev_priv->cp_ring, dev);
drm_core_ioremap(dev_priv->ring_rptr, dev);
drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
drm_core_ioremap(dev->agp_buffer_map, dev);
drm_core_ioremap_wc(dev->agp_buffer_map, dev);
if (!dev_priv->cp_ring->handle ||
!dev_priv->ring_rptr->handle ||
!dev->agp_buffer_map->handle) {
...
@@ -27,6 +27,7 @@ menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
bool "Android RAM Console Enable error correction"
default n
depends on ANDROID_RAM_CONSOLE
depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
select REED_SOLOMON
select REED_SOLOMON_ENC8
select REED_SOLOMON_DEC8
...
@@ -224,9 +224,23 @@ static int __init ram_console_init(struct ram_console_buffer *buffer,
ram_console_buffer_size =
buffer_size - sizeof(struct ram_console_buffer);
if (ram_console_buffer_size > buffer_size) {
pr_err("ram_console: buffer %p, invalid size %d, datasize %d\n",
buffer, buffer_size, ram_console_buffer_size);
return 0;
}
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
if (ram_console_buffer_size > buffer_size) {
pr_err("ram_console: buffer %p, invalid size %d, "
"non-ecc datasize %d\n",
buffer, buffer_size, ram_console_buffer_size);
return 0;
}
ram_console_par_buffer = buffer->data + ram_console_buffer_size;
...
@@ -50,7 +50,7 @@ static ssize_t gpio_enable_show(struct device *dev, struct device_attribute *att
if (hrtimer_active(&gpio_data->timer)) {
ktime_t r = hrtimer_get_remaining(&gpio_data->timer);
struct timeval t = ktime_to_timeval(r);
remaining = t.tv_sec * 1000 + t.tv_usec;
remaining = t.tv_sec * 1000 + t.tv_usec / 1000;
} else
remaining = 0;
...
config USB_ATMEL
tristate "Atmel at76c503/at76c505/at76c505a USB cards"
depends on MAC80211 && WLAN_80211 && USB
depends on WLAN_80211 && USB
default N
select FW_LOADER
---help---
...
@@ -622,7 +622,7 @@ static int set_ctrl_bits(void)
}
/* sets ctrl & data port bits according to current signals values */
static void set_bits(void)
static void panel_set_bits(void)
{
set_data_bits();
set_ctrl_bits();
@@ -707,12 +707,12 @@ static void lcd_send_serial(int byte)
*/
for (bit = 0; bit < 8; bit++) {
bits.cl = BIT_CLR; /* CLK low */
set_bits();
panel_set_bits();
bits.da = byte & 1;
set_bits();
panel_set_bits();
udelay(2); /* maintain the data during 2 us before CLK up */
bits.cl = BIT_SET; /* CLK high */
set_bits();
panel_set_bits();
udelay(1); /* maintain the strobe during 1 us */
byte >>= 1;
}
@@ -727,7 +727,7 @@ static void lcd_backlight(int on)
/* The backlight is activated by seting the AUTOFEED line to +5V */
spin_lock(&pprt_lock);
bits.bl = on;
set_bits();
panel_set_bits();
spin_unlock(&pprt_lock);
} }
......
@@ -11,6 +11,7 @@ obj-$(CONFIG_USB_MON) += mon/
obj-$(CONFIG_PCI) += host/
obj-$(CONFIG_USB_EHCI_HCD) += host/
obj-$(CONFIG_USB_ISP116X_HCD) += host/
obj-$(CONFIG_USB_ISP1760_HCD) += host/
obj-$(CONFIG_USB_OHCI_HCD) += host/
obj-$(CONFIG_USB_UHCI_HCD) += host/
obj-$(CONFIG_USB_FHCI_HCD) += host/
...
@@ -1349,9 +1349,6 @@ static struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x0e8d, 0x3329), /* i-blue 747, Qstarz BT-Q1000, Holux M-241 */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
...
@@ -1622,6 +1622,8 @@ static int qe_ep_disable(struct usb_ep *_ep)
nuke(ep, -ESHUTDOWN);
ep->desc = NULL;
ep->stopped = 1;
ep->tx_req = NULL;
qe_ep_reset(udc, ep->epnum);
spin_unlock_irqrestore(&udc->lock, flags);
cpm_muram_free(cpm_muram_offset(ep->rxbase));
@@ -1681,14 +1683,11 @@ static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
kfree(req);
}
/* queues (submits) an I/O request to an endpoint */
static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
gfp_t gfp_flags)
{
struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
struct qe_req *req = container_of(_req, struct qe_req, req);
struct qe_udc *udc;
unsigned long flags;
int reval;
udc = ep->udc;
@@ -1732,7 +1731,7 @@ static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
list_add_tail(&req->queue, &ep->queue);
dev_vdbg(udc->dev, "gadget have request in %s! %d\n",
ep->name, req->req.length);
spin_lock_irqsave(&udc->lock, flags);
/* push the request to device */
if (ep_is_in(ep))
reval = ep_req_send(ep, req);
@@ -1748,11 +1747,24 @@ static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
if (ep->dir == USB_DIR_OUT)
reval = ep_req_receive(ep, req);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
/* queues (submits) an I/O request to an endpoint */
static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
gfp_t gfp_flags)
{
struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
struct qe_udc *udc = ep->udc;
unsigned long flags;
int ret;
spin_lock_irqsave(&udc->lock, flags);
ret = __qe_ep_queue(_ep, _req);
spin_unlock_irqrestore(&udc->lock, flags);
return ret;
}
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
@@ -2008,7 +2020,7 @@ static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
udc->ep0_dir = USB_DIR_IN;
/* data phase */
status = qe_ep_queue(&ep->ep, &req->req, GFP_ATOMIC);
status = __qe_ep_queue(&ep->ep, &req->req);
if (status == 0)
return;
@@ -2151,6 +2163,9 @@ static int reset_irq(struct qe_udc *udc)
{
unsigned char i;
if (udc->usb_state == USB_STATE_DEFAULT)
return 0;
qe_usb_disable();
out_8(&udc->usb_regs->usb_usadr, 0);
@@ -2442,8 +2457,12 @@ static int __devinit qe_udc_reg_init(struct qe_udc *udc)
struct usb_ctlr __iomem *qe_usbregs;
qe_usbregs = udc->usb_regs;
/* Init the usb register */
/* Spec says that we must enable the USB controller to change mode. */
out_8(&qe_usbregs->usb_usmod, 0x01);
/* Mode changed, now disable it, since muram isn't initialized yet. */
out_8(&qe_usbregs->usb_usmod, 0x00);
/* Initialize the rest. */
out_be16(&qe_usbregs->usb_usbmr, 0);
out_8(&qe_usbregs->usb_uscom, 0);
out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
@@ -2604,6 +2623,10 @@ static int __devinit qe_udc_probe(struct of_device *ofdev,
(unsigned long)udc_controller);
/* request irq and disable DR */
udc_controller->usb_irq = irq_of_parse_and_map(np, 0);
if (!udc_controller->usb_irq) {
ret = -EINVAL;
goto err_noirq;
}
ret = request_irq(udc_controller->usb_irq, qe_udc_irq, 0, ret = request_irq(udc_controller->usb_irq, qe_udc_irq, 0,
driver_name, udc_controller); driver_name, udc_controller);
...@@ -2625,6 +2648,8 @@ static int __devinit qe_udc_probe(struct of_device *ofdev, ...@@ -2625,6 +2648,8 @@ static int __devinit qe_udc_probe(struct of_device *ofdev,
err6: err6:
free_irq(udc_controller->usb_irq, udc_controller); free_irq(udc_controller->usb_irq, udc_controller);
err5: err5:
irq_dispose_mapping(udc_controller->usb_irq);
err_noirq:
if (udc_controller->nullmap) { if (udc_controller->nullmap) {
dma_unmap_single(udc_controller->gadget.dev.parent, dma_unmap_single(udc_controller->gadget.dev.parent,
udc_controller->nullp, 256, udc_controller->nullp, 256,
...@@ -2648,7 +2673,7 @@ static int __devinit qe_udc_probe(struct of_device *ofdev, ...@@ -2648,7 +2673,7 @@ static int __devinit qe_udc_probe(struct of_device *ofdev,
iounmap(udc_controller->usb_regs); iounmap(udc_controller->usb_regs);
err1: err1:
kfree(udc_controller); kfree(udc_controller);
udc_controller = NULL;
return ret; return ret;
} }
...@@ -2710,6 +2735,7 @@ static int __devexit qe_udc_remove(struct of_device *ofdev) ...@@ -2710,6 +2735,7 @@ static int __devexit qe_udc_remove(struct of_device *ofdev)
kfree(ep->txframe); kfree(ep->txframe);
free_irq(udc_controller->usb_irq, udc_controller); free_irq(udc_controller->usb_irq, udc_controller);
irq_dispose_mapping(udc_controller->usb_irq);
tasklet_kill(&udc_controller->rx_tasklet); tasklet_kill(&udc_controller->rx_tasklet);
......
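
The hunks above split request queueing in two: __qe_ep_queue() does the work and expects udc->lock to be held, while qe_ep_queue() stays the usb_ep_ops entry point and only wraps the helper in spin_lock_irqsave()/spin_unlock_irqrestore(); ch9getstatus() now submits the ep0 data phase through __qe_ep_queue() so the lock is not taken twice on that path. A minimal userspace sketch of the same pattern, with a pthread mutex standing in for the spinlock and every name invented for illustration:

/* build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t udc_lock = PTHREAD_MUTEX_INITIALIZER;
static int last_req;

/* unlocked worker: caller must hold udc_lock */
static int __queue_req(int req)
{
        last_req = req;
        return 0;
}

/* public entry point: takes the lock around the worker */
static int queue_req(int req)
{
        int ret;

        pthread_mutex_lock(&udc_lock);
        ret = __queue_req(req);
        pthread_mutex_unlock(&udc_lock);
        return ret;
}

/* a path that already runs under udc_lock calls the worker directly */
static void ep0_data_phase(void)
{
        pthread_mutex_lock(&udc_lock);   /* lock taken earlier in the real driver */
        __queue_req(42);                 /* no second lock, no deadlock */
        pthread_mutex_unlock(&udc_lock);
}

int main(void)
{
        queue_req(1);
        ep0_data_phase();
        printf("last queued request: %d\n", last_req);
        return 0;
}
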
...@@ -621,9 +621,9 @@ static int __init aircable_init(void) ...@@ -621,9 +621,9 @@ static int __init aircable_init(void)
goto failed_usb_register; goto failed_usb_register;
return 0; return 0;
failed_serial_register:
usb_serial_deregister(&aircable_device);
failed_usb_register: failed_usb_register:
usb_serial_deregister(&aircable_device);
failed_serial_register:
return retval; return retval;
} }
......
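
The aircable_init() hunk reorders the error labels so each one unwinds only what succeeded before the failing step: a failure after the serial driver has been registered jumps to failed_usb_register and deregisters it, while an earlier failure falls through failed_serial_register and just returns. A standalone sketch of that unwind shape, with stub functions standing in for the real USB serial calls:

#include <stdio.h>

static int register_serial(void) { return 0; }    /* stand-in: succeeds */
static int register_usb(void)    { return -1; }   /* stand-in: fails */
static void deregister_serial(void) { puts("serial driver deregistered"); }

static int demo_init(void)
{
        int retval;

        retval = register_serial();
        if (retval)
                goto failed_serial_register;

        retval = register_usb();
        if (retval)
                goto failed_usb_register;

        return 0;

failed_usb_register:
        deregister_serial();              /* undo the earlier registration */
failed_serial_register:
        return retval;
}

int main(void)
{
        printf("demo_init() = %d\n", demo_init());
        return 0;
}
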
...@@ -662,6 +662,7 @@ static struct usb_device_id id_table_combined [] = { ...@@ -662,6 +662,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
{ USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
{ USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) }, { USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID) },
{ }, /* Optional parameter entry */ { }, /* Optional parameter entry */
{ } /* Terminating entry */ { } /* Terminating entry */
}; };
...@@ -1064,8 +1065,10 @@ static int set_serial_info(struct tty_struct *tty, ...@@ -1064,8 +1065,10 @@ static int set_serial_info(struct tty_struct *tty,
if (!capable(CAP_SYS_ADMIN)) { if (!capable(CAP_SYS_ADMIN)) {
if (((new_serial.flags & ~ASYNC_USR_MASK) != if (((new_serial.flags & ~ASYNC_USR_MASK) !=
(priv->flags & ~ASYNC_USR_MASK))) (priv->flags & ~ASYNC_USR_MASK))) {
unlock_kernel();
return -EPERM; return -EPERM;
}
priv->flags = ((priv->flags & ~ASYNC_USR_MASK) | priv->flags = ((priv->flags & ~ASYNC_USR_MASK) |
(new_serial.flags & ASYNC_USR_MASK)); (new_serial.flags & ASYNC_USR_MASK));
priv->custom_divisor = new_serial.custom_divisor; priv->custom_divisor = new_serial.custom_divisor;
......
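
The set_serial_info() hunk adds an unlock_kernel() in front of the early -EPERM return for callers without CAP_SYS_ADMIN, so the lock held at that point is no longer leaked on the error path. A minimal sketch of that rule, with a pthread mutex standing in for the kernel lock and invented names:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int saved_flags;

static int set_flags(int is_admin, unsigned int new_flags)
{
        pthread_mutex_lock(&big_lock);
        if (!is_admin && new_flags != saved_flags) {
                pthread_mutex_unlock(&big_lock);  /* drop the lock before bailing out */
                return -EPERM;
        }
        saved_flags = new_flags;
        pthread_mutex_unlock(&big_lock);
        return 0;
}

int main(void)
{
        printf("unprivileged: %d, privileged: %d\n", set_flags(0, 1), set_flags(1, 1));
        return 0;
}
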
...@@ -844,6 +844,9 @@ ...@@ -844,6 +844,9 @@
#define TML_VID 0x1B91 /* Vendor ID */ #define TML_VID 0x1B91 /* Vendor ID */
#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */ #define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */
/* NDI Polaris System */
#define FTDI_NDI_HUC_PID 0xDA70
/* Propox devices */ /* Propox devices */
#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
......
...@@ -199,14 +199,15 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po ...@@ -199,14 +199,15 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
/* FUTURE NOVATEL PRODUCTS */ /* FUTURE NOVATEL PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_EVDO_1 0x6000 #define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0X6000
#define NOVATELWIRELESS_PRODUCT_HSPA_1 0x7000 #define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001
#define NOVATELWIRELESS_PRODUCT_EMBEDDED_1 0x8000 #define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000
#define NOVATELWIRELESS_PRODUCT_GLOBAL_1 0x9000 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001
#define NOVATELWIRELESS_PRODUCT_EVDO_2 0x6001 #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0X8000
#define NOVATELWIRELESS_PRODUCT_HSPA_2 0x7001 #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0X8001
#define NOVATELWIRELESS_PRODUCT_EMBEDDED_2 0x8001 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0X9000
#define NOVATELWIRELESS_PRODUCT_GLOBAL_2 0x9001 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0X9001
#define NOVATELWIRELESS_PRODUCT_GLOBAL 0XA001
/* AMOI PRODUCTS */ /* AMOI PRODUCTS */
#define AMOI_VENDOR_ID 0x1614 #define AMOI_VENDOR_ID 0x1614
...@@ -216,6 +217,27 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po ...@@ -216,6 +217,27 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define DELL_VENDOR_ID 0x413C #define DELL_VENDOR_ID 0x413C
/* Dell modems */
#define DELL_PRODUCT_5700_MINICARD 0x8114
#define DELL_PRODUCT_5500_MINICARD 0x8115
#define DELL_PRODUCT_5505_MINICARD 0x8116
#define DELL_PRODUCT_5700_EXPRESSCARD 0x8117
#define DELL_PRODUCT_5510_EXPRESSCARD 0x8118
#define DELL_PRODUCT_5700_MINICARD_SPRINT 0x8128
#define DELL_PRODUCT_5700_MINICARD_TELUS 0x8129
#define DELL_PRODUCT_5720_MINICARD_VZW 0x8133
#define DELL_PRODUCT_5720_MINICARD_SPRINT 0x8134
#define DELL_PRODUCT_5720_MINICARD_TELUS 0x8135
#define DELL_PRODUCT_5520_MINICARD_CINGULAR 0x8136
#define DELL_PRODUCT_5520_MINICARD_GENERIC_L 0x8137
#define DELL_PRODUCT_5520_MINICARD_GENERIC_I 0x8138
#define DELL_PRODUCT_5730_MINICARD_SPRINT 0x8180
#define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181
#define DELL_PRODUCT_5730_MINICARD_VZW 0x8182
#define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da #define KYOCERA_PRODUCT_KPC650 0x17da
#define KYOCERA_PRODUCT_KPC680 0x180a #define KYOCERA_PRODUCT_KPC680 0x180a
...@@ -274,12 +296,6 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po ...@@ -274,12 +296,6 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define ERICSSON_VENDOR_ID 0x0bdb #define ERICSSON_VENDOR_ID 0x0bdb
#define ERICSSON_PRODUCT_F3507G 0x1900 #define ERICSSON_PRODUCT_F3507G 0x1900
/* Pantech products */
#define PANTECH_VENDOR_ID 0x106c
#define PANTECH_PRODUCT_PC5740 0x3701
#define PANTECH_PRODUCT_PC5750 0x3702 /* PX-500 */
#define PANTECH_PRODUCT_UM150 0x3711
static struct usb_device_id option_ids[] = { static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
...@@ -395,31 +411,37 @@ static struct usb_device_id option_ids[] = { ...@@ -395,31 +411,37 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_1) }, /* Novatel EVDO product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, /* Novatel EVDO product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_1) }, /* Novatel HSPA product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_1) }, /* Novatel Embedded product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_1) }, /* Novatel Global product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_2) }, /* Novatel EVDO product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_2) }, /* Novatel HSPA product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_2) }, /* Novatel Embedded product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_2) }, /* Novatel Global product */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) },
{ USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5505_MINICARD) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8117) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_EXPRESSCARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8118) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5510_EXPRESSCARD) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8128) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_SPRINT) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8129) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_TELUS) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8133) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_VZW) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8136) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_SPRINT) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_TELUS) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8138) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_CINGULAR) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8147) }, /* Dell Wireless 5530 Mobile Broadband (3G HSPA) Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_L) }, /* Dell Wireless HSDPA 5520 */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_I) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8147) }, /* Dell Wireless 5530 Mobile Broadband (3G HSPA) Mini-Card */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
...@@ -488,9 +510,6 @@ static struct usb_device_id option_ids[] = { ...@@ -488,9 +510,6 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
{ USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G) }, { USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G) },
{ USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_PC5740) },
{ USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_PC5750) },
{ USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_UM150) },
{ } /* Terminating entry */ { } /* Terminating entry */
}; };
MODULE_DEVICE_TABLE(usb, option_ids); MODULE_DEVICE_TABLE(usb, option_ids);
......
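
The option.c changes replace raw hex product ids with named macros, add the Dell 5730 variants, and drop the Pantech entries; the table still ends with an empty terminating entry, which is what the matching code and MODULE_DEVICE_TABLE() depend on. A simplified standalone sketch of that table convention (stand-in types, not the real struct usb_device_id):

#include <stdio.h>

struct dev_id { unsigned short vid, pid; };

#define DEMO_DELL_VENDOR_ID              0x413C
#define DEMO_DELL_PRODUCT_5700_MINICARD  0x8114

static const struct dev_id ids[] = {
        { DEMO_DELL_VENDOR_ID, DEMO_DELL_PRODUCT_5700_MINICARD },
        { 0 }                             /* terminating entry stops the scan */
};

static int id_match(unsigned short vid, unsigned short pid)
{
        const struct dev_id *p;

        for (p = ids; p->vid || p->pid; p++)
                if (p->vid == vid && p->pid == pid)
                        return 1;
        return 0;
}

int main(void)
{
        printf("match: %d\n", id_match(0x413C, 0x8114));
        return 0;
}
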
...@@ -176,7 +176,7 @@ static unsigned int product_5052_count; ...@@ -176,7 +176,7 @@ static unsigned int product_5052_count;
/* the array dimension is the number of default entries plus */ /* the array dimension is the number of default entries plus */
/* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
/* null entry */ /* null entry */
static struct usb_device_id ti_id_table_3410[7+TI_EXTRA_VID_PID_COUNT+1] = { static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
...@@ -185,9 +185,11 @@ static struct usb_device_id ti_id_table_3410[7+TI_EXTRA_VID_PID_COUNT+1] = { ...@@ -185,9 +185,11 @@ static struct usb_device_id ti_id_table_3410[7+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
}; };
static struct usb_device_id ti_id_table_5052[4+TI_EXTRA_VID_PID_COUNT+1] = { static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
...@@ -195,7 +197,7 @@ static struct usb_device_id ti_id_table_5052[4+TI_EXTRA_VID_PID_COUNT+1] = { ...@@ -195,7 +197,7 @@ static struct usb_device_id ti_id_table_5052[4+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
}; };
static struct usb_device_id ti_id_table_combined[6+2*TI_EXTRA_VID_PID_COUNT+1] = { static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
...@@ -208,6 +210,8 @@ static struct usb_device_id ti_id_table_combined[6+2*TI_EXTRA_VID_PID_COUNT+1] = ...@@ -208,6 +210,8 @@ static struct usb_device_id ti_id_table_combined[6+2*TI_EXTRA_VID_PID_COUNT+1] =
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
{ } { }
}; };
......
...@@ -30,6 +30,8 @@ ...@@ -30,6 +30,8 @@
#define IBM_VENDOR_ID 0x04b3 #define IBM_VENDOR_ID 0x04b3
#define TI_3410_PRODUCT_ID 0x3410 #define TI_3410_PRODUCT_ID 0x3410
#define IBM_4543_PRODUCT_ID 0x4543 #define IBM_4543_PRODUCT_ID 0x4543
#define IBM_454B_PRODUCT_ID 0x454b
#define IBM_454C_PRODUCT_ID 0x454c
#define TI_3410_EZ430_ID 0xF430 /* TI ez430 development tool */ #define TI_3410_EZ430_ID 0xF430 /* TI ez430 development tool */
#define TI_5052_BOOT_PRODUCT_ID 0x5052 /* no EEPROM, no firmware */ #define TI_5052_BOOT_PRODUCT_ID 0x5052 /* no EEPROM, no firmware */
#define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */ #define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */
......
...@@ -64,6 +64,7 @@ ...@@ -64,6 +64,7 @@
*/ */
#define VENDOR_ID_NOKIA 0x0421 #define VENDOR_ID_NOKIA 0x0421
#define VENDOR_ID_NIKON 0x04b0 #define VENDOR_ID_NIKON 0x04b0
#define VENDOR_ID_PENTAX 0x0a17
#define VENDOR_ID_MOTOROLA 0x22b8 #define VENDOR_ID_MOTOROLA 0x22b8
/*********************************************************************** /***********************************************************************
...@@ -158,6 +159,7 @@ static int slave_configure(struct scsi_device *sdev) ...@@ -158,6 +159,7 @@ static int slave_configure(struct scsi_device *sdev)
switch (le16_to_cpu(us->pusb_dev->descriptor.idVendor)) { switch (le16_to_cpu(us->pusb_dev->descriptor.idVendor)) {
case VENDOR_ID_NOKIA: case VENDOR_ID_NOKIA:
case VENDOR_ID_NIKON: case VENDOR_ID_NIKON:
case VENDOR_ID_PENTAX:
case VENDOR_ID_MOTOROLA: case VENDOR_ID_MOTOROLA:
if (!(us->fflags & (US_FL_FIX_CAPACITY | if (!(us->fflags & (US_FL_FIX_CAPACITY |
US_FL_CAPACITY_OK))) US_FL_CAPACITY_OK)))
......
...@@ -558,32 +558,10 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb) ...@@ -558,32 +558,10 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) { if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) {
/* The command succeeded. If the capacity is odd /* The command succeeded. We know this device doesn't
* (i.e., if the sector number is even) then the * have the last-sector bug, so stop checking it.
* "always-even" heuristic would be wrong for this
* device. Issue a WARN() so that the kerneloops.org
* project will be notified and we will then know to
* mark the device with a CAPACITY_OK flag. Hopefully
* this will occur for only a few devices.
*
* Use the sign of us->last_sector_hacks to tell whether
* the warning has already been issued; we don't need
* more than one warning per device.
*/ */
if (!(sector & 1) && us->use_last_sector_hacks > 0) { us->use_last_sector_hacks = 0;
unsigned vid = le16_to_cpu(
us->pusb_dev->descriptor.idVendor);
unsigned pid = le16_to_cpu(
us->pusb_dev->descriptor.idProduct);
unsigned rev = le16_to_cpu(
us->pusb_dev->descriptor.bcdDevice);
WARN(1, "%s: Successful last sector success at %u, "
"device %04x:%04x:%04x\n",
sdkp->disk->disk_name, sector,
vid, pid, rev);
us->use_last_sector_hacks = -1;
}
} else { } else {
/* The command failed. Allow up to 3 retries in case this /* The command failed. Allow up to 3 retries in case this
...@@ -599,14 +577,6 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb) ...@@ -599,14 +577,6 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
srb->result = SAM_STAT_CHECK_CONDITION; srb->result = SAM_STAT_CHECK_CONDITION;
memcpy(srb->sense_buffer, record_not_found, memcpy(srb->sense_buffer, record_not_found,
sizeof(record_not_found)); sizeof(record_not_found));
/* In theory we might want to issue a WARN() here if the
* capacity is even, since it could indicate the device
* has the READ CAPACITY bug _and_ the real capacity is
* odd. But it could also indicate that the device
* simply can't access its last sector, a failure mode
* which is surprisingly common. So no warning.
*/
} }
done: done:
......
...@@ -1214,7 +1214,7 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff, ...@@ -1214,7 +1214,7 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
"Datafab", "Datafab",
"KECF-USB", "KECF-USB",
US_SC_DEVICE, US_PR_DEVICE, NULL, US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ), US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY ),
/* Reported by Rauch Wolke <rauchwolke@gmx.net> */ /* Reported by Rauch Wolke <rauchwolke@gmx.net> */
UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff, UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
...@@ -1354,21 +1354,6 @@ UNUSUAL_DEV( 0x0a17, 0x0004, 0x1000, 0x1000, ...@@ -1354,21 +1354,6 @@ UNUSUAL_DEV( 0x0a17, 0x0004, 0x1000, 0x1000,
US_SC_DEVICE, US_PR_DEVICE, NULL, US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ), US_FL_FIX_INQUIRY ),
/* Submitted by Per Winkvist <per.winkvist@uk.com> */
UNUSUAL_DEV( 0x0a17, 0x006, 0x0000, 0xffff,
"Pentax",
"Optio S/S4",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
/* Reported by Jaak Ristioja <Ristioja@gmail.com> */
UNUSUAL_DEV( 0x0a17, 0x006e, 0x0100, 0x0100,
"Pentax",
"K10D",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
/* These are virtual windows driver CDs, which the zd1211rw driver /* These are virtual windows driver CDs, which the zd1211rw driver
* automatically converts into WLAN devices. */ * automatically converts into WLAN devices. */
UNUSUAL_DEV( 0x0ace, 0x2011, 0x0101, 0x0101, UNUSUAL_DEV( 0x0ace, 0x2011, 0x0101, 0x0101,
......
...@@ -1054,9 +1054,10 @@ config FB_RIVA_BACKLIGHT ...@@ -1054,9 +1054,10 @@ config FB_RIVA_BACKLIGHT
config FB_I810 config FB_I810
tristate "Intel 810/815 support (EXPERIMENTAL)" tristate "Intel 810/815 support (EXPERIMENTAL)"
depends on FB && EXPERIMENTAL && PCI && X86_32 depends on EXPERIMENTAL && PCI && X86_32
select AGP select AGP
select AGP_INTEL select AGP_INTEL
select FB
select FB_MODE_HELPERS select FB_MODE_HELPERS
select FB_CFB_FILLRECT select FB_CFB_FILLRECT
select FB_CFB_COPYAREA select FB_CFB_COPYAREA
...@@ -1119,7 +1120,8 @@ config FB_CARILLO_RANCH ...@@ -1119,7 +1120,8 @@ config FB_CARILLO_RANCH
config FB_INTEL config FB_INTEL
tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)" tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)"
depends on FB && EXPERIMENTAL && PCI && X86 depends on EXPERIMENTAL && PCI && X86
select FB
select AGP select AGP
select AGP_INTEL select AGP_INTEL
select FB_MODE_HELPERS select FB_MODE_HELPERS
......
...@@ -1530,8 +1530,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root ...@@ -1530,8 +1530,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
* for higher level blocks, try not to allocate blocks * for higher level blocks, try not to allocate blocks
* with the block and the parent locks held. * with the block and the parent locks held.
*/ */
if (level > 0 && !prealloc_block.objectid && if (level > 0 && !prealloc_block.objectid) {
btrfs_path_lock_waiting(p, level)) {
u32 size = b->len; u32 size = b->len;
u64 hint = b->start; u64 hint = b->start;
......
...@@ -236,25 +236,3 @@ int btrfs_tree_locked(struct extent_buffer *eb) ...@@ -236,25 +236,3 @@ int btrfs_tree_locked(struct extent_buffer *eb)
return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) || return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
spin_is_locked(&eb->lock); spin_is_locked(&eb->lock);
} }
/*
* btrfs_search_slot uses this to decide if it should drop its locks
* before doing something expensive like allocating free blocks for cow.
*/
int btrfs_path_lock_waiting(struct btrfs_path *path, int level)
{
int i;
struct extent_buffer *eb;
for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) {
eb = path->nodes[i];
if (!eb)
break;
smp_mb();
if (spin_is_contended(&eb->lock) ||
waitqueue_active(&eb->lock_wq))
return 1;
}
return 0;
}
...@@ -26,8 +26,6 @@ int btrfs_tree_locked(struct extent_buffer *eb); ...@@ -26,8 +26,6 @@ int btrfs_tree_locked(struct extent_buffer *eb);
int btrfs_try_tree_lock(struct extent_buffer *eb); int btrfs_try_tree_lock(struct extent_buffer *eb);
int btrfs_try_spin_lock(struct extent_buffer *eb); int btrfs_try_spin_lock(struct extent_buffer *eb);
int btrfs_path_lock_waiting(struct btrfs_path *path, int level);
void btrfs_set_lock_blocking(struct extent_buffer *eb); void btrfs_set_lock_blocking(struct extent_buffer *eb);
void btrfs_clear_lock_blocking(struct extent_buffer *eb); void btrfs_clear_lock_blocking(struct extent_buffer *eb);
#endif #endif
...@@ -427,7 +427,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, ...@@ -427,7 +427,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out; goto out;
case -EAGAIN: case -EAGAIN:
ret = nlm_lck_denied; ret = nlm_lck_denied;
goto out; break;
case FILE_LOCK_DEFERRED: case FILE_LOCK_DEFERRED:
if (wait) if (wait)
break; break;
...@@ -443,6 +443,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, ...@@ -443,6 +443,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out; goto out;
} }
ret = nlm_lck_denied;
if (!wait)
goto out;
ret = nlm_lck_blocked; ret = nlm_lck_blocked;
/* Append to list of blocked */ /* Append to list of blocked */
......
...@@ -222,7 +222,7 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) ...@@ -222,7 +222,7 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
static inline void crypto_free_shash(struct crypto_shash *tfm) static inline void crypto_free_shash(struct crypto_shash *tfm)
{ {
crypto_free_tfm(crypto_shash_tfm(tfm)); crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
} }
static inline unsigned int crypto_shash_alignmask( static inline unsigned int crypto_shash_alignmask(
......
...@@ -261,6 +261,7 @@ typedef struct drm_i915_irq_wait { ...@@ -261,6 +261,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_LAST_DISPATCH 3 #define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4 #define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5 #define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
typedef struct drm_i915_getparam { typedef struct drm_i915_getparam {
int param; int param;
...@@ -272,6 +273,7 @@ typedef struct drm_i915_getparam { ...@@ -272,6 +273,7 @@ typedef struct drm_i915_getparam {
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 #define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3 #define I915_SETPARAM_ALLOW_BATCHBUFFER 3
#define I915_SETPARAM_NUM_USED_FENCES 4
typedef struct drm_i915_setparam { typedef struct drm_i915_setparam {
int param; int param;
......
...@@ -552,7 +552,12 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, ...@@ -552,7 +552,12 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend, const struct crypto_type *frontend,
u32 type, u32 mask); u32 type, u32 mask);
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_free_tfm(struct crypto_tfm *tfm); void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
return crypto_destroy_tfm(tfm, tfm);
}
int alg_test(const char *driver, const char *alg, u32 type, u32 mask); int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
......
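
crypto_destroy_tfm() takes both the allocation to free ("mem") and the crypto_tfm to tear down, so crypto_free_tfm() becomes an inline that passes the same pointer twice, while a frontend helper such as crypto_free_shash() above frees its own handle but destroys through the underlying crypto_tfm. A simplified standalone sketch of that split, using stand-in types rather than the kernel crypto API:

#include <stdio.h>
#include <stdlib.h>

struct base_tfm  { const char *alg; };
struct shash_tfm { struct base_tfm base; unsigned int digestsize; };

/* free "mem" as one block, tear down through the embedded base object */
static void destroy_tfm(void *mem, struct base_tfm *tfm)
{
        printf("tearing down %s\n", tfm->alg);
        free(mem);
}

/* plain handle: the allocation and the base object are the same pointer */
static void free_tfm(struct base_tfm *tfm)
{
        destroy_tfm(tfm, tfm);
}

/* frontend handle: free the wrapper, destroy via its embedded base */
static void free_shash(struct shash_tfm *tfm)
{
        destroy_tfm(tfm, &tfm->base);
}

int main(void)
{
        struct base_tfm  *b = calloc(1, sizeof(*b));
        struct shash_tfm *s = calloc(1, sizeof(*s));

        if (!b || !s)
                return 1;
        b->alg = "plain-tfm";
        s->base.alg = "shash-wrapper";

        free_tfm(b);
        free_shash(s);
        return 0;
}
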
...@@ -124,7 +124,12 @@ do { \ ...@@ -124,7 +124,12 @@ do { \
#ifdef CONFIG_GENERIC_LOCKBREAK #ifdef CONFIG_GENERIC_LOCKBREAK
#define spin_is_contended(lock) ((lock)->break_lock) #define spin_is_contended(lock) ((lock)->break_lock)
#else #else
#ifdef __raw_spin_is_contended
#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) #define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
#else
#define spin_is_contended(lock) (((void)(lock), 0))
#endif /*__raw_spin_is_contended*/
#endif #endif
/** /**
......
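
After this hunk spin_is_contended() has three possible expansions: the break_lock field under CONFIG_GENERIC_LOCKBREAK, the architecture helper when the architecture advertises one by defining __raw_spin_is_contended as a macro, and a constant 0 everywhere else, so callers still compile on architectures without a contention probe. A standalone sketch of that selection with dummy lock types (not the real kernel headers):

#include <stdio.h>

struct demo_raw_lock { int locked; };
struct demo_lock     { struct demo_raw_lock raw_lock; int break_lock; };

/* an opting-in architecture supplies the helper and the marker macro */
static inline int __raw_spin_is_contended(struct demo_raw_lock *l)
{
        (void)l;
        return 0;                 /* real implementations inspect ticket counters etc. */
}
#define __raw_spin_is_contended __raw_spin_is_contended

#ifdef CONFIG_GENERIC_LOCKBREAK
#define spin_is_contended(lock) ((lock)->break_lock)
#elif defined(__raw_spin_is_contended)
#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
#else
#define spin_is_contended(lock) (((void)(lock), 0))
#endif

int main(void)
{
        struct demo_lock l = { { 0 }, 0 };

        printf("contended: %d\n", spin_is_contended(&l));
        return 0;
}
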
...@@ -114,12 +114,15 @@ int __ref profile_init(void) ...@@ -114,12 +114,15 @@ int __ref profile_init(void)
if (!slab_is_available()) { if (!slab_is_available()) {
prof_buffer = alloc_bootmem(buffer_bytes); prof_buffer = alloc_bootmem(buffer_bytes);
alloc_bootmem_cpumask_var(&prof_cpu_mask); alloc_bootmem_cpumask_var(&prof_cpu_mask);
cpumask_copy(prof_cpu_mask, cpu_possible_mask);
return 0; return 0;
} }
if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL)) if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
return -ENOMEM; return -ENOMEM;
cpumask_copy(prof_cpu_mask, cpu_possible_mask);
prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
if (prof_buffer) if (prof_buffer)
return 0; return 0;
......
...@@ -311,7 +311,10 @@ long mlock_vma_pages_range(struct vm_area_struct *vma, ...@@ -311,7 +311,10 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
is_vm_hugetlb_page(vma) || is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current))) { vma == get_gate_vma(current))) {
return __mlock_vma_pages_range(vma, start, end, 1); __mlock_vma_pages_range(vma, start, end, 1);
/* Hide errors from mmap() and other callers */
return 0;
} }
/* /*
......