Commit 442d1ba2 authored by Linus Torvalds

Merge branch 'edac-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp

* 'edac-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp:
  amd64_edac: Disable DRAM ECC injection on K8
  EDAC: Fixup scrubrate manipulation
  amd64_edac: Remove two-stage initialization
  amd64_edac: Check ECC capabilities initially
  amd64_edac: Carve out ECC-related hw settings
  amd64_edac: Remove PCI ECS enabling functions
  amd64_edac: Remove explicit Kconfig PCI dependency
  amd64_edac: Allocate driver instances dynamically
  amd64_edac: Rework printk macros
  amd64_edac: Rename CPU PCI devices
  amd64_edac: Concentrate per-family init even more
  amd64_edac: Cleanup the CPU PCI device reservation
  amd64_edac: Simplify CPU family detection
  amd64_edac: Add per-family init function
  amd64_edac: Use cached extended CPU model
  amd64_edac: Remove F11h support
parents fb5131e1 a135cef7
@@ -75,11 +75,11 @@ config EDAC_MCE
 	bool
 
 config EDAC_AMD64
-	tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
-	depends on EDAC_MM_EDAC && AMD_NB && X86_64 && PCI && EDAC_DECODE_MCE
+	tristate "AMD64 (Opteron, Athlon64) K8, F10h"
+	depends on EDAC_MM_EDAC && AMD_NB && X86_64 && EDAC_DECODE_MCE
 	help
-	  Support for error detection and correction on the AMD 64
-	  Families of Memory Controllers (K8, F10h and F11h)
+	  Support for error detection and correction of DRAM ECC errors on
+	  the AMD64 families of memory controllers (K8 and F10h)
 
 config EDAC_AMD64_ERROR_INJECTION
 	bool "Sysfs HW Error injection facilities"
......
This diff is collapsed.
@@ -74,11 +74,26 @@
 #include "edac_core.h"
 #include "mce_amd.h"
 
-#define amd64_printk(level, fmt, arg...) \
-	edac_printk(level, "amd64", fmt, ##arg)
+#define amd64_debug(fmt, arg...) \
+	edac_printk(KERN_DEBUG, "amd64", fmt, ##arg)
 
-#define amd64_mc_printk(mci, level, fmt, arg...) \
-	edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg)
+#define amd64_info(fmt, arg...) \
+	edac_printk(KERN_INFO, "amd64", fmt, ##arg)
+
+#define amd64_notice(fmt, arg...) \
+	edac_printk(KERN_NOTICE, "amd64", fmt, ##arg)
+
+#define amd64_warn(fmt, arg...) \
+	edac_printk(KERN_WARNING, "amd64", fmt, ##arg)
+
+#define amd64_err(fmt, arg...) \
+	edac_printk(KERN_ERR, "amd64", fmt, ##arg)
+
+#define amd64_mc_warn(mci, fmt, arg...) \
+	edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg)
+
+#define amd64_mc_err(mci, fmt, arg...) \
+	edac_mc_chipset_printk(mci, KERN_ERR, "amd64", fmt, ##arg)
 
 /*
  * Throughout the comments in this code, the following terms are used:
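Note: the conversion above folds the message level into the macro name, so call sites lose one argument. A minimal hedged sketch of a converted call site; the wrapper function and its arguments are only illustrative, not taken from a specific file:

	static void example_report_read_error(struct pci_dev *pdev, int offset)
	{
		/* old style: level passed explicitly to the generic macro */
		/* amd64_printk(KERN_WARNING, "error reading F%dx%x\n",
			PCI_FUNC(pdev->devfn), offset); */

		/* new style: the level is implied by the macro name */
		amd64_warn("error reading F%dx%x\n", PCI_FUNC(pdev->devfn), offset);
	}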
@@ -129,11 +144,9 @@
  * sections 3.5.4 and 3.5.5 for more information.
  */
 
-#define EDAC_AMD64_VERSION	" Ver: 3.3.0 " __DATE__
+#define EDAC_AMD64_VERSION	"v3.3.0"
 #define EDAC_MOD_STR		"amd64_edac"
 
-#define EDAC_MAX_NUMNODES	8
-
 /* Extended Model from CPUID, for CPU Revision numbers */
 #define K8_REV_D		1
 #define K8_REV_E		2
@@ -322,9 +335,6 @@
 #define K8_SCRCTRL			0x58
 
 #define F10_NB_CFG_LOW			0x88
-#define F10_NB_CFG_LOW_ENABLE_EXT_CFG	BIT(14)
-
-#define F10_NB_CFG_HIGH			0x8C
 
 #define F10_ONLINE_SPARE		0xB0
 #define F10_ONLINE_SPARE_SWAPDONE0(x)	((x) & BIT(1))
@@ -373,7 +383,6 @@ static inline int get_node_id(struct pci_dev *pdev)
 enum amd64_chipset_families {
 	K8_CPUS = 0,
 	F10_CPUS,
-	F11_CPUS,
 };
 
 /* Error injection control structure */
@@ -384,16 +393,13 @@ struct error_injection {
 };
 
 struct amd64_pvt {
+	struct low_ops *ops;
+
 	/* pci_device handles which we utilize */
-	struct pci_dev *addr_f1_ctl;
-	struct pci_dev *dram_f2_ctl;
-	struct pci_dev *misc_f3_ctl;
+	struct pci_dev *F1, *F2, *F3;
 
 	int mc_node_id;		/* MC index of this MC node */
 	int ext_model;		/* extended model value of this node */
 
-	struct low_ops *ops;	/* pointer to per PCI Device ID func table */
-
 	int channel_count;
 
 	/* Raw registers */
@@ -455,27 +461,27 @@ struct amd64_pvt {
 	/* place to store error injection parameters prior to issue */
 	struct error_injection injection;
 
-	/* Save old hw registers' values before we modified them */
-	u32 nbctl_mcgctl_saved;	/* When true, following 2 are valid */
-	u32 old_nbctl;
+	/* DCT per-family scrubrate setting */
+	u32 min_scrubrate;
 
-	/* MC Type Index value: socket F vs Family 10h */
-	u32 mc_type_index;
+	/* family name this instance is running on */
+	const char *ctl_name;
+};
+
+/*
+ * per-node ECC settings descriptor
+ */
+struct ecc_settings {
+	u32 old_nbctl;
+	bool nbctl_valid;
 
+	/* misc settings */
 	struct flags {
-		unsigned long cf8_extcfg:1;
 		unsigned long nb_mce_enable:1;
 		unsigned long nb_ecc_prev:1;
 	} flags;
 };
 
+struct scrubrate {
+	u32 scrubval;		/* bit pattern for scrub rate */
+	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
+};
+
+extern struct scrubrate scrubrates[23];
+
 extern const char *tt_msgs[4];
 extern const char *ll_msgs[4];
 extern const char *rrrr_msgs[16];
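Note: struct scrubrate pairs a raw scrub-control encoding with the DRAM bandwidth that setting consumes, and drivers walk such a table to map a requested rate onto the closest supported one. A hedged sketch of that lookup with made-up values; the real shared table has 23 entries, and both the numbers and the helper below are illustrative only (kernel context assumed for ARRAY_SIZE):

	/* illustrative entries only, sorted from highest to lowest bandwidth */
	static const struct scrubrate example_rates[] = {
		{ 0x01, 1600000 },	/* fastest setting, most bandwidth consumed */
		{ 0x02,  800000 },
		{ 0x03,  400000 },
		{ 0x00,       0 },	/* scrubbing disabled */
	};

	/* pick the closest supported rate not exceeding the requested bandwidth */
	static u32 example_pick_scrubval(u32 requested_bw)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(example_rates) - 1; i++)
			if (example_rates[i].bandwidth <= requested_bw)
				break;

		return example_rates[i].scrubval;
	}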
@@ -517,23 +523,10 @@ struct low_ops {
 
 struct amd64_family_type {
 	const char *ctl_name;
-	u16 addr_f1_ctl;
-	u16 misc_f3_ctl;
+	u16 f1_id, f3_id;
 	struct low_ops ops;
 };
 
-static struct amd64_family_type amd64_family_types[];
-
-static inline const char *get_amd_family_name(int index)
-{
-	return amd64_family_types[index].ctl_name;
-}
-
-static inline struct low_ops *family_ops(int index)
-{
-	return &amd64_family_types[index].ops;
-}
-
 static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
 					   u32 *val, const char *func)
 {
@@ -541,8 +534,8 @@ static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
 
 	err = pci_read_config_dword(pdev, offset, val);
 	if (err)
-		amd64_printk(KERN_WARNING, "%s: error reading F%dx%x.\n",
-			     func, PCI_FUNC(pdev->devfn), offset);
+		amd64_warn("%s: error reading F%dx%x.\n",
+			   func, PCI_FUNC(pdev->devfn), offset);
 
 	return err;
 }
@@ -556,7 +549,6 @@ static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
  */
 #define K8_MIN_SCRUB_RATE_BITS	0x0
 #define F10_MIN_SCRUB_RATE_BITS	0x5
-#define F11_MIN_SCRUB_RATE_BITS	0x6
 
 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
 			     u64 *hole_offset, u64 *hole_size);
@@ -23,9 +23,7 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
 
 	if (ret != -EINVAL) {
 		if (value > 3) {
-			amd64_printk(KERN_WARNING,
-				     "%s: invalid section 0x%lx\n",
-				     __func__, value);
+			amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
 			return -EINVAL;
 		}
@@ -58,9 +56,7 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
 
 	if (ret != -EINVAL) {
 		if (value > 8) {
-			amd64_printk(KERN_WARNING,
-				     "%s: invalid word 0x%lx\n",
-				     __func__, value);
+			amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
 			return -EINVAL;
 		}
@@ -92,9 +88,8 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
 
 	if (ret != -EINVAL) {
 		if (value & 0xFFFF0000) {
-			amd64_printk(KERN_WARNING,
-				     "%s: invalid EccVector: 0x%lx\n",
-				     __func__, value);
+			amd64_warn("%s: invalid EccVector: 0x%lx\n",
+				   __func__, value);
 			return -EINVAL;
 		}
@@ -122,15 +117,13 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
 
 		/* Form value to choose 16-byte section of cacheline */
 		section = F10_NB_ARRAY_DRAM_ECC |
				SET_NB_ARRAY_ADDRESS(pvt->injection.section);
-		pci_write_config_dword(pvt->misc_f3_ctl,
-				       F10_NB_ARRAY_ADDR, section);
+		pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
 
 		word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
						pvt->injection.bit_map);
 
 		/* Issue 'word' and 'bit' along with the READ request */
-		pci_write_config_dword(pvt->misc_f3_ctl,
-				       F10_NB_ARRAY_DATA, word_bits);
+		pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
 
 		debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
@@ -157,15 +150,13 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
 
 		/* Form value to choose 16-byte section of cacheline */
 		section = F10_NB_ARRAY_DRAM_ECC |
				SET_NB_ARRAY_ADDRESS(pvt->injection.section);
-		pci_write_config_dword(pvt->misc_f3_ctl,
-				       F10_NB_ARRAY_ADDR, section);
+		pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
 
 		word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
						pvt->injection.bit_map);
 
 		/* Issue 'word' and 'bit' along with the READ request */
-		pci_write_config_dword(pvt->misc_f3_ctl,
-				       F10_NB_ARRAY_DATA, word_bits);
+		pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
 
 		debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
......
@@ -818,9 +818,10 @@ static void cpc925_del_edac_devices(void)
 }
 
 /* Convert current back-ground scrub rate into byte/sec bandwith */
-static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
 {
 	struct cpc925_mc_pdata *pdata = mci->pvt_info;
+	int bw;
 	u32 mscr;
 	u8 si;
 
@@ -832,11 +833,11 @@ static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 
 	if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
 	    (si == 0)) {
 		cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
-		*bw = 0;
+		bw = 0;
 	} else
-		*bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
+		bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
 
-	return 0;
+	return bw;
 }
 
 /* Return 0 for single channel; 1 for dual channel */
......
@@ -983,11 +983,11 @@ static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
 
 	pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
 
-	return 0;
+	return scrubrates[i].bandwidth;
 }
 
 /* Convert current scrub rate value into byte/sec bandwidth */
-static int get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
 {
 	const struct scrubrate *scrubrates;
 	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
 
@@ -1013,10 +1013,8 @@ static int get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 			"Invalid sdram scrub control value: 0x%x\n", scrubval);
 		return -1;
 	}
 
-	*bw = scrubrates[i].bandwidth;
-
-	return 0;
+	return scrubrates[i].bandwidth;
 }
 
 /* Return 1 if dual channel mode is active. Else return 0. */
......
@@ -68,9 +68,10 @@
 #define EDAC_PCI "PCI"
 #define EDAC_DEBUG "DEBUG"
 
+extern const char *edac_mem_types[];
+
 #ifdef CONFIG_EDAC_DEBUG
 extern int edac_debug_level;
-extern const char *edac_mem_types[];
 
 #define edac_debug_printk(level, fmt, arg...)                           \
 	do {                                                            \
@@ -386,7 +387,7 @@ struct mem_ctl_info {
 	   representation and converts it to the closest matching
 	   bandwith in bytes/sec.
 	 */
-	int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
+	int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci);
 
 	/* pointer to edac checking routine */
......
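Note: with the u32 *bw out-parameter dropped, ->get_sdram_scrub_rate() now reports the current bandwidth in bytes/sec through its return value, 0 when scrubbing is disabled and a negative value on error. A hedged sketch of a conforming callback, modeled loosely on the i5100 conversion further down; the private struct, register name and decode helper are made-up placeholders:

	static int example_get_sdram_scrub_rate(struct mem_ctl_info *mci)
	{
		struct example_pvt *priv = mci->pvt_info;	/* hypothetical private data */
		u32 dw;

		pci_read_config_dword(priv->pdev, EXAMPLE_SCRUB_CTL, &dw);

		if (!(dw & EXAMPLE_SCRUB_EN))
			return 0;			/* scrubbing disabled */

		return example_decode_bandwidth(dw);	/* bytes/sec */
	}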
@@ -76,6 +76,8 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
 	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
 }
 
+#endif				/* CONFIG_EDAC_DEBUG */
+
 /*
  * keep those in sync with the enum mem_type
 */
@@ -100,8 +102,6 @@
 };
 EXPORT_SYMBOL_GPL(edac_mem_types);
 
-#endif				/* CONFIG_EDAC_DEBUG */
-
 /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
  * Adjust 'ptr' so that its alignment is at least as stringent as what the
  * compiler would provide for X and return the aligned result.
......
@@ -436,56 +436,55 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
 	return count;
 }
 
-/* memory scrubbing */
+/* Memory scrubbing interface:
+ *
+ * A MC driver can limit the scrubbing bandwidth based on the CPU type.
+ * Therefore, ->set_sdram_scrub_rate should be made to return the actual
+ * bandwidth that is accepted or 0 when scrubbing is to be disabled.
+ *
+ * Negative value still means that an error has occurred while setting
+ * the scrub rate.
+ */
 static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
 					  const char *data, size_t count)
 {
 	unsigned long bandwidth = 0;
-	int err;
+	int new_bw = 0;
 
-	if (!mci->set_sdram_scrub_rate) {
-		edac_printk(KERN_WARNING, EDAC_MC,
-			    "Memory scrub rate setting not implemented!\n");
+	if (!mci->set_sdram_scrub_rate)
 		return -EINVAL;
-	}
 
 	if (strict_strtoul(data, 10, &bandwidth) < 0)
 		return -EINVAL;
 
-	err = mci->set_sdram_scrub_rate(mci, (u32)bandwidth);
-	if (err) {
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Failed setting scrub rate to %lu\n", bandwidth);
-		return -EINVAL;
-	}
-	else {
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Scrub rate set to: %lu\n", bandwidth);
+	new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
+	if (new_bw >= 0) {
+		edac_printk(KERN_DEBUG, EDAC_MC, "Scrub rate set to %d\n", new_bw);
 		return count;
 	}
+
+	edac_printk(KERN_DEBUG, EDAC_MC, "Error setting scrub rate to: %lu\n", bandwidth);
+	return -EINVAL;
 }
 
+/*
+ * ->get_sdram_scrub_rate() return value semantics same as above.
+ */
 static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
 {
-	u32 bandwidth = 0;
-	int err;
+	int bandwidth = 0;
 
-	if (!mci->get_sdram_scrub_rate) {
-		edac_printk(KERN_WARNING, EDAC_MC,
-			    "Memory scrub rate reading not implemented\n");
+	if (!mci->get_sdram_scrub_rate)
 		return -EINVAL;
-	}
 
-	err = mci->get_sdram_scrub_rate(mci, &bandwidth);
-	if (err) {
+	bandwidth = mci->get_sdram_scrub_rate(mci);
+	if (bandwidth < 0) {
 		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
-		return err;
-	}
-	else {
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Read scrub rate: %d\n", bandwidth);
-		return sprintf(data, "%d\n", bandwidth);
+		return bandwidth;
 	}
+
+	edac_printk(KERN_DEBUG, EDAC_MC, "Read scrub rate: %d\n", bandwidth);
+	return sprintf(data, "%d\n", bandwidth);
 }
 
 /* default attribute files for the MCI object */
......
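Note: the new comment in mci_sdram_scrub_rate_store() defines the contract for ->set_sdram_scrub_rate(): return the bandwidth actually programmed (0 if scrubbing ends up disabled), or a negative value on failure. A hedged driver-side sketch following that contract, reusing the illustrative example_rates table from the earlier sketch; example_write_scrub_ctl() is a made-up register helper:

	static int example_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
	{
		struct example_pvt *priv = mci->pvt_info;	/* hypothetical private data */
		int i;

		/* table is assumed sorted from highest to lowest bandwidth */
		for (i = 0; i < ARRAY_SIZE(example_rates) - 1; i++)
			if (example_rates[i].bandwidth <= new_bw)
				break;

		if (example_write_scrub_ctl(priv, example_rates[i].scrubval))
			return -EIO;		/* negative: setting failed */

		/* report what was actually programmed; 0 means scrubbing is now off */
		return example_rates[i].bandwidth;
	}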
@@ -611,20 +611,17 @@ static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
 
 	bandwidth = 5900000 * i5100_mc_scrben(dw);
 
-	return 0;
+	return bandwidth;
 }
 
-static int i5100_get_scrub_rate(struct mem_ctl_info *mci,
-				u32 *bandwidth)
+static int i5100_get_scrub_rate(struct mem_ctl_info *mci)
 {
 	struct i5100_priv *priv = mci->pvt_info;
 	u32 dw;
 
 	pci_read_config_dword(priv->mc, I5100_MC, &dw);
 
-	*bandwidth = 5900000 * i5100_mc_scrben(dw);
-
-	return 0;
+	return 5900000 * i5100_mc_scrben(dw);
 }
 
 static struct pci_dev *pci_get_device_func(unsigned vendor,
......