Commit 115a5c2b authored by Dave Airlie

Merge remote branch 'korg/drm-radeon-next' into drm-linus

This merges some TTM overhauls that allow better object placement for the radeon GPUs that need scanout and cursor buffers within range of each other, along with an API change that stops returning ERESTART to userspace: ERESTARTSYS is now used properly internally, converted to EINTR, and caught correctly. Also lots of radeon fixes across the board.
parents 0b5e8db6 fb53f862
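
To illustrate the error-handling convention the merge describes: drivers now propagate -ERESTARTSYS from interruptible waits instead of leaking a private ERESTART code, and the signal-delivery core restarts the syscall or converts the value to EINTR. A minimal sketch of the pattern (illustrative only, not part of this commit; the helper name is made up):

    /* Illustrative sketch, not from this commit: a wait path that simply
     * propagates -ERESTARTSYS; the signal code turns it into EINTR or a
     * syscall restart, so the driver never has to translate it itself.
     */
    static long example_wait(wait_queue_head_t *q, atomic_t *done, long timeout)
    {
            long r;

            r = wait_event_interruptible_timeout(*q, atomic_read(done), timeout);
            if (r < 0)
                    return r;       /* typically -ERESTARTSYS */
            if (r == 0)
                    return -EBUSY;  /* timed out */
            return 0;
    }

The radeon_fence.c and TTM hunks below apply exactly this pattern.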
@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
 {
 	int count = 0;
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
 	drm_fb_helper_parse_command_line(dev);
 	count = drm_helper_probe_connector_modes(dev,
......
@@ -226,6 +226,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 }
 EXPORT_SYMBOL(drm_mm_get_block_generic);
 
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int atomic)
+{
+	struct drm_mm_node *align_splitoff = NULL;
+	unsigned tmp = 0;
+	unsigned wasted = 0;
+
+	if (node->start < start)
+		wasted += start - node->start;
+	if (alignment)
+		tmp = ((node->start + wasted) % alignment);
+
+	if (tmp)
+		wasted += alignment - tmp;
+	if (wasted) {
+		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
+		if (unlikely(align_splitoff == NULL))
+			return NULL;
+	}
+
+	if (node->size == size) {
+		list_del_init(&node->fl_entry);
+		node->free = 0;
+	} else {
+		node = drm_mm_split_at_start(node, size, atomic);
+	}
+
+	if (align_splitoff)
+		drm_mm_put_block(align_splitoff);
+
+	return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
  * Otherwise add to the free stack.
@@ -331,6 +369,56 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 }
 EXPORT_SYMBOL(drm_mm_search_free);
 
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int best_match)
+{
+	struct list_head *list;
+	const struct list_head *free_stack = &mm->fl_entry;
+	struct drm_mm_node *entry;
+	struct drm_mm_node *best;
+	unsigned long best_size;
+	unsigned wasted;
+
+	best = NULL;
+	best_size = ~0UL;
+
+	list_for_each(list, free_stack) {
+		entry = list_entry(list, struct drm_mm_node, fl_entry);
+		wasted = 0;
+
+		if (entry->size < size)
+			continue;
+
+		if (entry->start > end || (entry->start+entry->size) < start)
+			continue;
+
+		if (entry->start < start)
+			wasted += start - entry->start;
+
+		if (alignment) {
+			register unsigned tmp = (entry->start + wasted) % alignment;
+			if (tmp)
+				wasted += alignment - tmp;
+		}
+
+		if (entry->size >= size + wasted) {
+			if (!best_match)
+				return entry;
+			if (size < best_size) {
+				best = entry;
+				best_size = entry->size;
+			}
+		}
+	}
+
+	return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range);
+
 int drm_mm_clean(struct drm_mm * mm)
 {
 	struct list_head *head = &mm->ml_entry;
@@ -381,6 +469,26 @@ void drm_mm_takedown(struct drm_mm * mm)
 }
 EXPORT_SYMBOL(drm_mm_takedown);
 
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+	struct drm_mm_node *entry;
+	int total_used = 0, total_free = 0, total = 0;
+
+	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+			prefix, entry->start, entry->start + entry->size,
+			entry->size, entry->free ? "free" : "used");
+		total += entry->size;
+		if (entry->free)
+			total_free += entry->size;
+		else
+			total_used += entry->size;
+	}
+	printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+		total_used, total_free);
+}
+EXPORT_SYMBOL(drm_mm_debug_table);
+
 #if defined(CONFIG_DEBUG_FS)
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
......
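
The two new drm_mm entry points above are what makes range-restricted placement possible (e.g. keeping scanout and cursor allocations within reach of each other). A hypothetical caller would pair them like this (sketch only; the helper and its use here are illustrative, not taken from the diff):

    /* Hypothetical helper: allocate `size` bytes inside [start, end). */
    static struct drm_mm_node *alloc_in_range(struct drm_mm *mm,
                                              unsigned long size, unsigned align,
                                              unsigned long start, unsigned long end)
    {
            struct drm_mm_node *free_node;

            free_node = drm_mm_search_free_in_range(mm, size, align, start, end, 1);
            if (!free_node)
                    return NULL;
            /* Split the free block so the returned node lies inside the range. */
            return drm_mm_get_block_range(free_node, size, align, start, end);
    }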
@@ -499,8 +499,18 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	else
 		pll = &rdev->clock.p2pll;
 
-	radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-			   &ref_div, &post_div, pll_flags);
+	if (ASIC_IS_AVIVO(rdev)) {
+		if (radeon_new_pll)
+			radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
+						 &fb_div, &frac_fb_div,
+						 &ref_div, &post_div, pll_flags);
+		else
+			radeon_compute_pll(pll, adjusted_clock, &pll_clock,
+					   &fb_div, &frac_fb_div,
+					   &ref_div, &post_div, pll_flags);
+	} else
+		radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+				   &ref_div, &post_div, pll_flags);
 
 	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
 	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -599,8 +609,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 	}
 	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
 	radeon_bo_unreserve(rbo);
-	if (tiling_flags & RADEON_TILING_MACRO)
-		fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
 
 	switch (crtc->fb->bits_per_pixel) {
 	case 8:
@@ -630,6 +638,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 		return -EINVAL;
 	}
 
+	if (tiling_flags & RADEON_TILING_MACRO)
+		fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
+
 	if (tiling_flags & RADEON_TILING_MICRO)
 		fb_format |= AVIVO_D1GRPH_TILED;
......
@@ -3299,6 +3299,8 @@ int r100_resume(struct radeon_device *rdev)
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r100_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r100_startup(rdev);
 }
......
@@ -1250,6 +1250,8 @@ int r300_resume(struct radeon_device *rdev)
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r300_startup(rdev);
 }
......
@@ -231,7 +231,8 @@ int r420_resume(struct radeon_device *rdev)
 	}
 	/* Resume clock after posting */
 	r420_clock_resume(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r420_startup(rdev);
 }
......
@@ -220,6 +220,8 @@ int r520_resume(struct radeon_device *rdev)
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r520_startup(rdev);
 }
......
@@ -1845,6 +1845,14 @@ int r600_startup(struct radeon_device *rdev)
 {
 	int r;
 
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
 	r600_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		r600_agp_enable(rdev);
@@ -2026,25 +2034,17 @@ int r600_init(struct radeon_device *rdev)
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
-	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-		r = r600_init_microcode(rdev);
-		if (r) {
-			DRM_ERROR("Failed to load firmware!\n");
-			return r;
-		}
-	}
-
 	r = r600_pcie_gart_init(rdev);
 	if (r)
 		return r;
 
-	rdev->accel_working = true;
 	r = r600_blit_init(rdev);
 	if (r) {
-		DRM_ERROR("radeon: failled blitter (%d).\n", r);
+		DRM_ERROR("radeon: failed blitter (%d).\n", r);
 		return r;
 	}
 
+	rdev->accel_working = true;
 	r = r600_startup(rdev);
 	if (r) {
 		r600_suspend(rdev);
@@ -2056,12 +2056,12 @@ int r600_init(struct radeon_device *rdev)
 	if (rdev->accel_working) {
 		r = radeon_ib_pool_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
 			rdev->accel_working = false;
 		}
 		r = r600_ib_test(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 			rdev->accel_working = false;
 		}
 	}
......
@@ -88,6 +88,7 @@ extern int radeon_benchmarking;
 extern int radeon_testing;
 extern int radeon_connector_table;
 extern int radeon_tv;
+extern int radeon_new_pll;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -208,6 +209,8 @@ struct radeon_bo {
 	/* Protected by gem.mutex */
 	struct list_head list;
 	/* Protected by tbo.reserved */
+	u32 placements[3];
+	struct ttm_placement placement;
 	struct ttm_buffer_object tbo;
 	struct ttm_bo_kmap_obj kmap;
 	unsigned pin_count;
@@ -1012,6 +1015,7 @@ extern void radeon_surface_init(struct radeon_device *rdev);
 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
 extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
 extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
 
 /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
 struct r100_mc_save {
......
@@ -70,6 +70,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
 	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
 	struct _ATOM_GPIO_I2C_INFO *i2c_info;
 	uint16_t data_offset;
+	int i;
 
 	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
 	i2c.valid = false;
@@ -78,38 +79,43 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
 
 	i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
-	gpio = &i2c_info->asGPIO_Info[id];
-
-	i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
-	i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
-	i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
-	i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
-	i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
-	i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
-	i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
-	i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
-	i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
-	i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
-	i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
-	i2c.en_data_mask = (1 << gpio->ucDataEnShift);
-	i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
-	i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
-	i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
-	i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
-	if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
-		i2c.hw_capable = true;
-	else
-		i2c.hw_capable = false;
-
-	if (gpio->sucI2cId.ucAccess == 0xa0)
-		i2c.mm_i2c = true;
-	else
-		i2c.mm_i2c = false;
-
-	i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
-	i2c.valid = true;
+	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+		gpio = &i2c_info->asGPIO_Info[i];
+
+		if (gpio->sucI2cId.ucAccess == id) {
+			i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+			i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+			i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+			i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+			i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+			i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+			i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+			i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+			i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+			i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+			i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+			i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+			i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+			i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+			i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+			i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+			if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+				i2c.hw_capable = true;
+			else
+				i2c.hw_capable = false;
+
+			if (gpio->sucI2cId.ucAccess == 0xa0)
+				i2c.mm_i2c = true;
+			else
+				i2c.mm_i2c = false;
+
+			i2c.i2c_id = gpio->sucI2cId.ucAccess;
+			i2c.valid = true;
+		}
+	}
 
 	return i2c;
 }
@@ -503,6 +509,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 							usRecordOffset));
 					ATOM_I2C_RECORD *i2c_record;
 					ATOM_HPD_INT_RECORD *hpd_record;
+					ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
 
 					hpd.hpd = RADEON_HPD_NONE;
 					while (record->ucRecordType > 0
@@ -514,10 +521,12 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 							i2c_record =
 							    (ATOM_I2C_RECORD *)
 							    record;
+							i2c_config =
+							    (ATOM_I2C_ID_CONFIG_ACCESS *)
+							    &i2c_record->sucI2cId;
 							ddc_bus = radeon_lookup_i2c_gpio(rdev,
-										i2c_record->
-										sucI2cId.
-										bfI2C_LineMux);
+										i2c_config->
+										ucAccess);
 							break;
 						case ATOM_HPD_INT_RECORD_TYPE:
 							hpd_record =
@@ -670,22 +679,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 			dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
 
-			if ((rdev->family == CHIP_RS690) ||
-			    (rdev->family == CHIP_RS740)) {
-				if ((i == ATOM_DEVICE_DFP2_INDEX)
-				    && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
-					bios_connectors[i].line_mux =
-					    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-				else if ((i == ATOM_DEVICE_DFP3_INDEX)
-					 && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
-					bios_connectors[i].line_mux =
-					    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-				else
-					bios_connectors[i].line_mux =
-					    ci.sucI2cId.sbfAccess.bfI2C_LineMux;
-			} else
-				bios_connectors[i].line_mux =
-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+			bios_connectors[i].line_mux =
+			    ci.sucI2cId.ucAccess;
 
 			/* give tv unique connector ids */
 			if (i == ATOM_DEVICE_TV1_INDEX) {
@@ -876,7 +871,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
 		 * pre-DCE 3.0 r6xx hardware.  This might need to be adjusted per
 		 * family.
 		 */
-		p1pll->pll_out_min = 64800;
+		if (!radeon_new_pll)
+			p1pll->pll_out_min = 64800;
 	}
 
 	p1pll->pll_in_min =
@@ -1006,6 +1002,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
 	struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
 	uint8_t frev, crev;
 	struct radeon_atom_ss *ss = NULL;
+	int i;
 
 	if (id > ATOM_MAX_SS_ENTRY)
 		return NULL;
@@ -1023,12 +1020,17 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
 		if (!ss)
 			return NULL;
 
-		ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
-		ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
-		ss->step = ss_info->asSS_Info[id].ucSS_Step;
-		ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
-		ss->range = ss_info->asSS_Info[id].ucSS_Range;
-		ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
+		for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
+			if (ss_info->asSS_Info[i].ucSS_Id == id) {
+				ss->percentage =
+					le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
+				ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
+				ss->step = ss_info->asSS_Info[i].ucSS_Step;
+				ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
+				ss->range = ss_info->asSS_Info[i].ucSS_Range;
+				ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+			}
+		}
 	}
 	return ss;
 }
......
@@ -1103,10 +1103,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 		drm_connector_attach_property(&radeon_connector->base,
 					      rdev->mode_info.coherent_mode_property,
 					      1);
-		radeon_connector->dac_load_detect = true;
-		drm_connector_attach_property(&radeon_connector->base,
-					      rdev->mode_info.load_detect_property,
-					      1);
+		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+			radeon_connector->dac_load_detect = true;
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+		}
 		break;
 	case DRM_MODE_CONNECTOR_HDMIA:
 	case DRM_MODE_CONNECTOR_HDMIB:
@@ -1141,14 +1143,19 @@ radeon_add_atom_connector(struct drm_device *dev,
 		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 		if (ret)
 			goto failed;
-		/* add DP i2c bus */
-		radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
 		if (i2c_bus->valid) {
+			/* add DP i2c bus */
+			radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+			if (!radeon_dig_connector->dp_i2c_bus)
+				goto failed;
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
 			if (!radeon_connector->ddc_bus)
 				goto failed;
 		}
 		subpixel_order = SubPixelHorizontalRGB;
+		drm_connector_attach_property(&radeon_connector->base,
+					      rdev->mode_info.coherent_mode_property,
+					      1);
 		break;
 	case DRM_MODE_CONNECTOR_SVIDEO:
 	case DRM_MODE_CONNECTOR_Composite:
@@ -1183,7 +1190,6 @@ radeon_add_atom_connector(struct drm_device *dev,
 			if (!radeon_connector->ddc_bus)
 				goto failed;
 		}
-		drm_mode_create_scaling_mode_property(dev);
 		drm_connector_attach_property(&radeon_connector->base,
 					      dev->mode_config.scaling_mode_property,
 					      DRM_MODE_SCALE_FULLSCREEN);
......
@@ -44,10 +44,11 @@ void radeon_surface_init(struct radeon_device *rdev)
 	if (rdev->family < CHIP_R600) {
 		int i;
 
-		for (i = 0; i < 8; i++) {
-			WREG32(RADEON_SURFACE0_INFO +
-			       i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
-			       0);
+		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+			if (rdev->surface_regs[i].bo)
+				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+			else
+				radeon_clear_surface_reg(rdev, i);
 		}
 		/* enable surfaces */
 		WREG32(RADEON_SURFACE_CNTL, 0);
@@ -487,8 +488,10 @@ int radeon_atombios_init(struct radeon_device *rdev)
 
 void radeon_atombios_fini(struct radeon_device *rdev)
 {
-	kfree(rdev->mode_info.atom_context->scratch);
-	kfree(rdev->mode_info.atom_context);
+	if (rdev->mode_info.atom_context) {
+		kfree(rdev->mode_info.atom_context->scratch);
+		kfree(rdev->mode_info.atom_context);
+	}
 	kfree(rdev->mode_info.atom_card_info);
 }
......
@@ -560,6 +560,98 @@ void radeon_compute_pll(struct radeon_pll *pll,
 	*post_div_p = best_post_div;
 }
 
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+			      uint64_t freq,
+			      uint32_t *dot_clock_p,
+			      uint32_t *fb_div_p,
+			      uint32_t *frac_fb_div_p,
+			      uint32_t *ref_div_p,
+			      uint32_t *post_div_p,
+			      int flags)
+{
+	fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
+	fixed20_12 pll_out_max, pll_out_min;
+	fixed20_12 pll_in_max, pll_in_min;
+	fixed20_12 reference_freq;
+	fixed20_12 error, ffreq, a, b;
+
+	pll_out_max.full = rfixed_const(pll->pll_out_max);
+	pll_out_min.full = rfixed_const(pll->pll_out_min);
+	pll_in_max.full = rfixed_const(pll->pll_in_max);
+	pll_in_min.full = rfixed_const(pll->pll_in_min);
+	reference_freq.full = rfixed_const(pll->reference_freq);
+	do_div(freq, 10);
+	ffreq.full = rfixed_const(freq);
+	error.full = rfixed_const(100 * 100);
+
+	/* max p */
+	p.full = rfixed_div(pll_out_max, ffreq);
+	p.full = rfixed_floor(p);
+
+	/* min m */
+	m.full = rfixed_div(reference_freq, pll_in_max);
+	m.full = rfixed_ceil(m);
+
+	while (1) {
+		n.full = rfixed_div(ffreq, reference_freq);
+		n.full = rfixed_mul(n, m);
+		n.full = rfixed_mul(n, p);
+
+		f_vco.full = rfixed_div(n, m);
+		f_vco.full = rfixed_mul(f_vco, reference_freq);
+
+		f_pclk.full = rfixed_div(f_vco, p);
+
+		if (f_pclk.full > ffreq.full)
+			error.full = f_pclk.full - ffreq.full;
+		else
+			error.full = ffreq.full - f_pclk.full;
+		error.full = rfixed_div(error, f_pclk);
+		a.full = rfixed_const(100 * 100);
+		error.full = rfixed_mul(error, a);
+
+		a.full = rfixed_mul(m, p);
+		a.full = rfixed_div(n, a);
+		best_freq.full = rfixed_mul(reference_freq, a);
+
+		if (rfixed_trunc(error) < 25)
+			break;
+
+		a.full = rfixed_const(1);
+		m.full = m.full + a.full;
+		a.full = rfixed_div(reference_freq, m);
+		if (a.full >= pll_in_min.full)
+			continue;
+
+		m.full = rfixed_div(reference_freq, pll_in_max);
+		m.full = rfixed_ceil(m);
+		a.full = rfixed_const(1);
+		p.full = p.full - a.full;
+		a.full = rfixed_mul(p, ffreq);
+		if (a.full >= pll_out_min.full)
+			continue;
+		else {
+			DRM_ERROR("Unable to find pll dividers\n");
+			break;
+		}
+	}
+
+	a.full = rfixed_const(10);
+	b.full = rfixed_mul(n, a);
+
+	frac_n.full = rfixed_floor(n);
+	frac_n.full = rfixed_mul(frac_n, a);
+	frac_n.full = b.full - frac_n.full;
+
+	*dot_clock_p = rfixed_trunc(best_freq);
+	*fb_div_p = rfixed_trunc(n);
+	*frac_fb_div_p = rfixed_trunc(frac_n);
+	*ref_div_p = rfixed_trunc(m);
+	*post_div_p = rfixed_trunc(p);
+
+	DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+}
+
 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
@@ -660,7 +752,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
 			return -ENOMEM;
 
 		rdev->mode_info.coherent_mode_property->values[0] = 0;
-		rdev->mode_info.coherent_mode_property->values[0] = 1;
+		rdev->mode_info.coherent_mode_property->values[1] = 1;
 	}
 
 	if (!ASIC_IS_AVIVO(rdev)) {
@@ -684,7 +776,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
 	if (!rdev->mode_info.load_detect_property)
 		return -ENOMEM;
 	rdev->mode_info.load_detect_property->values[0] = 0;
-	rdev->mode_info.load_detect_property->values[0] = 1;
+	rdev->mode_info.load_detect_property->values[1] = 1;
 
 	drm_mode_create_scaling_mode_property(rdev->ddev);
......
@@ -86,6 +86,7 @@ int radeon_benchmarking = 0;
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
+int radeon_new_pll = 1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -120,6 +121,9 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
 
+MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
+module_param_named(new_pll, radeon_new_pll, int, 0444);
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
......
@@ -197,9 +197,8 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
 				radeon_fence_signaled(fence), timeout);
 		radeon_irq_kms_sw_irq_put(rdev);
-		if (unlikely(r == -ERESTARTSYS)) {
-			return -EBUSY;
-		}
+		if (unlikely(r < 0))
+			return r;
 	} else {
 		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_timeout(rdev->fence_drv.queue,
......
@@ -38,6 +38,23 @@ typedef union rfixed {
 #define fixed_init_half(A) { .full = rfixed_const_half((A)) }
 #define rfixed_trunc(A) ((A).full >> 12)
 
+static inline u32 rfixed_floor(fixed20_12 A)
+{
+	u32 non_frac = rfixed_trunc(A);
+
+	return rfixed_const(non_frac);
+}
+
+static inline u32 rfixed_ceil(fixed20_12 A)
+{
+	u32 non_frac = rfixed_trunc(A);
+
+	if (A.full > rfixed_const(non_frac))
+		return rfixed_const(non_frac + 1);
+	else
+		return rfixed_const(non_frac);
+}
+
 static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
 {
 	u64 tmp = ((u64)A.full << 13);
......
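
These helpers operate on 20.12 fixed-point values: rfixed_const(x) is x << 12 and rfixed_trunc(A) is A.full >> 12, so floor and ceil simply snap a value to the nearest whole-number representation. A worked example (illustrative only):

    fixed20_12 a;

    a.full = rfixed_const(5) + 2048;      /* 5.5 in 20.12: (5 << 12) + 0x800 */
    /* rfixed_trunc(a) == 5               -- fractional bits dropped        */
    /* rfixed_floor(a) == rfixed_const(5) -- 5.0, still in 20.12 form       */
    /* rfixed_ceil(a)  == rfixed_const(6) -- 6.0, rounded up                */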
@@ -30,10 +30,19 @@
 #include "radeon.h"
 #include "radeon_drm.h"
 
+int radeon_driver_unload_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (rdev == NULL)
+		return 0;
+	radeon_modeset_fini(rdev);
+	radeon_device_fini(rdev);
+	kfree(rdev);
+	dev->dev_private = NULL;
+	return 0;
+}
+
-/*
- * Driver load/unload
- */
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 {
 	struct radeon_device *rdev;
@@ -62,31 +71,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	 */
 	r = radeon_device_init(rdev, dev, dev->pdev, flags);
 	if (r) {
-		DRM_ERROR("Fatal error while trying to initialize radeon.\n");
-		return r;
+		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+		goto out;
 	}
 	/* Again modeset_init should fail only on fatal error
 	 * otherwise it should provide enough functionalities
 	 * for shadowfb to run
 	 */
 	r = radeon_modeset_init(rdev);
-	if (r) {
-		return r;
-	}
-	return 0;
-}
-
-int radeon_driver_unload_kms(struct drm_device *dev)
-{
-	struct radeon_device *rdev = dev->dev_private;
-
-	if (rdev == NULL)
-		return 0;
-	radeon_modeset_fini(rdev);
-	radeon_device_fini(rdev);
-	kfree(rdev);
-	dev->dev_private = NULL;
-	return 0;
+	if (r)
+		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+out:
+	if (r)
+		radeon_driver_unload_kms(dev);
+	return r;
 }
......
@@ -437,6 +437,15 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
 			       uint32_t *post_div_p,
 			       int flags);
 
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+				     uint64_t freq,
+				     uint32_t *dot_clock_p,
+				     uint32_t *fb_div_p,
+				     uint32_t *frac_fb_div_p,
+				     uint32_t *ref_div_p,
+				     uint32_t *post_div_p,
+				     int flags);
+
 extern void radeon_setup_encoder_clones(struct drm_device *dev);
 
 struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
......
@@ -75,6 +75,25 @@ static inline u32 radeon_ttm_flags_from_domain(u32 domain)
 	return flags;
 }
 
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
+{
+	u32 c = 0;
+
+	rbo->placement.fpfn = 0;
+	rbo->placement.lpfn = 0;
+	rbo->placement.placement = rbo->placements;
+	rbo->placement.busy_placement = rbo->placements;
+	if (domain & RADEON_GEM_DOMAIN_VRAM)
+		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+					TTM_PL_FLAG_VRAM;
+	if (domain & RADEON_GEM_DOMAIN_GTT)
+		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	if (domain & RADEON_GEM_DOMAIN_CPU)
+		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	rbo->placement.num_placement = c;
+	rbo->placement.num_busy_placement = c;
+}
+
 int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 		     unsigned long size, bool kernel, u32 domain,
 		     struct radeon_bo **bo_ptr)
@@ -102,16 +121,15 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 	INIT_LIST_HEAD(&bo->list);
 
 	flags = radeon_ttm_flags_from_domain(domain);
-retry:
+	/* Kernel allocation are uninterruptible */
 	r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
-				   flags, 0, 0, true, NULL, size,
+				   flags, 0, 0, !kernel, NULL, size,
 				   &radeon_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
-		if (r == -ERESTART)
-			goto retry;
-		/* ttm call radeon_ttm_object_object_destroy if error happen */
-		dev_err(rdev->dev, "object_init failed for (%ld, 0x%08X)\n",
-			size, flags);
+		if (r != -ERESTARTSYS)
+			dev_err(rdev->dev,
+				"object_init failed for (%ld, 0x%08X)\n",
+				size, flags);
 		return r;
 	}
 	*bo_ptr = bo;
@@ -169,40 +187,32 @@ void radeon_bo_unref(struct radeon_bo **bo)
 
 int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 {
-	u32 flags;
-	u32 tmp;
-	int r;
+	int r, i;
 
-	flags = radeon_ttm_flags_from_domain(domain);
+	radeon_ttm_placement_from_domain(bo, domain);
 	if (bo->pin_count) {
 		bo->pin_count++;
 		if (gpu_addr)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
 		return 0;
 	}
-	tmp = bo->tbo.mem.placement;
-	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
-	bo->tbo.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT |
-		TTM_PL_MASK_CACHING;
-retry:
-	r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
-				       true, false);
+	radeon_ttm_placement_from_domain(bo, domain);
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
 	}
-	if (unlikely(r != 0)) {
-		if (r == -ERESTART)
-			goto retry;
+	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
-	}
 	return r;
 }
 
 int radeon_bo_unpin(struct radeon_bo *bo)
 {
-	int r;
+	int r, i;
 
 	if (!bo->pin_count) {
 		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
@@ -211,18 +221,12 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 	bo->pin_count--;
 	if (bo->pin_count)
 		return 0;
-	bo->tbo.proposed_placement = bo->tbo.mem.placement &
-		~TTM_PL_FLAG_NO_EVICT;
-retry:
-	r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
-				       true, false);
-	if (unlikely(r != 0)) {
-		if (r == -ERESTART)
-			goto retry;
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
+	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
-		return r;
-	}
-	return 0;
+	return r;
 }
 
 int radeon_bo_evict_vram(struct radeon_device *rdev)
@@ -326,21 +330,17 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
 		bo = lobj->bo;
 		if (!bo->pin_count) {
 			if (lobj->wdomain) {
-				bo->tbo.proposed_placement =
-					radeon_ttm_flags_from_domain(lobj->wdomain);
+				radeon_ttm_placement_from_domain(bo,
+								lobj->wdomain);
 			} else {
-				bo->tbo.proposed_placement =
-					radeon_ttm_flags_from_domain(lobj->rdomain);
+				radeon_ttm_placement_from_domain(bo,
+								lobj->rdomain);
 			}
-retry:
 			r = ttm_buffer_object_validate(&bo->tbo,
-						bo->tbo.proposed_placement,
+						&bo->placement,
 						true, false);
-			if (unlikely(r)) {
-				if (r == -ERESTART)
-					goto retry;
+			if (unlikely(r))
 				return r;
-			}
 		}
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
@@ -378,7 +378,7 @@ int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 	return ttm_fbdev_mmap(vma, &bo->tbo);
 }
 
-static int radeon_bo_get_surface_reg(struct radeon_bo *bo)
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 {
 	struct radeon_device *rdev = bo->rdev;
 	struct radeon_surface_reg *reg;
......
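
Taken together, the radeon_object.c hunks replace the old single proposed_placement mask with an ordered list of placements that TTM tries in turn. A condensed sketch of the resulting pin flow (restating the diff above for clarity; the wrapper itself is illustrative, not additional code from the commit):

    /* Sketch of the new pin sequence built on radeon_ttm_placement_from_domain(). */
    static int example_pin(struct radeon_bo *bo, u32 domain)
    {
            int r, i;

            radeon_ttm_placement_from_domain(bo, domain);
            for (i = 0; i < bo->placement.num_placement; i++)
                    bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
            /* -ERESTARTSYS from an interrupted wait is simply propagated. */
            r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
            return r;
    }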
@@ -175,5 +175,5 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 					struct ttm_mem_reg *mem);
 extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 
 #endif
@@ -197,16 +197,19 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 	return 0;
 }
 
-static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
+static void radeon_evict_flags(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
 {
-	uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
 
 	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		break;
+	case TTM_PL_TT:
 	default:
-		return (cur_placement & ~TTM_PL_MASK_CACHING) |
-			TTM_PL_FLAG_SYSTEM |
-			TTM_PL_FLAG_CACHED;
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
 	}
+	*placement = rbo->placement;
 }
 
 static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
@@ -283,14 +286,21 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
-	uint32_t proposed_placement;
+	u32 placements;
+	struct ttm_placement placement;
 	int r;
 
 	rdev = radeon_get_rdev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-	r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait);
 	if (unlikely(r)) {
 		return r;
@@ -329,15 +339,21 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
-	uint32_t proposed_flags;
+	struct ttm_placement placement;
+	u32 placements;
 	int r;
 
 	rdev = radeon_get_rdev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
-			     interruptible, no_wait);
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -407,18 +423,6 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 	return r;
 }
 
-const uint32_t radeon_mem_prios[] = {
-	TTM_PL_VRAM,
-	TTM_PL_TT,
-	TTM_PL_SYSTEM,
-};
-
-const uint32_t radeon_busy_prios[] = {
-	TTM_PL_TT,
-	TTM_PL_VRAM,
-	TTM_PL_SYSTEM,
-};
-
 static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
 				bool lazy, bool interruptible)
 {
@@ -446,10 +450,6 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
 }
 
 static struct ttm_bo_driver radeon_bo_driver = {
-	.mem_type_prio = radeon_mem_prios,
-	.mem_busy_prio = radeon_busy_prios,
-	.num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
-	.num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
 	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
 	.invalidate_caches = &radeon_invalidate_caches,
 	.init_mem_type = &radeon_init_mem_type,
@@ -483,7 +483,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 		return r;
 	}
 	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
-			   0, rdev->mc.real_vram_size >> PAGE_SHIFT);
+			   rdev->mc.real_vram_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
@@ -506,7 +506,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	DRM_INFO("radeon: %uM of VRAM memory ready\n",
 		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
 	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
-			   0, rdev->mc.gtt_size >> PAGE_SHIFT);
+			   rdev->mc.gtt_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing GTT heap.\n");
 		return r;
......
@@ -430,6 +430,8 @@ int rs400_resume(struct radeon_device *rdev)
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs400_startup(rdev);
 }
......
@@ -586,6 +586,8 @@ int rs600_resume(struct radeon_device *rdev)
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs600_startup(rdev);
 }
......
@@ -260,8 +260,9 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
 
 	b.full = rfixed_const(mode->crtc_hdisplay);
 	c.full = rfixed_const(256);
-	a.full = rfixed_mul(wm->num_line_pair, b);
-	request_fifo_depth.full = rfixed_div(a, c);
+	a.full = rfixed_div(b, c);
+	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
 	if (a.full < rfixed_const(4)) {
 		wm->lb_request_fifo_depth = 4;
 	} else {
@@ -390,6 +391,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
 		a.full = rfixed_const(16);
 		wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
 		wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+		wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
 
 		/* Determine estimated width */
 		estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
@@ -399,6 +401,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
 		} else {
 			a.full = rfixed_const(16);
 			wm->priority_mark.full = rfixed_div(estimated_width, a);
+			wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
 			wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
 		}
 	}
@@ -655,6 +658,8 @@ int rs690_resume(struct radeon_device *rdev)
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs690_startup(rdev);
 }
......
@@ -513,6 +513,8 @@ int rv515_resume(struct radeon_device *rdev)
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rv515_startup(rdev);
 }
@@ -889,8 +891,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 
 	b.full = rfixed_const(mode->crtc_hdisplay);
 	c.full = rfixed_const(256);
-	a.full = rfixed_mul(wm->num_line_pair, b);
-	request_fifo_depth.full = rfixed_div(a, c);
+	a.full = rfixed_div(b, c);
+	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
 	if (a.full < rfixed_const(4)) {
 		wm->lb_request_fifo_depth = 4;
 	} else {
@@ -992,15 +995,17 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 		a.full = rfixed_const(16);
 		wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
 		wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+		wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
 
 		/* Determine estimated width */
 		estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
 		estimated_width.full = rfixed_div(estimated_width, consumption_time);
 		if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
-			wm->priority_mark.full = rfixed_const(10);
+			wm->priority_mark.full = wm->priority_mark_max.full;
 		} else {
 			a.full = rfixed_const(16);
 			wm->priority_mark.full = rfixed_div(estimated_width, a);
+			wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
 			wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
 		}
 	}
......
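
The reordered bandwidth math in the rs690 and rv515 hunks divides before multiplying and rounds up, so a mode that needs a fractional FIFO depth no longer gets it truncated. For example (illustrative numbers, not from the commit): with crtc_hdisplay = 1366 and one line pair, 1366 / 256 = 5.336 in 20.12 fixed point, and rfixed_ceil rounds the request FIFO depth up to 6 where the old mul-then-div order followed by truncation would settle on 5.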
@@ -874,6 +874,14 @@ static int rv770_startup(struct radeon_device *rdev)
 {
 	int r;
 
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
 	rv770_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		rv770_agp_enable(rdev);
@@ -1039,25 +1047,17 @@ int rv770_init(struct radeon_device *rdev)
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
-	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-		r = r600_init_microcode(rdev);
-		if (r) {
-			DRM_ERROR("Failed to load firmware!\n");
-			return r;
-		}
-	}
-
 	r = r600_pcie_gart_init(rdev);
 	if (r)
 		return r;
 
-	rdev->accel_working = true;
 	r = r600_blit_init(rdev);
 	if (r) {
-		DRM_ERROR("radeon: failled blitter (%d).\n", r);
-		rdev->accel_working = false;
+		DRM_ERROR("radeon: failed blitter (%d).\n", r);
+		return r;
 	}
 
+	rdev->accel_working = true;
 	r = rv770_startup(rdev);
 	if (r) {
 		rv770_suspend(rdev);
@@ -1069,12 +1069,12 @@ int rv770_init(struct radeon_device *rdev)
 	if (rdev->accel_working) {
 		r = radeon_ib_pool_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
 			rdev->accel_working = false;
 		}
 		r = r600_ib_test(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 			rdev->accel_working = false;
 		}
 	}
......
@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		ret = ttm_bo_wait(bo, false, true, false);
 		spin_unlock(&bo->lock);
 		if (unlikely(ret != 0)) {
-			retval = (ret != -ERESTART) ?
+			retval = (ret != -ERESTARTSYS) ?
 			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 			goto out_unlock;
 		}
@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 	switch (ret) {
 	case 0:
 		break;
-	case -ERESTART:
-		ret = -EINTR;
-		goto out_unref;
 	case -EBUSY:
 		ret = -EAGAIN;
 		goto out_unref;
@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
 	switch (ret) {
 	case 0:
 		break;
-	case -ERESTART:
-		return -EINTR;
 	case -EBUSY:
 		return -EAGAIN;
 	default:
......
@@ -323,8 +323,10 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
 	 * No special dma32 zone needed.
 	 */
 
-	if (mem <= ((uint64_t) 1ULL << 32))
+	if (mem <= ((uint64_t) 1ULL << 32)) {
+		kfree(zone);
 		return 0;
+	}
 
 	/*
 	 * Limit max dma32 memory to 4GB for now
......
@@ -66,6 +66,13 @@ extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 						    unsigned long size,
 						    unsigned alignment,
 						    int atomic);
+extern struct drm_mm_node *drm_mm_get_block_range_generic(
+						struct drm_mm_node *node,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int atomic);
 static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
 						   unsigned long size,
 						   unsigned alignment)
@@ -78,11 +85,38 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *pa
 {
 	return drm_mm_get_block_generic(parent, size, alignment, 1);
 }
+static inline struct drm_mm_node *drm_mm_get_block_range(
+						struct drm_mm_node *parent,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end)
+{
+	return drm_mm_get_block_range_generic(parent, size, alignment,
+						start, end, 0);
+}
+static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
+						struct drm_mm_node *parent,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end)
+{
+	return drm_mm_get_block_range_generic(parent, size, alignment,
+						start, end, 1);
+}
 extern void drm_mm_put_block(struct drm_mm_node *cur);
 extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 					      unsigned long size,
 					      unsigned alignment,
 					      int best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range(
+						const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int best_match);
 extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
 		       unsigned long size);
 extern void drm_mm_takedown(struct drm_mm *mm);
@@ -99,6 +133,7 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
 	return block->mm;
 }
 
+extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
 #ifdef CONFIG_DEBUG_FS
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
 #endif
......
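Taken together, the new range variants let a caller confine an allocation to a window of the managed space (the scanout+cursor proximity case from the merge description). A hedged usage sketch, assuming an initialized struct drm_mm *mm and page-based units:

	/* Hedged sketch: allocate 'size' aligned to 'align' within [start, end). */
	struct drm_mm_node *node;

	node = drm_mm_search_free_in_range(mm, size, align, start, end, 0);
	if (node)
		node = drm_mm_get_block_range(node, size, align, start, end);
	if (!node)
		return -ENOMEM;
	/* node now lies within the requested window */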
...@@ -44,6 +44,29 @@ struct ttm_bo_device; ...@@ -44,6 +44,29 @@ struct ttm_bo_device;
struct drm_mm_node; struct drm_mm_node;
/**
* struct ttm_placement
*
 * @fpfn: first valid page frame number to place the object at
 * @lpfn: last valid page frame number to place the object at
 * @num_placement: number of preferred placements
 * @placement: preferred placements
 * @num_busy_placement: number of preferred placements when the buffer must be evicted
 * @busy_placement: preferred placements when the buffer must be evicted
*
 * Structure describing the placement requested for a buffer object.
*/
struct ttm_placement {
unsigned fpfn;
unsigned lpfn;
unsigned num_placement;
const uint32_t *placement;
unsigned num_busy_placement;
const uint32_t *busy_placement;
};
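A driver now expresses both its preferred placements and its fallback placements for the eviction path through this one structure. A hedged sketch of pinning a scanout buffer into the low part of VRAM; the TTM_PL_FLAG_* values are assumed, and a real driver would also OR in caching flags:

	/* Hedged sketch: flag values assumed; caching bits omitted for brevity. */
	static const uint32_t vram_flags = TTM_PL_FLAG_VRAM;	/* preferred */
	static const uint32_t gtt_flags  = TTM_PL_FLAG_TT;	/* fallback under pressure */

	struct ttm_placement placement = {
		.fpfn		    = 0,
		.lpfn		    = 65536,	/* confine to the first 64K pages, e.g. scanout */
		.num_placement	    = 1,
		.placement	    = &vram_flags,
		.num_busy_placement = 1,
		.busy_placement	    = &gtt_flags,
	};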
/** /**
* struct ttm_mem_reg * struct ttm_mem_reg
* *
...@@ -109,10 +132,6 @@ struct ttm_tt; ...@@ -109,10 +132,6 @@ struct ttm_tt;
* the object is destroyed. * the object is destroyed.
* @event_queue: Queue for processes waiting on buffer object status change. * @event_queue: Queue for processes waiting on buffer object status change.
* @lock: spinlock protecting mostly synchronization members. * @lock: spinlock protecting mostly synchronization members.
* @proposed_placement: Proposed placement for the buffer. Changed only by the
* creator prior to validation as opposed to bo->mem.proposed_flags which is
* changed by the implementation prior to a buffer move if it wants to outsmart
* the buffer creator / user. This latter happens, for example, at eviction.
* @mem: structure describing current placement. * @mem: structure describing current placement.
* @persistant_swap_storage: Usually the swap storage is deleted for buffers * @persistant_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member * pinned in physical memory. If this behaviour is not desired, this member
...@@ -177,7 +196,6 @@ struct ttm_buffer_object { ...@@ -177,7 +196,6 @@ struct ttm_buffer_object {
* Members protected by the bo::reserved lock. * Members protected by the bo::reserved lock.
*/ */
uint32_t proposed_placement;
struct ttm_mem_reg mem; struct ttm_mem_reg mem;
struct file *persistant_swap_storage; struct file *persistant_swap_storage;
struct ttm_tt *ttm; struct ttm_tt *ttm;
...@@ -285,7 +303,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo) ...@@ -285,7 +303,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
* Note: It might be necessary to block validations before the * Note: It might be necessary to block validations before the
* wait by reserving the buffer. * wait by reserving the buffer.
* Returns -EBUSY if no_wait is true and the buffer is busy. * Returns -EBUSY if no_wait is true and the buffer is busy.
* Returns -ERESTART if interrupted by a signal. * Returns -ERESTARTSYS if interrupted by a signal.
*/ */
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
bool interruptible, bool no_wait); bool interruptible, bool no_wait);
...@@ -293,21 +311,22 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, ...@@ -293,21 +311,22 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
* ttm_buffer_object_validate * ttm_buffer_object_validate
* *
* @bo: The buffer object. * @bo: The buffer object.
* @proposed_placement: Proposed_placement for the buffer object. * @placement: Proposed placement for the buffer object.
* @interruptible: Sleep interruptible if sleeping. * @interruptible: Sleep interruptible if sleeping.
* @no_wait: Return immediately if the buffer is busy. * @no_wait: Return immediately if the buffer is busy.
* *
* Changes placement and caching policy of the buffer object * Changes placement and caching policy of the buffer object
 * according to bo::proposed_flags. * according to the proposed placement.
* Returns * Returns
* -EINVAL on invalid proposed_flags. * -EINVAL on invalid proposed placement.
* -ENOMEM on out-of-memory condition. * -ENOMEM on out-of-memory condition.
* -EBUSY if no_wait is true and buffer busy. * -EBUSY if no_wait is true and buffer busy.
* -ERESTART if interrupted by a signal. * -ERESTARTSYS if interrupted by a signal.
*/ */
extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo, extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
uint32_t proposed_placement, struct ttm_placement *placement,
bool interruptible, bool no_wait); bool interruptible, bool no_wait);
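Callers accordingly move from a flags word to the richer structure. A hedged call sketch, reusing a placement like the one above:

	/* Hedged sketch: validate against a driver-built placement. */
	int ret;

	ret = ttm_buffer_object_validate(bo, &placement, true, false);
	if (ret == -ERESTARTSYS)
		return ret;	/* interrupted; userspace will see EINTR */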
/** /**
* ttm_bo_unref * ttm_bo_unref
* *
...@@ -328,7 +347,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo); ...@@ -328,7 +347,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo);
* waiting for buffer idle. This lock is recursive. * waiting for buffer idle. This lock is recursive.
* Returns * Returns
* -EBUSY if the buffer is busy and no_wait is true. * -EBUSY if the buffer is busy and no_wait is true.
* -ERESTART if interrupted by a signal. * -ERESTARTSYS if interrupted by a signal.
*/ */
extern int extern int
...@@ -371,7 +390,7 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); ...@@ -371,7 +390,7 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
* Returns * Returns
* -ENOMEM: Out of memory. * -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags. * -EINVAL: Invalid placement flags.
* -ERESTART: Interrupted by signal while sleeping waiting for resources. * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/ */
extern int ttm_buffer_object_init(struct ttm_bo_device *bdev, extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
...@@ -411,7 +430,7 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev, ...@@ -411,7 +430,7 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
* Returns * Returns
* -ENOMEM: Out of memory. * -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags. * -EINVAL: Invalid placement flags.
* -ERESTART: Interrupted by signal while waiting for resources. * -ERESTARTSYS: Interrupted by signal while waiting for resources.
*/ */
extern int ttm_buffer_object_create(struct ttm_bo_device *bdev, extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
...@@ -445,7 +464,6 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, ...@@ -445,7 +464,6 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
* *
* @bdev: Pointer to a ttm_bo_device struct. * @bdev: Pointer to a ttm_bo_device struct.
* @mem_type: The memory type. * @mem_type: The memory type.
* @p_offset: offset for managed area in pages.
* @p_size: size managed area in pages. * @p_size: size managed area in pages.
* *
* Initialize a manager for a given memory type. * Initialize a manager for a given memory type.
...@@ -458,7 +476,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, ...@@ -458,7 +476,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
*/ */
extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
unsigned long p_offset, unsigned long p_size); unsigned long p_size);
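With the start offset gone, a managed area implicitly begins at zero; per-allocation range limits are expressed through ttm_placement instead. A hedged call under the new signature, assuming a VRAM size in bytes:

	/* Hedged sketch: initialize the VRAM manager; no offset parameter anymore. */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
	if (ret)
		return ret;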
/** /**
* ttm_bo_clean_mm * ttm_bo_clean_mm
* *
...@@ -503,7 +521,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); ...@@ -503,7 +521,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
* *
* Returns: * Returns:
* -EINVAL: Invalid or uninitialized memory type. * -EINVAL: Invalid or uninitialized memory type.
* -ERESTART: The call was interrupted by a signal while waiting to * -ERESTARTSYS: The call was interrupted by a signal while waiting to
* evict a buffer. * evict a buffer.
*/ */
...@@ -606,7 +624,7 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, ...@@ -606,7 +624,7 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
* be called from the fops::read and fops::write method. * be called from the fops::read and fops::write method.
* Returns: * Returns:
* See man (2) write, man(2) read. In particular, * See man (2) write, man(2) read. In particular,
* the function may return -EINTR if * the function may return -ERESTARTSYS if
* interrupted by a signal. * interrupted by a signal.
*/ */
......
...@@ -242,12 +242,6 @@ struct ttm_mem_type_manager { ...@@ -242,12 +242,6 @@ struct ttm_mem_type_manager {
/** /**
* struct ttm_bo_driver * struct ttm_bo_driver
* *
* @mem_type_prio: Priority array of memory types to place a buffer object in
* if it fits without evicting buffers from any of these memory types.
* @mem_busy_prio: Priority array of memory types to place a buffer object in
* if it needs to evict buffers to make room.
* @num_mem_type_prio: Number of elements in the @mem_type_prio array.
* @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
* @create_ttm_backend_entry: Callback to create a struct ttm_backend. * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
* @invalidate_caches: Callback to invalidate read caches when a buffer object * @invalidate_caches: Callback to invalidate read caches when a buffer object
* has been evicted. * has been evicted.
...@@ -265,11 +259,6 @@ struct ttm_mem_type_manager { ...@@ -265,11 +259,6 @@ struct ttm_mem_type_manager {
*/ */
struct ttm_bo_driver { struct ttm_bo_driver {
const uint32_t *mem_type_prio;
const uint32_t *mem_busy_prio;
uint32_t num_mem_type_prio;
uint32_t num_mem_busy_prio;
/** /**
* struct ttm_bo_driver member create_ttm_backend_entry * struct ttm_bo_driver member create_ttm_backend_entry
* *
...@@ -306,7 +295,8 @@ struct ttm_bo_driver { ...@@ -306,7 +295,8 @@ struct ttm_bo_driver {
* finished, they'll end up in bo->mem.flags * finished, they'll end up in bo->mem.flags
*/ */
uint32_t(*evict_flags) (struct ttm_buffer_object *bo); void(*evict_flags) (struct ttm_buffer_object *bo,
struct ttm_placement *placement);
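The callback now fills in a placement rather than returning a flags word, so drivers can steer evicted buffers with range constraints too. A hedged driver-side sketch with hypothetical identifiers:

	/* Hedged sketch of the new callback shape (hypothetical driver). */
	static void example_evict_flags(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
	{
		static const uint32_t flags = TTM_PL_FLAG_SYSTEM;	/* assumed flag */

		placement->fpfn = 0;
		placement->lpfn = 0;		/* assumed: 0 means no upper bound */
		placement->num_placement = 1;
		placement->placement = &flags;
		placement->num_busy_placement = 1;
		placement->busy_placement = &flags;
	}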
/** /**
* struct ttm_bo_driver member move: * struct ttm_bo_driver member move:
* *
...@@ -648,12 +638,12 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, ...@@ -648,12 +638,12 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
* -EBUSY: No space available (only if no_wait == 1). * -EBUSY: No space available (only if no_wait == 1).
* -ENOMEM: Could not allocate memory for the buffer object, either due to * -ENOMEM: Could not allocate memory for the buffer object, either due to
* fragmentation or concurrent allocators. * fragmentation or concurrent allocators.
* -ERESTART: An interruptible sleep was interrupted by a signal. * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/ */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
uint32_t proposed_placement, struct ttm_placement *placement,
struct ttm_mem_reg *mem, struct ttm_mem_reg *mem,
bool interruptible, bool no_wait); bool interruptible, bool no_wait);
/** /**
* ttm_bo_wait_for_cpu * ttm_bo_wait_for_cpu
* *
...@@ -663,7 +653,7 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, ...@@ -663,7 +653,7 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
* Wait until a buffer object is no longer sync'ed for CPU access. * Wait until a buffer object is no longer sync'ed for CPU access.
* Returns: * Returns:
* -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1). * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
* -ERESTART: An interruptible sleep was interrupted by a signal. * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/ */
extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
...@@ -767,7 +757,7 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); ...@@ -767,7 +757,7 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
* -EAGAIN: The reservation may cause a deadlock. * -EAGAIN: The reservation may cause a deadlock.
* Release all buffer reservations, wait for @bo to become unreserved and * Release all buffer reservations, wait for @bo to become unreserved and
* try again. (only if use_sequence == 1). * try again. (only if use_sequence == 1).
* -ERESTART: A wait for the buffer to become unreserved was interrupted by * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space. * a signal. Release all buffer reservations and return to user-space.
*/ */
extern int ttm_bo_reserve(struct ttm_buffer_object *bo, extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
...@@ -808,7 +798,7 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, ...@@ -808,7 +798,7 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
* *
* Returns: * Returns:
* -EBUSY: If no_wait == 1 and the buffer is already reserved. * -EBUSY: If no_wait == 1 and the buffer is already reserved.
* -ERESTART: If interruptible == 1 and the process received a signal * -ERESTARTSYS: If interruptible == 1 and the process received a signal
* while sleeping. * while sleeping.
*/ */
extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo, extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
......