Commit 115a5c2b authored by Dave Airlie's avatar Dave Airlie

Merge remote branch 'korg/drm-radeon-next' of into drm-linus

This merges some TTM overhauls that allow better object placement for certain
radeon GPUs which need scanout+cursor within range of each other, along with an
API change so that ERESTART is no longer returned to userspace: ERESTARTSYS is
now used properly internally and is converted to EINTR, which is caught
correctly. Also lots of radeon fixes across the board.
parents 0b5e8db6 fb53f862
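The two headline changes are visible in the diff below: drm_mm and TTM gain range-restricted allocation (drm_mm_get_block_range_generic(), drm_mm_search_free_in_range(), and the fpfn/lpfn fields of struct ttm_placement), and radeon's retry-on-ERESTART loops are dropped so -ERESTARTSYS can propagate. As a minimal, hypothetical sketch of how a driver could use the new placement API to keep a scanout buffer within reach of the cursor; only the struct ttm_placement fields introduced in this series are assumed, and the helper name and size limit are illustrative, not part of the merge:

/*
 * Illustrative sketch only -- not code from this series.  It bounds a
 * buffer's placement with the fpfn/lpfn fields that struct ttm_placement
 * gains below, so scanout and cursor BOs land within range of each other.
 * The function name and the limit_bytes parameter are hypothetical.
 */
static void example_limit_scanout_placement(struct radeon_bo *rbo,
                                            unsigned long limit_bytes)
{
        /* single VRAM placement, write-combined, as radeon uses for scanout */
        rbo->placements[0] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                             TTM_PL_FLAG_VRAM;
        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        rbo->placement.num_placement = 1;
        rbo->placement.num_busy_placement = 1;
        /* restrict the allocation to the first limit_bytes of VRAM */
        rbo->placement.fpfn = 0;
        rbo->placement.lpfn = limit_bytes >> PAGE_SHIFT;
}

The range then flows from ttm_bo_mem_space() into the new drm_mm_search_free_in_range()/drm_mm_get_block_range_generic() helpers added below, so the constraint is enforced by the allocator instead of by driver-side buffer shuffling.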
@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
 {
     int count = 0;
 
+    /* disable all the possible outputs/crtcs before entering KMS mode */
+    drm_helper_disable_unused_functions(dev);
+
     drm_fb_helper_parse_command_line(dev);
 
     count = drm_helper_probe_connector_modes(dev,
......
...@@ -226,6 +226,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, ...@@ -226,6 +226,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
} }
EXPORT_SYMBOL(drm_mm_get_block_generic); EXPORT_SYMBOL(drm_mm_get_block_generic);
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
int atomic)
{
struct drm_mm_node *align_splitoff = NULL;
unsigned tmp = 0;
unsigned wasted = 0;
if (node->start < start)
wasted += start - node->start;
if (alignment)
tmp = ((node->start + wasted) % alignment);
if (tmp)
wasted += alignment - tmp;
if (wasted) {
align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
if (unlikely(align_splitoff == NULL))
return NULL;
}
if (node->size == size) {
list_del_init(&node->fl_entry);
node->free = 0;
} else {
node = drm_mm_split_at_start(node, size, atomic);
}
if (align_splitoff)
drm_mm_put_block(align_splitoff);
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/* /*
* Put a block. Merge with the previous and / or next block if they are free. * Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack. * Otherwise add to the free stack.
...@@ -331,6 +369,56 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, ...@@ -331,6 +369,56 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
} }
EXPORT_SYMBOL(drm_mm_search_free); EXPORT_SYMBOL(drm_mm_search_free);
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
int best_match)
{
struct list_head *list;
const struct list_head *free_stack = &mm->fl_entry;
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long best_size;
unsigned wasted;
best = NULL;
best_size = ~0UL;
list_for_each(list, free_stack) {
entry = list_entry(list, struct drm_mm_node, fl_entry);
wasted = 0;
if (entry->size < size)
continue;
if (entry->start > end || (entry->start+entry->size) < start)
continue;
if (entry->start < start)
wasted += start - entry->start;
if (alignment) {
register unsigned tmp = (entry->start + wasted) % alignment;
if (tmp)
wasted += alignment - tmp;
}
if (entry->size >= size + wasted) {
if (!best_match)
return entry;
if (size < best_size) {
best = entry;
best_size = entry->size;
}
}
}
return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
int drm_mm_clean(struct drm_mm * mm) int drm_mm_clean(struct drm_mm * mm)
{ {
struct list_head *head = &mm->ml_entry; struct list_head *head = &mm->ml_entry;
...@@ -381,6 +469,26 @@ void drm_mm_takedown(struct drm_mm * mm) ...@@ -381,6 +469,26 @@ void drm_mm_takedown(struct drm_mm * mm)
} }
EXPORT_SYMBOL(drm_mm_takedown); EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
int total_used = 0, total_free = 0, total = 0;
list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
prefix, entry->start, entry->start + entry->size,
entry->size, entry->free ? "free" : "used");
total += entry->size;
if (entry->free)
total_free += entry->size;
else
total_used += entry->size;
}
printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{ {
......
@@ -499,6 +499,16 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
     else
         pll = &rdev->clock.p2pll;
 
+    if (ASIC_IS_AVIVO(rdev)) {
+        if (radeon_new_pll)
+            radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
+                                     &fb_div, &frac_fb_div,
+                                     &ref_div, &post_div, pll_flags);
+        else
+            radeon_compute_pll(pll, adjusted_clock, &pll_clock,
+                               &fb_div, &frac_fb_div,
+                               &ref_div, &post_div, pll_flags);
+    } else
     radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
                        &ref_div, &post_div, pll_flags);
@@ -599,8 +609,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
     }
     radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
     radeon_bo_unreserve(rbo);
-    if (tiling_flags & RADEON_TILING_MACRO)
-        fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
 
     switch (crtc->fb->bits_per_pixel) {
     case 8:
@@ -630,6 +638,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
         return -EINVAL;
     }
 
+    if (tiling_flags & RADEON_TILING_MACRO)
+        fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
+
     if (tiling_flags & RADEON_TILING_MICRO)
         fb_format |= AVIVO_D1GRPH_TILED;
......
...@@ -3299,6 +3299,8 @@ int r100_resume(struct radeon_device *rdev) ...@@ -3299,6 +3299,8 @@ int r100_resume(struct radeon_device *rdev)
radeon_combios_asic_init(rdev->ddev); radeon_combios_asic_init(rdev->ddev);
/* Resume clock after posting */ /* Resume clock after posting */
r100_clock_startup(rdev); r100_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
return r100_startup(rdev); return r100_startup(rdev);
} }
......
...@@ -1250,6 +1250,8 @@ int r300_resume(struct radeon_device *rdev) ...@@ -1250,6 +1250,8 @@ int r300_resume(struct radeon_device *rdev)
radeon_combios_asic_init(rdev->ddev); radeon_combios_asic_init(rdev->ddev);
/* Resume clock after posting */ /* Resume clock after posting */
r300_clock_startup(rdev); r300_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
return r300_startup(rdev); return r300_startup(rdev);
} }
......
...@@ -231,7 +231,8 @@ int r420_resume(struct radeon_device *rdev) ...@@ -231,7 +231,8 @@ int r420_resume(struct radeon_device *rdev)
} }
/* Resume clock after posting */ /* Resume clock after posting */
r420_clock_resume(rdev); r420_clock_resume(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
return r420_startup(rdev); return r420_startup(rdev);
} }
......
...@@ -220,6 +220,8 @@ int r520_resume(struct radeon_device *rdev) ...@@ -220,6 +220,8 @@ int r520_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context); atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */ /* Resume clock after posting */
rv515_clock_startup(rdev); rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
return r520_startup(rdev); return r520_startup(rdev);
} }
......
@@ -1845,6 +1845,14 @@ int r600_startup(struct radeon_device *rdev)
 {
     int r;
 
+    if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+        r = r600_init_microcode(rdev);
+        if (r) {
+            DRM_ERROR("Failed to load firmware!\n");
+            return r;
+        }
+    }
+
     r600_mc_program(rdev);
     if (rdev->flags & RADEON_IS_AGP) {
         r600_agp_enable(rdev);
@@ -2026,25 +2034,17 @@ int r600_init(struct radeon_device *rdev)
     rdev->ih.ring_obj = NULL;
     r600_ih_ring_init(rdev, 64 * 1024);
 
-    if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-        r = r600_init_microcode(rdev);
-        if (r) {
-            DRM_ERROR("Failed to load firmware!\n");
-            return r;
-        }
-    }
-
     r = r600_pcie_gart_init(rdev);
     if (r)
         return r;
 
-    rdev->accel_working = true;
     r = r600_blit_init(rdev);
     if (r) {
-        DRM_ERROR("radeon: failled blitter (%d).\n", r);
+        DRM_ERROR("radeon: failed blitter (%d).\n", r);
         return r;
     }
+    rdev->accel_working = true;
     r = r600_startup(rdev);
     if (r) {
         r600_suspend(rdev);
@@ -2056,12 +2056,12 @@ int r600_init(struct radeon_device *rdev)
     if (rdev->accel_working) {
         r = radeon_ib_pool_init(rdev);
         if (r) {
-            DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+            DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
             rdev->accel_working = false;
         }
         r = r600_ib_test(rdev);
         if (r) {
-            DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+            DRM_ERROR("radeon: failed testing IB (%d).\n", r);
             rdev->accel_working = false;
         }
     }
......
...@@ -88,6 +88,7 @@ extern int radeon_benchmarking; ...@@ -88,6 +88,7 @@ extern int radeon_benchmarking;
extern int radeon_testing; extern int radeon_testing;
extern int radeon_connector_table; extern int radeon_connector_table;
extern int radeon_tv; extern int radeon_tv;
extern int radeon_new_pll;
/* /*
* Copy from radeon_drv.h so we don't have to include both and have conflicting * Copy from radeon_drv.h so we don't have to include both and have conflicting
...@@ -208,6 +209,8 @@ struct radeon_bo { ...@@ -208,6 +209,8 @@ struct radeon_bo {
/* Protected by gem.mutex */ /* Protected by gem.mutex */
struct list_head list; struct list_head list;
/* Protected by tbo.reserved */ /* Protected by tbo.reserved */
u32 placements[3];
struct ttm_placement placement;
struct ttm_buffer_object tbo; struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap; struct ttm_bo_kmap_obj kmap;
unsigned pin_count; unsigned pin_count;
...@@ -1012,6 +1015,7 @@ extern void radeon_surface_init(struct radeon_device *rdev); ...@@ -1012,6 +1015,7 @@ extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save { struct r100_mc_save {
......
@@ -70,6 +70,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
     int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
     struct _ATOM_GPIO_I2C_INFO *i2c_info;
     uint16_t data_offset;
+    int i;
 
     memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
     i2c.valid = false;
@@ -78,8 +79,11 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
 
     i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
-    gpio = &i2c_info->asGPIO_Info[id];
+    for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+        gpio = &i2c_info->asGPIO_Info[i];
+        if (gpio->sucI2cId.ucAccess == id) {
     i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
     i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
     i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
@@ -110,6 +114,8 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
     i2c.i2c_id = gpio->sucI2cId.ucAccess;
 
     i2c.valid = true;
+        }
+    }
 
     return i2c;
 }
@@ -503,6 +509,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                             usRecordOffset));
                     ATOM_I2C_RECORD *i2c_record;
                     ATOM_HPD_INT_RECORD *hpd_record;
+                    ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
                     hpd.hpd = RADEON_HPD_NONE;
 
                     while (record->ucRecordType > 0
@@ -514,10 +521,12 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                             i2c_record =
                                 (ATOM_I2C_RECORD *)
                                 record;
+                            i2c_config =
+                                (ATOM_I2C_ID_CONFIG_ACCESS *)
+                                &i2c_record->sucI2cId;
                             ddc_bus = radeon_lookup_i2c_gpio(rdev,
-                                                             i2c_record->
-                                                             sucI2cId.
-                                                             bfI2C_LineMux);
+                                                             i2c_config->
+                                                             ucAccess);
                             break;
                         case ATOM_HPD_INT_RECORD_TYPE:
                             hpd_record =
@@ -670,22 +679,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
         dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
 
-        if ((rdev->family == CHIP_RS690) ||
-            (rdev->family == CHIP_RS740)) {
-            if ((i == ATOM_DEVICE_DFP2_INDEX)
-                && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
-                bios_connectors[i].line_mux =
-                    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-            else if ((i == ATOM_DEVICE_DFP3_INDEX)
-                     && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
-                bios_connectors[i].line_mux =
-                    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-            else
-                bios_connectors[i].line_mux =
-                    ci.sucI2cId.sbfAccess.bfI2C_LineMux;
-        } else
-            bios_connectors[i].line_mux =
-                ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+        bios_connectors[i].line_mux =
+            ci.sucI2cId.ucAccess;
 
         /* give tv unique connector ids */
         if (i == ATOM_DEVICE_TV1_INDEX) {
@@ -876,6 +871,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
          * pre-DCE 3.0 r6xx hardware.  This might need to be adjusted per
          * family.
          */
+        if (!radeon_new_pll)
         p1pll->pll_out_min = 64800;
     }
@@ -1006,6 +1002,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
     struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
     uint8_t frev, crev;
     struct radeon_atom_ss *ss = NULL;
+    int i;
 
     if (id > ATOM_MAX_SS_ENTRY)
         return NULL;
@@ -1023,12 +1020,17 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
         if (!ss)
             return NULL;
 
-        ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
-        ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
-        ss->step = ss_info->asSS_Info[id].ucSS_Step;
-        ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
-        ss->range = ss_info->asSS_Info[id].ucSS_Range;
-        ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
+        for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
+            if (ss_info->asSS_Info[i].ucSS_Id == id) {
+                ss->percentage =
+                    le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
+                ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
+                ss->step = ss_info->asSS_Info[i].ucSS_Step;
+                ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
+                ss->range = ss_info->asSS_Info[i].ucSS_Range;
+                ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+            }
+        }
     }
     return ss;
 }
......
@@ -1103,10 +1103,12 @@ radeon_add_atom_connector(struct drm_device *dev,
         drm_connector_attach_property(&radeon_connector->base,
                                       rdev->mode_info.coherent_mode_property,
                                       1);
+        if (connector_type == DRM_MODE_CONNECTOR_DVII) {
         radeon_connector->dac_load_detect = true;
         drm_connector_attach_property(&radeon_connector->base,
                                       rdev->mode_info.load_detect_property,
                                       1);
+        }
         break;
     case DRM_MODE_CONNECTOR_HDMIA:
     case DRM_MODE_CONNECTOR_HDMIB:
@@ -1141,14 +1143,19 @@ radeon_add_atom_connector(struct drm_device *dev,
         ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
         if (ret)
             goto failed;
+        if (i2c_bus->valid) {
             /* add DP i2c bus */
             radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
-        if (i2c_bus->valid) {
+            if (!radeon_dig_connector->dp_i2c_bus)
+                goto failed;
             radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
             if (!radeon_connector->ddc_bus)
                 goto failed;
         }
         subpixel_order = SubPixelHorizontalRGB;
+        drm_connector_attach_property(&radeon_connector->base,
+                                      rdev->mode_info.coherent_mode_property,
+                                      1);
         break;
     case DRM_MODE_CONNECTOR_SVIDEO:
     case DRM_MODE_CONNECTOR_Composite:
@@ -1183,7 +1190,6 @@ radeon_add_atom_connector(struct drm_device *dev,
             if (!radeon_connector->ddc_bus)
                 goto failed;
         }
-        drm_mode_create_scaling_mode_property(dev);
         drm_connector_attach_property(&radeon_connector->base,
                                       dev->mode_config.scaling_mode_property,
                                       DRM_MODE_SCALE_FULLSCREEN);
......
@@ -44,10 +44,11 @@ void radeon_surface_init(struct radeon_device *rdev)
     if (rdev->family < CHIP_R600) {
         int i;
 
-        for (i = 0; i < 8; i++) {
-            WREG32(RADEON_SURFACE0_INFO +
-                   i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
-                   0);
+        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+            if (rdev->surface_regs[i].bo)
+                radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+            else
+                radeon_clear_surface_reg(rdev, i);
         }
         /* enable surfaces */
         WREG32(RADEON_SURFACE_CNTL, 0);
@@ -487,8 +488,10 @@ int radeon_atombios_init(struct radeon_device *rdev)
 
 void radeon_atombios_fini(struct radeon_device *rdev)
 {
+    if (rdev->mode_info.atom_context) {
     kfree(rdev->mode_info.atom_context->scratch);
     kfree(rdev->mode_info.atom_context);
+    }
     kfree(rdev->mode_info.atom_card_info);
 }
......
...@@ -560,6 +560,98 @@ void radeon_compute_pll(struct radeon_pll *pll, ...@@ -560,6 +560,98 @@ void radeon_compute_pll(struct radeon_pll *pll,
*post_div_p = best_post_div; *post_div_p = best_post_div;
} }
void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint64_t freq,
uint32_t *dot_clock_p,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
uint32_t *post_div_p,
int flags)
{
fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
fixed20_12 pll_out_max, pll_out_min;
fixed20_12 pll_in_max, pll_in_min;
fixed20_12 reference_freq;
fixed20_12 error, ffreq, a, b;
pll_out_max.full = rfixed_const(pll->pll_out_max);
pll_out_min.full = rfixed_const(pll->pll_out_min);
pll_in_max.full = rfixed_const(pll->pll_in_max);
pll_in_min.full = rfixed_const(pll->pll_in_min);
reference_freq.full = rfixed_const(pll->reference_freq);
do_div(freq, 10);
ffreq.full = rfixed_const(freq);
error.full = rfixed_const(100 * 100);
/* max p */
p.full = rfixed_div(pll_out_max, ffreq);
p.full = rfixed_floor(p);
/* min m */
m.full = rfixed_div(reference_freq, pll_in_max);
m.full = rfixed_ceil(m);
while (1) {
n.full = rfixed_div(ffreq, reference_freq);
n.full = rfixed_mul(n, m);
n.full = rfixed_mul(n, p);
f_vco.full = rfixed_div(n, m);
f_vco.full = rfixed_mul(f_vco, reference_freq);
f_pclk.full = rfixed_div(f_vco, p);
if (f_pclk.full > ffreq.full)
error.full = f_pclk.full - ffreq.full;
else
error.full = ffreq.full - f_pclk.full;
error.full = rfixed_div(error, f_pclk);
a.full = rfixed_const(100 * 100);
error.full = rfixed_mul(error, a);
a.full = rfixed_mul(m, p);
a.full = rfixed_div(n, a);
best_freq.full = rfixed_mul(reference_freq, a);
if (rfixed_trunc(error) < 25)
break;
a.full = rfixed_const(1);
m.full = m.full + a.full;
a.full = rfixed_div(reference_freq, m);
if (a.full >= pll_in_min.full)
continue;
m.full = rfixed_div(reference_freq, pll_in_max);
m.full = rfixed_ceil(m);
a.full= rfixed_const(1);
p.full = p.full - a.full;
a.full = rfixed_mul(p, ffreq);
if (a.full >= pll_out_min.full)
continue;
else {
DRM_ERROR("Unable to find pll dividers\n");
break;
}
}
a.full = rfixed_const(10);
b.full = rfixed_mul(n, a);
frac_n.full = rfixed_floor(n);
frac_n.full = rfixed_mul(frac_n, a);
frac_n.full = b.full - frac_n.full;
*dot_clock_p = rfixed_trunc(best_freq);
*fb_div_p = rfixed_trunc(n);
*frac_fb_div_p = rfixed_trunc(frac_n);
*ref_div_p = rfixed_trunc(m);
*post_div_p = rfixed_trunc(p);
DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
}
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{ {
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
@@ -660,7 +752,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
             return -ENOMEM;
 
         rdev->mode_info.coherent_mode_property->values[0] = 0;
-        rdev->mode_info.coherent_mode_property->values[0] = 1;
+        rdev->mode_info.coherent_mode_property->values[1] = 1;
     }
 
     if (!ASIC_IS_AVIVO(rdev)) {
@@ -684,7 +776,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
     if (!rdev->mode_info.load_detect_property)
         return -ENOMEM;
     rdev->mode_info.load_detect_property->values[0] = 0;
-    rdev->mode_info.load_detect_property->values[0] = 1;
+    rdev->mode_info.load_detect_property->values[1] = 1;
 
     drm_mode_create_scaling_mode_property(rdev->ddev);
......
...@@ -86,6 +86,7 @@ int radeon_benchmarking = 0; ...@@ -86,6 +86,7 @@ int radeon_benchmarking = 0;
int radeon_testing = 0; int radeon_testing = 0;
int radeon_connector_table = 0; int radeon_connector_table = 0;
int radeon_tv = 1; int radeon_tv = 1;
int radeon_new_pll = 1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444); module_param_named(no_wb, radeon_no_wb, int, 0444);
...@@ -120,6 +121,9 @@ module_param_named(connector_table, radeon_connector_table, int, 0444); ...@@ -120,6 +121,9 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444); module_param_named(tv, radeon_tv, int, 0444);
MODULE_PARM_DESC(r4xx_atom, "Select new PLL code for AVIVO chips");
module_param_named(new_pll, radeon_new_pll, int, 0444);
static int radeon_suspend(struct drm_device *dev, pm_message_t state) static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{ {
drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_private_t *dev_priv = dev->dev_private;
......
@@ -197,9 +197,8 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
         r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
                 radeon_fence_signaled(fence), timeout);
         radeon_irq_kms_sw_irq_put(rdev);
-        if (unlikely(r == -ERESTARTSYS)) {
-            return -EBUSY;
-        }
+        if (unlikely(r < 0))
+            return r;
     } else {
         radeon_irq_kms_sw_irq_get(rdev);
         r = wait_event_timeout(rdev->fence_drv.queue,
......
...@@ -38,6 +38,23 @@ typedef union rfixed { ...@@ -38,6 +38,23 @@ typedef union rfixed {
#define fixed_init_half(A) { .full = rfixed_const_half((A)) } #define fixed_init_half(A) { .full = rfixed_const_half((A)) }
#define rfixed_trunc(A) ((A).full >> 12) #define rfixed_trunc(A) ((A).full >> 12)
static inline u32 rfixed_floor(fixed20_12 A)
{
u32 non_frac = rfixed_trunc(A);
return rfixed_const(non_frac);
}
static inline u32 rfixed_ceil(fixed20_12 A)
{
u32 non_frac = rfixed_trunc(A);
if (A.full > rfixed_const(non_frac))
return rfixed_const(non_frac + 1);
else
return rfixed_const(non_frac);
}
static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B) static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
{ {
u64 tmp = ((u64)A.full << 13); u64 tmp = ((u64)A.full << 13);
......
@@ -30,10 +30,19 @@
 #include "radeon.h"
 #include "radeon_drm.h"
 
-/*
- * Driver load/unload
- */
+int radeon_driver_unload_kms(struct drm_device *dev)
+{
+    struct radeon_device *rdev = dev->dev_private;
+
+    if (rdev == NULL)
+        return 0;
+    radeon_modeset_fini(rdev);
+    radeon_device_fini(rdev);
+    kfree(rdev);
+    dev->dev_private = NULL;
+    return 0;
+}
+
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 {
     struct radeon_device *rdev;
@@ -62,31 +71,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
      */
     r = radeon_device_init(rdev, dev, dev->pdev, flags);
     if (r) {
-        DRM_ERROR("Fatal error while trying to initialize radeon.\n");
-        return r;
+        dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+        goto out;
     }
     /* Again modeset_init should fail only on fatal error
      * otherwise it should provide enough functionalities
      * for shadowfb to run
      */
     r = radeon_modeset_init(rdev);
-    if (r) {
+    if (r)
+        dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+out:
+    if (r)
+        radeon_driver_unload_kms(dev);
     return r;
-    }
-    return 0;
-}
-
-int radeon_driver_unload_kms(struct drm_device *dev)
-{
-    struct radeon_device *rdev = dev->dev_private;
-
-    if (rdev == NULL)
-        return 0;
-    radeon_modeset_fini(rdev);
-    radeon_device_fini(rdev);
-    kfree(rdev);
-    dev->dev_private = NULL;
-    return 0;
 }
......
...@@ -437,6 +437,15 @@ extern void radeon_compute_pll(struct radeon_pll *pll, ...@@ -437,6 +437,15 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *post_div_p, uint32_t *post_div_p,
int flags); int flags);
extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint64_t freq,
uint32_t *dot_clock_p,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
uint32_t *post_div_p,
int flags);
extern void radeon_setup_encoder_clones(struct drm_device *dev); extern void radeon_setup_encoder_clones(struct drm_device *dev);
struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
......
@@ -75,6 +75,25 @@ static inline u32 radeon_ttm_flags_from_domain(u32 domain)
     return flags;
 }
 
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
+{
+    u32 c = 0;
+
+    rbo->placement.fpfn = 0;
+    rbo->placement.lpfn = 0;
+    rbo->placement.placement = rbo->placements;
+    rbo->placement.busy_placement = rbo->placements;
+    if (domain & RADEON_GEM_DOMAIN_VRAM)
+        rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+                               TTM_PL_FLAG_VRAM;
+    if (domain & RADEON_GEM_DOMAIN_GTT)
+        rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+    if (domain & RADEON_GEM_DOMAIN_CPU)
+        rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+    rbo->placement.num_placement = c;
+    rbo->placement.num_busy_placement = c;
+}
+
 int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
                      unsigned long size, bool kernel, u32 domain,
                      struct radeon_bo **bo_ptr)
@@ -102,15 +121,14 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
     INIT_LIST_HEAD(&bo->list);
 
     flags = radeon_ttm_flags_from_domain(domain);
-retry:
+    /* Kernel allocation are uninterruptible */
     r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
-                               flags, 0, 0, true, NULL, size,
+                               flags, 0, 0, !kernel, NULL, size,
                                &radeon_ttm_bo_destroy);
     if (unlikely(r != 0)) {
-        if (r == -ERESTART)
-            goto retry;
-        /* ttm call radeon_ttm_object_object_destroy if error happen */
-        dev_err(rdev->dev, "object_init failed for (%ld, 0x%08X)\n",
+        if (r != -ERESTARTSYS)
+            dev_err(rdev->dev,
+                "object_init failed for (%ld, 0x%08X)\n",
                 size, flags);
         return r;
     }
@@ -169,40 +187,32 @@ void radeon_bo_unref(struct radeon_bo **bo)
 int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 {
-    u32 flags;
-    u32 tmp;
-    int r;
+    int r, i;
 
-    flags = radeon_ttm_flags_from_domain(domain);
+    radeon_ttm_placement_from_domain(bo, domain);
     if (bo->pin_count) {
         bo->pin_count++;
         if (gpu_addr)
             *gpu_addr = radeon_bo_gpu_offset(bo);
         return 0;
     }
-    tmp = bo->tbo.mem.placement;
-    ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
-    bo->tbo.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT |
-        TTM_PL_MASK_CACHING;
-retry:
-    r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
-                                   true, false);
+    radeon_ttm_placement_from_domain(bo, domain);
+    for (i = 0; i < bo->placement.num_placement; i++)
+        bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+    r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
     if (likely(r == 0)) {
         bo->pin_count = 1;
         if (gpu_addr != NULL)
             *gpu_addr = radeon_bo_gpu_offset(bo);
     }
-    if (unlikely(r != 0)) {
-        if (r == -ERESTART)
-            goto retry;
+    if (unlikely(r != 0))
         dev_err(bo->rdev->dev, "%p pin failed\n", bo);
-    }
     return r;
 }
 
 int radeon_bo_unpin(struct radeon_bo *bo)
 {
-    int r;
+    int r, i;
 
     if (!bo->pin_count) {
         dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
@@ -211,18 +221,12 @@ int radeon_bo_unpin(struct radeon_bo *bo)
     bo->pin_count--;
     if (bo->pin_count)
         return 0;
-    bo->tbo.proposed_placement = bo->tbo.mem.placement &
-        ~TTM_PL_FLAG_NO_EVICT;
-retry:
-    r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
-                                   true, false);
-    if (unlikely(r != 0)) {
-        if (r == -ERESTART)
-            goto retry;
+    for (i = 0; i < bo->placement.num_placement; i++)
+        bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+    r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
+    if (unlikely(r != 0))
         dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
-        return r;
-    }
-    return 0;
+    return r;
 }
 
 int radeon_bo_evict_vram(struct radeon_device *rdev)
@@ -326,22 +330,18 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
         bo = lobj->bo;
         if (!bo->pin_count) {
             if (lobj->wdomain) {
-                bo->tbo.proposed_placement =
-                    radeon_ttm_flags_from_domain(lobj->wdomain);
+                radeon_ttm_placement_from_domain(bo,
+                                                 lobj->wdomain);
             } else {
-                bo->tbo.proposed_placement =
-                    radeon_ttm_flags_from_domain(lobj->rdomain);
+                radeon_ttm_placement_from_domain(bo,
+                                                 lobj->rdomain);
             }
-retry:
             r = ttm_buffer_object_validate(&bo->tbo,
-                                           bo->tbo.proposed_placement,
+                                           &bo->placement,
                                            true, false);
-            if (unlikely(r)) {
-                if (r == -ERESTART)
-                    goto retry;
+            if (unlikely(r))
                 return r;
-            }
         }
         lobj->gpu_offset = radeon_bo_gpu_offset(bo);
         lobj->tiling_flags = bo->tiling_flags;
         if (fence) {
@@ -378,7 +378,7 @@ int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
     return ttm_fbdev_mmap(vma, &bo->tbo);
 }
 
-static int radeon_bo_get_surface_reg(struct radeon_bo *bo)
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 {
     struct radeon_device *rdev = bo->rdev;
     struct radeon_surface_reg *reg;
......
...@@ -175,5 +175,5 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, ...@@ -175,5 +175,5 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem); struct ttm_mem_reg *mem);
extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
#endif #endif
@@ -197,16 +197,19 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
     return 0;
 }
 
-static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
+static void radeon_evict_flags(struct ttm_buffer_object *bo,
+                               struct ttm_placement *placement)
 {
-    uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
+    struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
     switch (bo->mem.mem_type) {
+    case TTM_PL_VRAM:
+        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+        break;
+    case TTM_PL_TT:
     default:
-        return (cur_placement & ~TTM_PL_MASK_CACHING) |
-            TTM_PL_FLAG_SYSTEM |
-            TTM_PL_FLAG_CACHED;
+        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
     }
+    *placement = rbo->placement;
 }
 
 static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
@@ -283,14 +286,21 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
     struct radeon_device *rdev;
     struct ttm_mem_reg *old_mem = &bo->mem;
     struct ttm_mem_reg tmp_mem;
-    uint32_t proposed_placement;
+    u32 placements;
+    struct ttm_placement placement;
     int r;
 
     rdev = radeon_get_rdev(bo->bdev);
     tmp_mem = *new_mem;
     tmp_mem.mm_node = NULL;
-    proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-    r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
+    placement.fpfn = 0;
+    placement.lpfn = 0;
+    placement.num_placement = 1;
+    placement.placement = &placements;
+    placement.num_busy_placement = 1;
+    placement.busy_placement = &placements;
+    placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+    r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                          interruptible, no_wait);
     if (unlikely(r)) {
         return r;
@@ -329,15 +339,21 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
     struct radeon_device *rdev;
     struct ttm_mem_reg *old_mem = &bo->mem;
     struct ttm_mem_reg tmp_mem;
-    uint32_t proposed_flags;
+    struct ttm_placement placement;
+    u32 placements;
     int r;
 
     rdev = radeon_get_rdev(bo->bdev);
     tmp_mem = *new_mem;
     tmp_mem.mm_node = NULL;
-    proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-    r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
-                         interruptible, no_wait);
+    placement.fpfn = 0;
+    placement.lpfn = 0;
+    placement.num_placement = 1;
+    placement.placement = &placements;
+    placement.num_busy_placement = 1;
+    placement.busy_placement = &placements;
+    placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+    r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
     if (unlikely(r)) {
         return r;
     }
@@ -407,18 +423,6 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
     return r;
 }
 
-const uint32_t radeon_mem_prios[] = {
-    TTM_PL_VRAM,
-    TTM_PL_TT,
-    TTM_PL_SYSTEM,
-};
-
-const uint32_t radeon_busy_prios[] = {
-    TTM_PL_TT,
-    TTM_PL_VRAM,
-    TTM_PL_SYSTEM,
-};
-
 static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
                                 bool lazy, bool interruptible)
 {
@@ -446,10 +450,6 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
 }
 
 static struct ttm_bo_driver radeon_bo_driver = {
-    .mem_type_prio = radeon_mem_prios,
-    .mem_busy_prio = radeon_busy_prios,
-    .num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
-    .num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
     .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
     .invalidate_caches = &radeon_invalidate_caches,
     .init_mem_type = &radeon_init_mem_type,
@@ -483,7 +483,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
         return r;
     }
     r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
-                       0, rdev->mc.real_vram_size >> PAGE_SHIFT);
+                       rdev->mc.real_vram_size >> PAGE_SHIFT);
     if (r) {
         DRM_ERROR("Failed initializing VRAM heap.\n");
         return r;
@@ -506,7 +506,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
     DRM_INFO("radeon: %uM of VRAM memory ready\n",
              (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
     r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
-                       0, rdev->mc.gtt_size >> PAGE_SHIFT);
+                       rdev->mc.gtt_size >> PAGE_SHIFT);
     if (r) {
         DRM_ERROR("Failed initializing GTT heap.\n");
         return r;
......
...@@ -430,6 +430,8 @@ int rs400_resume(struct radeon_device *rdev) ...@@ -430,6 +430,8 @@ int rs400_resume(struct radeon_device *rdev)
radeon_combios_asic_init(rdev->ddev); radeon_combios_asic_init(rdev->ddev);
/* Resume clock after posting */ /* Resume clock after posting */
r300_clock_startup(rdev); r300_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
return rs400_startup(rdev); return rs400_startup(rdev);
} }
......
...@@ -586,6 +586,8 @@ int rs600_resume(struct radeon_device *rdev) ...@@ -586,6 +586,8 @@ int rs600_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context); atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */ /* Resume clock after posting */
rv515_clock_startup(rdev); rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
return rs600_startup(rdev); return rs600_startup(rdev);
} }
......
@@ -260,8 +260,9 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
     b.full = rfixed_const(mode->crtc_hdisplay);
     c.full = rfixed_const(256);
-    a.full = rfixed_mul(wm->num_line_pair, b);
-    request_fifo_depth.full = rfixed_div(a, c);
+    a.full = rfixed_div(b, c);
+    request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+    request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
     if (a.full < rfixed_const(4)) {
         wm->lb_request_fifo_depth = 4;
     } else {
@@ -390,6 +391,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
     a.full = rfixed_const(16);
     wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
     wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+    wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
 
     /* Determine estimated width */
     estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
@@ -399,6 +401,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
     } else {
         a.full = rfixed_const(16);
         wm->priority_mark.full = rfixed_div(estimated_width, a);
+        wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
         wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
     }
 }
...@@ -655,6 +658,8 @@ int rs690_resume(struct radeon_device *rdev) ...@@ -655,6 +658,8 @@ int rs690_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context); atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */ /* Resume clock after posting */
rv515_clock_startup(rdev); rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
return rs690_startup(rdev); return rs690_startup(rdev);
} }
......
...@@ -513,6 +513,8 @@ int rv515_resume(struct radeon_device *rdev) ...@@ -513,6 +513,8 @@ int rv515_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context); atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */ /* Resume clock after posting */
rv515_clock_startup(rdev); rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
return rv515_startup(rdev); return rv515_startup(rdev);
} }
@@ -889,8 +891,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
     b.full = rfixed_const(mode->crtc_hdisplay);
     c.full = rfixed_const(256);
-    a.full = rfixed_mul(wm->num_line_pair, b);
-    request_fifo_depth.full = rfixed_div(a, c);
+    a.full = rfixed_div(b, c);
+    request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+    request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
     if (a.full < rfixed_const(4)) {
         wm->lb_request_fifo_depth = 4;
     } else {
@@ -992,15 +995,17 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
     a.full = rfixed_const(16);
     wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
     wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+    wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
 
     /* Determine estimated width */
     estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
     estimated_width.full = rfixed_div(estimated_width, consumption_time);
     if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
-        wm->priority_mark.full = rfixed_const(10);
+        wm->priority_mark.full = wm->priority_mark_max.full;
     } else {
         a.full = rfixed_const(16);
         wm->priority_mark.full = rfixed_div(estimated_width, a);
+        wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
         wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
     }
 }
......
@@ -874,6 +874,14 @@ static int rv770_startup(struct radeon_device *rdev)
 {
     int r;
 
+    if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+        r = r600_init_microcode(rdev);
+        if (r) {
+            DRM_ERROR("Failed to load firmware!\n");
+            return r;
+        }
+    }
+
     rv770_mc_program(rdev);
     if (rdev->flags & RADEON_IS_AGP) {
         rv770_agp_enable(rdev);
@@ -1039,25 +1047,17 @@ int rv770_init(struct radeon_device *rdev)
     rdev->ih.ring_obj = NULL;
     r600_ih_ring_init(rdev, 64 * 1024);
 
-    if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-        r = r600_init_microcode(rdev);
-        if (r) {
-            DRM_ERROR("Failed to load firmware!\n");
-            return r;
-        }
-    }
-
     r = r600_pcie_gart_init(rdev);
     if (r)
         return r;
 
-    rdev->accel_working = true;
     r = r600_blit_init(rdev);
     if (r) {
-        DRM_ERROR("radeon: failled blitter (%d).\n", r);
-        rdev->accel_working = false;
+        DRM_ERROR("radeon: failed blitter (%d).\n", r);
+        return r;
     }
+    rdev->accel_working = true;
     r = rv770_startup(rdev);
     if (r) {
         rv770_suspend(rdev);
@@ -1069,12 +1069,12 @@ int rv770_init(struct radeon_device *rdev)
     if (rdev->accel_working) {
         r = radeon_ib_pool_init(rdev);
         if (r) {
-            DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+            DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
             rdev->accel_working = false;
         }
         r = r600_ib_test(rdev);
         if (r) {
-            DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+            DRM_ERROR("radeon: failed testing IB (%d).\n", r);
             rdev->accel_working = false;
         }
     }
......
...@@ -27,6 +27,14 @@ ...@@ -27,6 +27,14 @@
/* /*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/ */
/* Notes:
*
* We store bo pointer in drm_mm_node struct so we know which bo own a
* specific node. There is no protection on the pointer, thus to make
* sure things don't go berserk you have to access this pointer while
* holding the global lru lock and make sure anytime you free a node you
* reset the pointer to NULL.
*/
#include "ttm/ttm_module.h" #include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h" #include "ttm/ttm_bo_driver.h"
...@@ -51,6 +59,60 @@ static struct attribute ttm_bo_count = { ...@@ -51,6 +59,60 @@ static struct attribute ttm_bo_count = {
.mode = S_IRUGO .mode = S_IRUGO
}; };
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
int i;
for (i = 0; i <= TTM_PL_PRIV5; i++)
if (flags & (1 << i)) {
*mem_type = i;
return 0;
}
return -EINVAL;
}
static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
struct ttm_mem_type_manager *man)
{
printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size);
printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
man->available_caching);
printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
man->default_caching);
spin_lock(&glob->lru_lock);
drm_mm_debug_table(&man->manager, TTM_PFX);
spin_unlock(&glob->lru_lock);
}
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_type_manager *man;
int i, ret, mem_type;
printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
bo, bo->mem.num_pages, bo->mem.size >> 10,
bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
return;
man = &bdev->man[mem_type];
printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
i, placement->placement[i], mem_type);
ttm_mem_type_manager_debug(glob, man);
}
}
static ssize_t ttm_bo_global_show(struct kobject *kobj, static ssize_t ttm_bo_global_show(struct kobject *kobj,
struct attribute *attr, struct attribute *attr,
char *buffer) char *buffer)
@@ -117,7 +179,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
         ret = wait_event_interruptible(bo->event_queue,
                                        atomic_read(&bo->reserved) == 0);
         if (unlikely(ret != 0))
-            return -ERESTART;
+            return ret;
     } else {
         wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
     }
...@@ -247,7 +309,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve); ...@@ -247,7 +309,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
/* /*
* Call bo->mutex locked. * Call bo->mutex locked.
*/ */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{ {
struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_device *bdev = bo->bdev;
@@ -329,14 +390,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
     }
 
     if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-        struct ttm_mem_reg *old_mem = &bo->mem;
-        uint32_t save_flags = old_mem->placement;
-
-        *old_mem = *mem;
+        bo->mem = *mem;
         mem->mm_node = NULL;
-        ttm_flag_masked(&save_flags, mem->placement,
-                        TTM_PL_MASK_MEMTYPE);
         goto moved;
     }
...@@ -419,6 +474,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) ...@@ -419,6 +474,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
kref_put(&bo->list_kref, ttm_bo_ref_bug); kref_put(&bo->list_kref, ttm_bo_ref_bug);
} }
if (bo->mem.mm_node) { if (bo->mem.mm_node) {
bo->mem.mm_node->private = NULL;
drm_mm_put_block(bo->mem.mm_node); drm_mm_put_block(bo->mem.mm_node);
bo->mem.mm_node = NULL; bo->mem.mm_node = NULL;
} }
@@ -555,24 +611,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
-static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
-                        bool interruptible, bool no_wait)
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+                        bool no_wait)
 {
-    int ret = 0;
     struct ttm_bo_device *bdev = bo->bdev;
     struct ttm_bo_global *glob = bo->glob;
     struct ttm_mem_reg evict_mem;
-    uint32_t proposed_placement;
-
-    if (bo->mem.mem_type != mem_type)
-        goto out;
+    struct ttm_placement placement;
+    int ret = 0;
 
     spin_lock(&bo->lock);
     ret = ttm_bo_wait(bo, false, interruptible, no_wait);
     spin_unlock(&bo->lock);
 
     if (unlikely(ret != 0)) {
-        if (ret != -ERESTART) {
+        if (ret != -ERESTARTSYS) {
             printk(KERN_ERR TTM_PFX
                    "Failed to expire sync object before "
                    "buffer eviction.\n");
...@@ -585,116 +638,139 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, ...@@ -585,116 +638,139 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
evict_mem = bo->mem; evict_mem = bo->mem;
evict_mem.mm_node = NULL; evict_mem.mm_node = NULL;
proposed_placement = bdev->driver->evict_flags(bo); placement.fpfn = 0;
placement.lpfn = 0;
ret = ttm_bo_mem_space(bo, proposed_placement, placement.num_placement = 0;
&evict_mem, interruptible, no_wait); placement.num_busy_placement = 0;
if (unlikely(ret != 0 && ret != -ERESTART)) bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM, ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
&evict_mem, interruptible, no_wait); no_wait);
if (ret) { if (ret) {
if (ret != -ERESTART) if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX printk(KERN_ERR TTM_PFX
"Failed to find memory space for " "Failed to find memory space for "
"buffer 0x%p eviction.\n", bo); "buffer 0x%p eviction.\n", bo);
ttm_bo_mem_space_debug(bo, &placement);
}
goto out; goto out;
} }
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
no_wait); no_wait);
if (ret) { if (ret) {
if (ret != -ERESTART) if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
goto out;
}
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
if (evict_mem.mm_node) { if (evict_mem.mm_node) {
evict_mem.mm_node->private = NULL;
drm_mm_put_block(evict_mem.mm_node); drm_mm_put_block(evict_mem.mm_node);
evict_mem.mm_node = NULL; evict_mem.mm_node = NULL;
} }
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
goto out;
}
bo->evicted = true; bo->evicted = true;
out: out:
return ret; return ret;
} }
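With this change ttm_bo_evict() zero-fills a struct ttm_placement and asks the driver to populate it through bdev->driver->evict_flags(bo, &placement), so drivers describe eviction targets as a placement list instead of returning a single flags word. A hedged sketch of what such a driver callback could look like; the placement array, the flag choice and the names are assumptions for the example, not code from this merge.

/*
 * Hypothetical driver callback: on eviction, prefer cached GTT and fall
 * back to system memory. Example only.
 */
static const uint32_t example_evict_placements[] = {
	TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
};

static void example_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	placement->fpfn = 0;			/* no range restriction */
	placement->lpfn = 0;
	placement->placement = example_evict_placements;
	placement->num_placement = ARRAY_SIZE(example_evict_placements);
	placement->busy_placement = example_evict_placements;
	placement->num_busy_placement = ARRAY_SIZE(example_evict_placements);
}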
/** static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
* Repeatedly evict memory from the LRU for @mem_type until we create enough
* space, or we've evicted everything and there isn't enough space.
*/
static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem,
uint32_t mem_type, uint32_t mem_type,
bool interruptible, bool no_wait) bool interruptible, bool no_wait)
{ {
struct ttm_bo_global *glob = bdev->glob; struct ttm_bo_global *glob = bdev->glob;
struct drm_mm_node *node;
struct ttm_buffer_object *entry;
struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct list_head *lru; struct ttm_buffer_object *bo;
unsigned long num_pages = mem->num_pages; int ret, put_count = 0;
int put_count = 0;
int ret;
retry_pre_get:
ret = drm_mm_pre_get(&man->manager);
if (unlikely(ret != 0))
return ret;
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
do { bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
node = drm_mm_search_free(&man->manager, num_pages, kref_get(&bo->list_kref);
mem->page_alignment, 1); ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
if (node)
break;
lru = &man->lru;
if (list_empty(lru))
break;
entry = list_first_entry(lru, struct ttm_buffer_object, lru);
kref_get(&entry->list_kref);
ret =
ttm_bo_reserve_locked(entry, interruptible, no_wait,
false, 0);
if (likely(ret == 0)) if (likely(ret == 0))
put_count = ttm_bo_del_from_lru(entry); put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
while (put_count--) while (put_count--)
kref_put(&entry->list_kref, ttm_bo_ref_bug); kref_put(&bo->list_kref, ttm_bo_ref_bug);
ret = ttm_bo_evict(bo, interruptible, no_wait);
ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait); ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
ttm_bo_unreserve(entry); static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
struct drm_mm_node **node)
{
struct ttm_bo_global *glob = bo->glob;
unsigned long lpfn;
int ret;
kref_put(&entry->list_kref, ttm_bo_release_list); lpfn = placement->lpfn;
if (ret) if (!lpfn)
lpfn = man->size;
*node = NULL;
do {
ret = drm_mm_pre_get(&man->manager);
if (unlikely(ret))
return ret; return ret;
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
} while (1); *node = drm_mm_search_free_in_range(&man->manager,
mem->num_pages, mem->page_alignment,
if (!node) { placement->fpfn, lpfn, 1);
if (unlikely(*node == NULL)) {
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
return -ENOMEM; return 0;
} }
*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
mem->page_alignment,
placement->fpfn,
lpfn);
spin_unlock(&glob->lru_lock);
} while (*node == NULL);
return 0;
}
node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment); /**
if (unlikely(!node)) { * Repeatedly evict memory from the LRU for @mem_type until we create enough
* space, or we've evicted everything and there isn't enough space.
*/
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
bool interruptible, bool no_wait)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct drm_mm_node *node;
int ret;
do {
ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
if (unlikely(ret != 0))
return ret;
if (node)
break;
spin_lock(&glob->lru_lock);
if (list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
goto retry_pre_get; break;
} }
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
no_wait);
if (unlikely(ret != 0))
return ret;
} while (1);
if (node == NULL)
return -ENOMEM;
mem->mm_node = node; mem->mm_node = node;
mem->mem_type = mem_type; mem->mem_type = mem_type;
return 0; return 0;
...@@ -725,7 +801,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, ...@@ -725,7 +801,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
return result; return result;
} }
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
bool disallow_fixed, bool disallow_fixed,
uint32_t mem_type, uint32_t mem_type,
...@@ -758,33 +833,32 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, ...@@ -758,33 +833,32 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
* space. * space.
*/ */
int ttm_bo_mem_space(struct ttm_buffer_object *bo, int ttm_bo_mem_space(struct ttm_buffer_object *bo,
uint32_t proposed_placement, struct ttm_placement *placement,
struct ttm_mem_reg *mem, struct ttm_mem_reg *mem,
bool interruptible, bool no_wait) bool interruptible, bool no_wait)
{ {
struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_type_manager *man; struct ttm_mem_type_manager *man;
uint32_t num_prios = bdev->driver->num_mem_type_prio;
const uint32_t *prios = bdev->driver->mem_type_prio;
uint32_t i;
uint32_t mem_type = TTM_PL_SYSTEM; uint32_t mem_type = TTM_PL_SYSTEM;
uint32_t cur_flags = 0; uint32_t cur_flags = 0;
bool type_found = false; bool type_found = false;
bool type_ok = false; bool type_ok = false;
bool has_eagain = false; bool has_erestartsys = false;
struct drm_mm_node *node = NULL; struct drm_mm_node *node = NULL;
int ret; int i, ret;
mem->mm_node = NULL; mem->mm_node = NULL;
for (i = 0; i < num_prios; ++i) { for (i = 0; i <= placement->num_placement; ++i) {
mem_type = prios[i]; ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
return ret;
man = &bdev->man[mem_type]; man = &bdev->man[mem_type];
type_ok = ttm_bo_mt_compatible(man, type_ok = ttm_bo_mt_compatible(man,
bo->type == ttm_bo_type_user, bo->type == ttm_bo_type_user,
mem_type, proposed_placement, mem_type,
placement->placement[i],
&cur_flags); &cur_flags);
if (!type_ok) if (!type_ok)
...@@ -792,32 +866,22 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, ...@@ -792,32 +866,22 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags); cur_flags);
/*
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
ttm_flag_masked(&cur_flags, placement->placement[i],
~TTM_PL_MASK_MEMTYPE);
if (mem_type == TTM_PL_SYSTEM) if (mem_type == TTM_PL_SYSTEM)
break; break;
if (man->has_type && man->use_type) { if (man->has_type && man->use_type) {
type_found = true; type_found = true;
do { ret = ttm_bo_man_get_node(bo, man, placement, mem,
ret = drm_mm_pre_get(&man->manager); &node);
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
spin_lock(&glob->lru_lock);
node = drm_mm_search_free(&man->manager,
mem->num_pages,
mem->page_alignment,
1);
if (unlikely(!node)) {
spin_unlock(&glob->lru_lock);
break;
}
node = drm_mm_get_block_atomic(node,
mem->num_pages,
mem->
page_alignment);
spin_unlock(&glob->lru_lock);
} while (!node);
} }
if (node) if (node)
break; break;
...@@ -827,66 +891,64 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, ...@@ -827,66 +891,64 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = node; mem->mm_node = node;
mem->mem_type = mem_type; mem->mem_type = mem_type;
mem->placement = cur_flags; mem->placement = cur_flags;
if (node)
node->private = bo;
return 0; return 0;
} }
if (!type_found) if (!type_found)
return -EINVAL; return -EINVAL;
num_prios = bdev->driver->num_mem_busy_prio; for (i = 0; i <= placement->num_busy_placement; ++i) {
prios = bdev->driver->mem_busy_prio; ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
for (i = 0; i < num_prios; ++i) { if (ret)
mem_type = prios[i]; return ret;
man = &bdev->man[mem_type]; man = &bdev->man[mem_type];
if (!man->has_type) if (!man->has_type)
continue; continue;
if (!ttm_bo_mt_compatible(man, if (!ttm_bo_mt_compatible(man,
bo->type == ttm_bo_type_user, bo->type == ttm_bo_type_user,
mem_type, mem_type,
proposed_placement, &cur_flags)) placement->placement[i],
&cur_flags))
continue; continue;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags); cur_flags);
/*
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
ttm_flag_masked(&cur_flags, placement->placement[i],
~TTM_PL_MASK_MEMTYPE);
ret = ttm_bo_mem_force_space(bdev, mem, mem_type, ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
interruptible, no_wait); interruptible, no_wait);
if (ret == 0 && mem->mm_node) { if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags; mem->placement = cur_flags;
mem->mm_node->private = bo;
return 0; return 0;
} }
if (ret == -ERESTARTSYS)
if (ret == -ERESTART) has_erestartsys = true;
has_eagain = true;
} }
ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
ret = (has_eagain) ? -ERESTART : -ENOMEM;
return ret; return ret;
} }
EXPORT_SYMBOL(ttm_bo_mem_space); EXPORT_SYMBOL(ttm_bo_mem_space);
int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{ {
int ret = 0;
if ((atomic_read(&bo->cpu_writers) > 0) && no_wait) if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
return -EBUSY; return -EBUSY;
ret = wait_event_interruptible(bo->event_queue, return wait_event_interruptible(bo->event_queue,
atomic_read(&bo->cpu_writers) == 0); atomic_read(&bo->cpu_writers) == 0);
if (ret == -ERESTARTSYS)
ret = -ERESTART;
return ret;
} }
int ttm_bo_move_buffer(struct ttm_buffer_object *bo, int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
uint32_t proposed_placement, struct ttm_placement *placement,
bool interruptible, bool no_wait) bool interruptible, bool no_wait)
{ {
struct ttm_bo_global *glob = bo->glob; struct ttm_bo_global *glob = bo->glob;
...@@ -900,101 +962,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, ...@@ -900,101 +962,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* Have the driver move function wait for idle when necessary, * Have the driver move function wait for idle when necessary,
* instead of doing it here. * instead of doing it here.
*/ */
spin_lock(&bo->lock); spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait); ret = ttm_bo_wait(bo, false, interruptible, no_wait);
spin_unlock(&bo->lock); spin_unlock(&bo->lock);
if (ret) if (ret)
return ret; return ret;
mem.num_pages = bo->num_pages; mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT; mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment; mem.page_alignment = bo->mem.page_alignment;
/* /*
* Determine where to move the buffer. * Determine where to move the buffer.
*/ */
ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
interruptible, no_wait);
if (ret) if (ret)
goto out_unlock; goto out_unlock;
ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
out_unlock: out_unlock:
if (ret && mem.mm_node) { if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
mem.mm_node->private = NULL;
drm_mm_put_block(mem.mm_node); drm_mm_put_block(mem.mm_node);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
} }
return ret; return ret;
} }
static int ttm_bo_mem_compat(uint32_t proposed_placement, static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem) struct ttm_mem_reg *mem)
{ {
if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0) int i;
return 0;
if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
return 0;
return 1; for (i = 0; i < placement->num_placement; i++) {
if ((placement->placement[i] & mem->placement &
TTM_PL_MASK_CACHING) &&
(placement->placement[i] & mem->placement &
TTM_PL_MASK_MEM))
return i;
}
return -1;
} }
int ttm_buffer_object_validate(struct ttm_buffer_object *bo, int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
uint32_t proposed_placement, struct ttm_placement *placement,
bool interruptible, bool no_wait) bool interruptible, bool no_wait)
{ {
int ret; int ret;
BUG_ON(!atomic_read(&bo->reserved)); BUG_ON(!atomic_read(&bo->reserved));
bo->proposed_placement = proposed_placement; /* Check that range is valid */
if (placement->lpfn || placement->fpfn)
TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n", if (placement->fpfn > placement->lpfn ||
(unsigned long)proposed_placement, (placement->lpfn - placement->fpfn) < bo->num_pages)
(unsigned long)bo->mem.placement); return -EINVAL;
/* /*
* Check whether we need to move buffer. * Check whether we need to move buffer.
*/ */
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) { if (ret < 0) {
ret = ttm_bo_move_buffer(bo, bo->proposed_placement, ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
interruptible, no_wait); if (ret)
if (ret) {
if (ret != -ERESTART)
printk(KERN_ERR TTM_PFX
"Failed moving buffer. "
"Proposed placement 0x%08x\n",
bo->proposed_placement);
if (ret == -ENOMEM)
printk(KERN_ERR TTM_PFX
"Out of aperture space or "
"DRM memory quota.\n");
return ret; return ret;
} else {
/*
* Use the access and other non-mapping-related flag bits from
* the compatible memory placement flags to the active flags
*/
ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
~TTM_PL_MASK_MEMTYPE);
} }
}
/* /*
* We might need to add a TTM. * We might need to add a TTM.
*/ */
if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ret = ttm_bo_add_ttm(bo, true); ret = ttm_bo_add_ttm(bo, true);
if (ret) if (ret)
return ret; return ret;
} }
/*
* Validation has succeeded, move the access and other
* non-mapping-related flag bits from the proposed flags to
* the active flags
*/
ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
~TTM_PL_MASK_MEMTYPE);
return 0; return 0;
} }
EXPORT_SYMBOL(ttm_buffer_object_validate); EXPORT_SYMBOL(ttm_buffer_object_validate);
...@@ -1042,8 +1085,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev, ...@@ -1042,8 +1085,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
size_t acc_size, size_t acc_size,
void (*destroy) (struct ttm_buffer_object *)) void (*destroy) (struct ttm_buffer_object *))
{ {
int ret = 0; int i, c, ret = 0;
unsigned long num_pages; unsigned long num_pages;
uint32_t placements[8];
struct ttm_placement placement;
size += buffer_start & ~PAGE_MASK; size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
...@@ -1100,7 +1145,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev, ...@@ -1100,7 +1145,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
goto out_err; goto out_err;
} }
ret = ttm_buffer_object_validate(bo, flags, interruptible, false); placement.fpfn = 0;
placement.lpfn = 0;
for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
if (flags & (1 << i))
placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
placement.placement = placements;
placement.num_placement = c;
placement.busy_placement = placements;
placement.num_busy_placement = c;
ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
if (ret) if (ret)
goto out_err; goto out_err;
...@@ -1135,8 +1189,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev, ...@@ -1135,8 +1189,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
struct ttm_buffer_object **p_bo) struct ttm_buffer_object **p_bo)
{ {
struct ttm_buffer_object *bo; struct ttm_buffer_object *bo;
int ret;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
int ret;
size_t acc_size = size_t acc_size =
ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
...@@ -1161,66 +1215,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev, ...@@ -1161,66 +1215,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
return ret; return ret;
} }
static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
uint32_t mem_type, bool allow_errors)
{
int ret;
spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, false, false);
spin_unlock(&bo->lock);
if (ret && allow_errors)
goto out;
if (bo->mem.mem_type == mem_type)
ret = ttm_bo_evict(bo, mem_type, false, false);
if (ret) {
if (allow_errors) {
goto out;
} else {
ret = 0;
printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
}
}
out:
return ret;
}
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
struct list_head *head,
unsigned mem_type, bool allow_errors) unsigned mem_type, bool allow_errors)
{ {
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob; struct ttm_bo_global *glob = bdev->glob;
struct ttm_buffer_object *entry;
int ret; int ret;
int put_count;
/* /*
* Can't use standard list traversal since we're unlocking. * Can't use standard list traversal since we're unlocking.
*/ */
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
while (!list_empty(head)) {
entry = list_first_entry(head, struct ttm_buffer_object, lru);
kref_get(&entry->list_kref);
ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
put_count = ttm_bo_del_from_lru(entry);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
while (put_count--) ret = ttm_mem_evict_first(bdev, mem_type, false, false);
kref_put(&entry->list_kref, ttm_bo_ref_bug); if (ret) {
BUG_ON(ret); if (allow_errors) {
ret = ttm_bo_leave_list(entry, mem_type, allow_errors); return ret;
ttm_bo_unreserve(entry); } else {
kref_put(&entry->list_kref, ttm_bo_release_list); printk(KERN_ERR TTM_PFX
"Cleanup eviction failed\n");
}
}
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
} }
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
return 0; return 0;
} }
...@@ -1247,7 +1267,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) ...@@ -1247,7 +1267,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
ret = 0; ret = 0;
if (mem_type > 0) { if (mem_type > 0) {
ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false); ttm_bo_force_list_clean(bdev, mem_type, false);
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
if (drm_mm_clean(&man->manager)) if (drm_mm_clean(&man->manager))
...@@ -1280,12 +1300,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) ...@@ -1280,12 +1300,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
return 0; return 0;
} }
return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true); return ttm_bo_force_list_clean(bdev, mem_type, true);
} }
EXPORT_SYMBOL(ttm_bo_evict_mm); EXPORT_SYMBOL(ttm_bo_evict_mm);
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
unsigned long p_offset, unsigned long p_size) unsigned long p_size)
{ {
int ret = -EINVAL; int ret = -EINVAL;
struct ttm_mem_type_manager *man; struct ttm_mem_type_manager *man;
...@@ -1315,7 +1335,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, ...@@ -1315,7 +1335,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
type); type);
return ret; return ret;
} }
ret = drm_mm_init(&man->manager, p_offset, p_size); ret = drm_mm_init(&man->manager, 0, p_size);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -1464,7 +1484,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, ...@@ -1464,7 +1484,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
* Initialize the system memory buffer type. * Initialize the system memory buffer type.
* Other types need to be driver / IOCTL initialized. * Other types need to be driver / IOCTL initialized.
*/ */
ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0); ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_sys; goto out_no_sys;
...@@ -1694,7 +1714,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible, ...@@ -1694,7 +1714,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
ret = wait_event_interruptible ret = wait_event_interruptible
(bo->event_queue, atomic_read(&bo->reserved) == 0); (bo->event_queue, atomic_read(&bo->reserved) == 0);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return -ERESTART; return ret;
} else { } else {
wait_event(bo->event_queue, wait_event(bo->event_queue,
atomic_read(&bo->reserved) == 0); atomic_read(&bo->reserved) == 0);
......
...@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = ttm_bo_wait(bo, false, true, false); ret = ttm_bo_wait(bo, false, true, false);
spin_unlock(&bo->lock); spin_unlock(&bo->lock);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
retval = (ret != -ERESTART) ? retval = (ret != -ERESTARTSYS) ?
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
goto out_unlock; goto out_unlock;
} }
...@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, ...@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
switch (ret) { switch (ret) {
case 0: case 0:
break; break;
case -ERESTART:
ret = -EINTR;
goto out_unref;
case -EBUSY: case -EBUSY:
ret = -EAGAIN; ret = -EAGAIN;
goto out_unref; goto out_unref;
...@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf, ...@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
switch (ret) { switch (ret) {
case 0: case 0:
break; break;
case -ERESTART:
return -EINTR;
case -EBUSY: case -EBUSY:
return -EAGAIN; return -EAGAIN;
default: default:
......
...@@ -323,8 +323,10 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, ...@@ -323,8 +323,10 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
* No special dma32 zone needed. * No special dma32 zone needed.
*/ */
if (mem <= ((uint64_t) 1ULL << 32)) if (mem <= ((uint64_t) 1ULL << 32)) {
kfree(zone);
return 0; return 0;
}
/* /*
* Limit max dma32 memory to 4GB for now * Limit max dma32 memory to 4GB for now
......
...@@ -66,6 +66,13 @@ extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, ...@@ -66,6 +66,13 @@ extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size, unsigned long size,
unsigned alignment, unsigned alignment,
int atomic); int atomic);
extern struct drm_mm_node *drm_mm_get_block_range_generic(
struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
int atomic);
static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent, static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
unsigned long size, unsigned long size,
unsigned alignment) unsigned alignment)
...@@ -78,11 +85,38 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *pa ...@@ -78,11 +85,38 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *pa
{ {
return drm_mm_get_block_generic(parent, size, alignment, 1); return drm_mm_get_block_generic(parent, size, alignment, 1);
} }
static inline struct drm_mm_node *drm_mm_get_block_range(
struct drm_mm_node *parent,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment,
start, end, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
struct drm_mm_node *parent,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment,
start, end, 1);
}
extern void drm_mm_put_block(struct drm_mm_node *cur); extern void drm_mm_put_block(struct drm_mm_node *cur);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size, unsigned long size,
unsigned alignment, unsigned alignment,
int best_match); int best_match);
extern struct drm_mm_node *drm_mm_search_free_in_range(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start, extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
unsigned long size); unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm); extern void drm_mm_takedown(struct drm_mm *mm);
...@@ -99,6 +133,7 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) ...@@ -99,6 +133,7 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
return block->mm; return block->mm;
} }
extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm); int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif #endif
......
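The new drm_mm entry points declared above (drm_mm_get_block_range_generic() with its drm_mm_get_block_range()/drm_mm_get_block_atomic_range() wrappers, and drm_mm_search_free_in_range()) are what TTM's ttm_bo_man_get_node() earlier in this diff builds on. The sketch below shows how they combine under the manager lock; "mm", "lock" and the size/range parameters are placeholders supplied by the caller, and this is an illustration of the API rather than code from the commit.

/*
 * Allocate num_pages inside the given page range from a drm_mm, retrying
 * when the preallocated node cache runs dry. Example only.
 */
static struct drm_mm_node *example_alloc_in_range(struct drm_mm *mm,
						  spinlock_t *lock,
						  unsigned long num_pages,
						  unsigned alignment,
						  unsigned long fpfn,
						  unsigned long lpfn)
{
	struct drm_mm_node *node = NULL;

	do {
		/* refill the preallocated node cache outside the spinlock */
		if (drm_mm_pre_get(mm))
			return NULL;

		spin_lock(lock);
		node = drm_mm_search_free_in_range(mm, num_pages, alignment,
						   fpfn, lpfn, 1);
		if (!node) {
			spin_unlock(lock);
			return NULL;	/* nothing free inside the range */
		}
		/* may still fail if the cache ran dry; loop and refill then */
		node = drm_mm_get_block_atomic_range(node, num_pages,
						     alignment, fpfn, lpfn);
		spin_unlock(lock);
	} while (node == NULL);

	return node;
}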
...@@ -44,6 +44,29 @@ struct ttm_bo_device; ...@@ -44,6 +44,29 @@ struct ttm_bo_device;
struct drm_mm_node; struct drm_mm_node;
/**
* struct ttm_placement
*
* @fpfn: first valid page frame number to put the object
* @lpfn: last valid page frame number to put the object
 * @num_placement: number of preferred placements
 * @placement: preferred placements
 * @num_busy_placement: number of preferred placements when the buffer needs to be evicted
 * @busy_placement: preferred placements when the buffer needs to be evicted
*
* Structure indicating the placement you request for an object.
*/
struct ttm_placement {
unsigned fpfn;
unsigned lpfn;
unsigned num_placement;
const uint32_t *placement;
unsigned num_busy_placement;
const uint32_t *busy_placement;
};
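This structure is what the range-placement work in this merge revolves around: fpfn/lpfn bound where the object may end up, while the two arrays give the ordered list of acceptable domains for the normal and the must-evict case. Below is a sketch of filling one for a scanout object that has to stay in the low pages of VRAM (the scanout+cursor constraint mentioned in the merge description); the flag combination and the max_pfn parameter are assumptions for the example.

/*
 * Example only: ask for write-combined VRAM within the first max_pfn
 * pages, and keep the same constraint when room has to be made by
 * eviction. max_pfn would typically be the CPU-visible VRAM size in pages.
 */
static const uint32_t example_scanout_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
};

static void example_scanout_placement(struct ttm_placement *placement,
				      unsigned max_pfn)
{
	placement->fpfn = 0;
	placement->lpfn = max_pfn;
	placement->placement = example_scanout_flags;
	placement->num_placement = ARRAY_SIZE(example_scanout_flags);
	placement->busy_placement = example_scanout_flags;
	placement->num_busy_placement = ARRAY_SIZE(example_scanout_flags);
}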
/** /**
* struct ttm_mem_reg * struct ttm_mem_reg
* *
...@@ -109,10 +132,6 @@ struct ttm_tt; ...@@ -109,10 +132,6 @@ struct ttm_tt;
* the object is destroyed. * the object is destroyed.
* @event_queue: Queue for processes waiting on buffer object status change. * @event_queue: Queue for processes waiting on buffer object status change.
* @lock: spinlock protecting mostly synchronization members. * @lock: spinlock protecting mostly synchronization members.
* @proposed_placement: Proposed placement for the buffer. Changed only by the
* creator prior to validation as opposed to bo->mem.proposed_flags which is
* changed by the implementation prior to a buffer move if it wants to outsmart
* the buffer creator / user. This latter happens, for example, at eviction.
* @mem: structure describing current placement. * @mem: structure describing current placement.
* @persistant_swap_storage: Usually the swap storage is deleted for buffers * @persistant_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member * pinned in physical memory. If this behaviour is not desired, this member
...@@ -177,7 +196,6 @@ struct ttm_buffer_object { ...@@ -177,7 +196,6 @@ struct ttm_buffer_object {
* Members protected by the bo::reserved lock. * Members protected by the bo::reserved lock.
*/ */
uint32_t proposed_placement;
struct ttm_mem_reg mem; struct ttm_mem_reg mem;
struct file *persistant_swap_storage; struct file *persistant_swap_storage;
struct ttm_tt *ttm; struct ttm_tt *ttm;
...@@ -285,7 +303,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo) ...@@ -285,7 +303,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
* Note: It might be necessary to block validations before the * Note: It might be necessary to block validations before the
* wait by reserving the buffer. * wait by reserving the buffer.
* Returns -EBUSY if no_wait is true and the buffer is busy. * Returns -EBUSY if no_wait is true and the buffer is busy.
* Returns -ERESTART if interrupted by a signal. * Returns -ERESTARTSYS if interrupted by a signal.
*/ */
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
bool interruptible, bool no_wait); bool interruptible, bool no_wait);
...@@ -293,21 +311,22 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, ...@@ -293,21 +311,22 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
* ttm_buffer_object_validate * ttm_buffer_object_validate
* *
* @bo: The buffer object. * @bo: The buffer object.
* @proposed_placement: Proposed_placement for the buffer object. * @placement: Proposed placement for the buffer object.
* @interruptible: Sleep interruptible if sleeping. * @interruptible: Sleep interruptible if sleeping.
* @no_wait: Return immediately if the buffer is busy. * @no_wait: Return immediately if the buffer is busy.
* *
* Changes placement and caching policy of the buffer object * Changes placement and caching policy of the buffer object
 * according to bo::proposed_flags. * according to the proposed placement.
* Returns * Returns
* -EINVAL on invalid proposed_flags. * -EINVAL on invalid proposed placement.
* -ENOMEM on out-of-memory condition. * -ENOMEM on out-of-memory condition.
* -EBUSY if no_wait is true and buffer busy. * -EBUSY if no_wait is true and buffer busy.
* -ERESTART if interrupted by a signal. * -ERESTARTSYS if interrupted by a signal.
*/ */
extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo, extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
uint32_t proposed_placement, struct ttm_placement *placement,
bool interruptible, bool no_wait); bool interruptible, bool no_wait);
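A short usage sketch of the new prototype: the object is reserved, validated against the placement, and released again. The ttm_bo_reserve() argument list (interruptible, no_wait, use_sequence, sequence) is assumed from this kernel's API and is not shown in the hunk above; treat the whole snippet as illustrative.

/*
 * Example only: move/pin "bo" according to "placement", both supplied by
 * the caller. Error codes bubble up unchanged (-ERESTARTSYS on signals).
 */
static int example_validate(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	int ret;

	ret = ttm_bo_reserve(bo, true, false, false, 0);	/* assumed prototype */
	if (ret)
		return ret;

	ret = ttm_buffer_object_validate(bo, placement, true, false);

	ttm_bo_unreserve(bo);
	return ret;
}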
/** /**
* ttm_bo_unref * ttm_bo_unref
* *
...@@ -328,7 +347,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo); ...@@ -328,7 +347,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo);
* waiting for buffer idle. This lock is recursive. * waiting for buffer idle. This lock is recursive.
* Returns * Returns
* -EBUSY if the buffer is busy and no_wait is true. * -EBUSY if the buffer is busy and no_wait is true.
* -ERESTART if interrupted by a signal. * -ERESTARTSYS if interrupted by a signal.
*/ */
extern int extern int
...@@ -371,7 +390,7 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); ...@@ -371,7 +390,7 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
* Returns * Returns
* -ENOMEM: Out of memory. * -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags. * -EINVAL: Invalid placement flags.
* -ERESTART: Interrupted by signal while sleeping waiting for resources. * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/ */
extern int ttm_buffer_object_init(struct ttm_bo_device *bdev, extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
...@@ -411,7 +430,7 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev, ...@@ -411,7 +430,7 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
* Returns * Returns
* -ENOMEM: Out of memory. * -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags. * -EINVAL: Invalid placement flags.
* -ERESTART: Interrupted by signal while waiting for resources. * -ERESTARTSYS: Interrupted by signal while waiting for resources.
*/ */
extern int ttm_buffer_object_create(struct ttm_bo_device *bdev, extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
...@@ -445,7 +464,6 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, ...@@ -445,7 +464,6 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
* *
* @bdev: Pointer to a ttm_bo_device struct. * @bdev: Pointer to a ttm_bo_device struct.
* @mem_type: The memory type. * @mem_type: The memory type.
* @p_offset: offset for managed area in pages.
* @p_size: size managed area in pages. * @p_size: size managed area in pages.
* *
* Initialize a manager for a given memory type. * Initialize a manager for a given memory type.
...@@ -458,7 +476,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, ...@@ -458,7 +476,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
*/ */
extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
unsigned long p_offset, unsigned long p_size); unsigned long p_size);
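Since the p_offset argument is gone, a memory-type manager now always starts at page offset 0 and only its size is passed. A one-line usage sketch follows; vram_size is a placeholder for the driver's VRAM size in bytes.

/* Example only: initialize the VRAM manager with the new prototype. */
static int example_init_vram_mm(struct ttm_bo_device *bdev,
				unsigned long vram_size)
{
	return ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
}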
/** /**
* ttm_bo_clean_mm * ttm_bo_clean_mm
* *
...@@ -503,7 +521,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); ...@@ -503,7 +521,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
* *
* Returns: * Returns:
* -EINVAL: Invalid or uninitialized memory type. * -EINVAL: Invalid or uninitialized memory type.
* -ERESTART: The call was interrupted by a signal while waiting to * -ERESTARTSYS: The call was interrupted by a signal while waiting to
* evict a buffer. * evict a buffer.
*/ */
...@@ -606,7 +624,7 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, ...@@ -606,7 +624,7 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
* be called from the fops::read and fops::write method. * be called from the fops::read and fops::write method.
* Returns: * Returns:
* See man (2) write, man(2) read. In particular, * See man (2) write, man(2) read. In particular,
* the function may return -EINTR if * the function may return -ERESTARTSYS if
* interrupted by a signal. * interrupted by a signal.
*/ */
......
...@@ -242,12 +242,6 @@ struct ttm_mem_type_manager { ...@@ -242,12 +242,6 @@ struct ttm_mem_type_manager {
/** /**
* struct ttm_bo_driver * struct ttm_bo_driver
* *
* @mem_type_prio: Priority array of memory types to place a buffer object in
* if it fits without evicting buffers from any of these memory types.
* @mem_busy_prio: Priority array of memory types to place a buffer object in
* if it needs to evict buffers to make room.
* @num_mem_type_prio: Number of elements in the @mem_type_prio array.
* @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
* @create_ttm_backend_entry: Callback to create a struct ttm_backend. * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
* @invalidate_caches: Callback to invalidate read caches when a buffer object * @invalidate_caches: Callback to invalidate read caches when a buffer object
* has been evicted. * has been evicted.
...@@ -265,11 +259,6 @@ struct ttm_mem_type_manager { ...@@ -265,11 +259,6 @@ struct ttm_mem_type_manager {
*/ */
struct ttm_bo_driver { struct ttm_bo_driver {
const uint32_t *mem_type_prio;
const uint32_t *mem_busy_prio;
uint32_t num_mem_type_prio;
uint32_t num_mem_busy_prio;
/** /**
* struct ttm_bo_driver member create_ttm_backend_entry * struct ttm_bo_driver member create_ttm_backend_entry
* *
...@@ -306,7 +295,8 @@ struct ttm_bo_driver { ...@@ -306,7 +295,8 @@ struct ttm_bo_driver {
* finished, they'll end up in bo->mem.flags * finished, they'll end up in bo->mem.flags
*/ */
uint32_t(*evict_flags) (struct ttm_buffer_object *bo); void(*evict_flags) (struct ttm_buffer_object *bo,
struct ttm_placement *placement);
/** /**
* struct ttm_bo_driver member move: * struct ttm_bo_driver member move:
* *
...@@ -648,10 +638,10 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, ...@@ -648,10 +638,10 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
* -EBUSY: No space available (only if no_wait == 1). * -EBUSY: No space available (only if no_wait == 1).
* -ENOMEM: Could not allocate memory for the buffer object, either due to * -ENOMEM: Could not allocate memory for the buffer object, either due to
* fragmentation or concurrent allocators. * fragmentation or concurrent allocators.
* -ERESTART: An interruptible sleep was interrupted by a signal. * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/ */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
uint32_t proposed_placement, struct ttm_placement *placement,
struct ttm_mem_reg *mem, struct ttm_mem_reg *mem,
bool interruptible, bool no_wait); bool interruptible, bool no_wait);
/** /**
...@@ -663,7 +653,7 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, ...@@ -663,7 +653,7 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
* Wait until a buffer object is no longer sync'ed for CPU access. * Wait until a buffer object is no longer sync'ed for CPU access.
* Returns: * Returns:
* -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1). * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
* -ERESTART: An interruptible sleep was interrupted by a signal. * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/ */
extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
...@@ -767,7 +757,7 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); ...@@ -767,7 +757,7 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
* -EAGAIN: The reservation may cause a deadlock. * -EAGAIN: The reservation may cause a deadlock.
* Release all buffer reservations, wait for @bo to become unreserved and * Release all buffer reservations, wait for @bo to become unreserved and
* try again. (only if use_sequence == 1). * try again. (only if use_sequence == 1).
* -ERESTART: A wait for the buffer to become unreserved was interrupted by * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space. * a signal. Release all buffer reservations and return to user-space.
*/ */
extern int ttm_bo_reserve(struct ttm_buffer_object *bo, extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
...@@ -808,7 +798,7 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, ...@@ -808,7 +798,7 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
* *
* Returns: * Returns:
* -EBUSY: If no_wait == 1 and the buffer is already reserved. * -EBUSY: If no_wait == 1 and the buffer is already reserved.
* -ERESTART: If interruptible == 1 and the process received a signal * -ERESTARTSYS: If interruptible == 1 and the process received a signal
* while sleeping. * while sleeping.
*/ */
extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo, extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
......