Commit eb7caf84 authored by Oded Gabbay

habanalabs: maintain a list of file private data objects

This patch adds a new list to the driver's device structure. The list will
keep the file private data structures that the driver creates when a user
process opens the device.

This change is needed because it is useless to try to count how many FDs
are open. Instead, we track our own private data structure per open file,
and once the file is released, we remove that structure from the list. As
long as the list is not empty, we know we have a user that can do
something with our device.
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 86d5307a
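The idea is easy to see outside the kernel. Below is a minimal userspace C sketch of the same pattern, under assumed illustrative names (toy_device, toy_fpriv, toy_open and toy_release are not part of the driver, which uses hl_device/hl_fpriv with the kernel's <linux/list.h> and struct mutex rather than pthreads): open allocates a private-data node and links it into a mutex-protected list, release unlinks and frees it, and "is anyone using the device" reduces to a list-emptiness check instead of a counter that must be kept in sync by hand.

/* Minimal sketch of the fpriv-list pattern; all names are illustrative. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_fpriv {
	struct toy_fpriv *prev, *next;	/* node in the device's open-file list */
	int pid;			/* owning process, for kill/logging */
};

struct toy_device {
	struct toy_fpriv list;		/* circular list head, like LIST_HEAD */
	pthread_mutex_t list_lock;	/* plays the role of fpriv_list_lock */
};

static void toy_device_init(struct toy_device *dev)
{
	dev->list.prev = dev->list.next = &dev->list;
	pthread_mutex_init(&dev->list_lock, NULL);
}

static bool toy_device_in_use(struct toy_device *dev)
{
	/* non-empty list <=> at least one user holds the device open */
	return dev->list.next != &dev->list;
}

/* open(): allocate private data and link it into the device list */
static struct toy_fpriv *toy_open(struct toy_device *dev, int pid)
{
	struct toy_fpriv *fpriv = calloc(1, sizeof(*fpriv));

	if (!fpriv)
		return NULL;
	fpriv->pid = pid;
	pthread_mutex_lock(&dev->list_lock);
	fpriv->next = dev->list.next;
	fpriv->prev = &dev->list;
	dev->list.next->prev = fpriv;
	dev->list.next = fpriv;
	pthread_mutex_unlock(&dev->list_lock);
	return fpriv;
}

/* release(): unlink and free; there is no counter to keep in sync */
static void toy_release(struct toy_device *dev, struct toy_fpriv *fpriv)
{
	pthread_mutex_lock(&dev->list_lock);
	fpriv->prev->next = fpriv->next;
	fpriv->next->prev = fpriv->prev;
	pthread_mutex_unlock(&dev->list_lock);
	free(fpriv);
}

int main(void)
{
	struct toy_device dev;
	struct toy_fpriv *f;

	toy_device_init(&dev);
	f = toy_open(&dev, 1234);
	printf("pid %d opened, in use: %s\n", f->pid,
	       toy_device_in_use(&dev) ? "yes" : "no");
	toy_release(&dev, f);
	printf("after release, in use: %s\n",
	       toy_device_in_use(&dev) ? "yes" : "no");
	return 0;
}

A reset flow can also take and immediately drop list_lock to "flush" racing openers, which is exactly what the mutex_lock/mutex_unlock pairs added to hl_device_reset() and hl_device_fini() below do.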
@@ -42,10 +42,12 @@ static void hpriv_release(struct kref *ref)
 {
 	struct hl_fpriv *hpriv;
 	struct hl_device *hdev;
+	struct hl_ctx *ctx;
 
 	hpriv = container_of(ref, struct hl_fpriv, refcount);
 
 	hdev = hpriv->hdev;
+	ctx = hpriv->ctx;
 
 	put_pid(hpriv->taskpid);
@@ -53,13 +55,12 @@ static void hpriv_release(struct kref *ref)
 
 	mutex_destroy(&hpriv->restore_phase_mutex);
 
-	kfree(hpriv);
+	mutex_lock(&hdev->fpriv_list_lock);
+	list_del(&hpriv->dev_node);
 
-	/* Now the FD is really closed */
-	atomic_dec(&hdev->fd_open_cnt);
-
-	/* This allows a new user context to open the device */
 	hdev->compute_ctx = NULL;
+	mutex_unlock(&hdev->fpriv_list_lock);
+
+	kfree(hpriv);
 }
 
 void hl_hpriv_get(struct hl_fpriv *hpriv)
@@ -229,14 +230,14 @@ static int device_early_init(struct hl_device *hdev)
 
 	hl_cb_mgr_init(&hdev->kernel_cb_mgr);
 
-	mutex_init(&hdev->fd_open_cnt_lock);
 	mutex_init(&hdev->send_cpu_message_lock);
 	mutex_init(&hdev->debug_lock);
 	mutex_init(&hdev->mmu_cache_lock);
 	INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
 	spin_lock_init(&hdev->hw_queues_mirror_lock);
+	INIT_LIST_HEAD(&hdev->fpriv_list);
+	mutex_init(&hdev->fpriv_list_lock);
 	atomic_set(&hdev->in_reset, 0);
-	atomic_set(&hdev->fd_open_cnt, 0);
 	atomic_set(&hdev->cs_active_cnt, 0);
 
 	return 0;
@@ -266,6 +267,8 @@ static void device_early_fini(struct hl_device *hdev)
 	mutex_destroy(&hdev->debug_lock);
 	mutex_destroy(&hdev->send_cpu_message_lock);
 
+	mutex_destroy(&hdev->fpriv_list_lock);
+
 	hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
 
 	kfree(hdev->hl_chip_info);
@@ -277,8 +280,6 @@ static void device_early_fini(struct hl_device *hdev)
 
 	if (hdev->asic_funcs->early_fini)
 		hdev->asic_funcs->early_fini(hdev);
-
-	mutex_destroy(&hdev->fd_open_cnt_lock);
 }
 
 static void set_freq_to_low_job(struct work_struct *work)
@@ -286,9 +287,13 @@ static void set_freq_to_low_job(struct work_struct *work)
 	struct hl_device *hdev = container_of(work, struct hl_device,
 						work_freq.work);
 
-	if (atomic_read(&hdev->fd_open_cnt) == 0)
+	mutex_lock(&hdev->fpriv_list_lock);
+
+	if (!hdev->compute_ctx)
 		hl_device_set_frequency(hdev, PLL_LOW);
 
+	mutex_unlock(&hdev->fpriv_list_lock);
+
 	schedule_delayed_work(&hdev->work_freq,
 			usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
 }
@@ -338,7 +343,7 @@ static int device_late_init(struct hl_device *hdev)
 	hdev->high_pll = hdev->asic_prop.high_pll;
 
 	/* force setting to low frequency */
-	atomic_set(&hdev->curr_pll_profile, PLL_LOW);
+	hdev->curr_pll_profile = PLL_LOW;
 
 	if (hdev->pm_mng_profile == PM_AUTO)
 		hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
@@ -387,38 +392,26 @@ static void device_late_fini(struct hl_device *hdev)
  * @hdev: pointer to habanalabs device structure
  * @freq: the new frequency value
  *
- * Change the frequency if needed.
- * We allose to set PLL to low only if there is no user process
- * Returns 0 if no change was done, otherwise returns 1;
+ * Change the frequency if needed. This function has no protection against
+ * concurrency, therefore it is assumed that the calling function has protected
+ * itself against the case of calling this function from multiple threads with
+ * different values
+ *
+ * Returns 0 if no change was done, otherwise returns 1
  */
 int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
 {
-	enum hl_pll_frequency old_freq =
-			(freq == PLL_HIGH) ? PLL_LOW : PLL_HIGH;
-	int ret;
-
-	if (hdev->pm_mng_profile == PM_MANUAL)
-		return 0;
-
-	ret = atomic_cmpxchg(&hdev->curr_pll_profile, old_freq, freq);
-	if (ret == freq)
-		return 0;
-
-	/*
-	 * in case we want to lower frequency, check if device is not
-	 * opened. We must have a check here to workaround race condition with
-	 * hl_device_open
-	 */
-	if ((freq == PLL_LOW) && (atomic_read(&hdev->fd_open_cnt) > 0)) {
-		atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
+	if ((hdev->pm_mng_profile == PM_MANUAL) ||
+			(hdev->curr_pll_profile == freq))
 		return 0;
-	}
 
 	dev_dbg(hdev->dev, "Changing device frequency to %s\n",
 		freq == PLL_HIGH ? "high" : "low");
 
 	hdev->asic_funcs->set_pll_profile(hdev, freq);
+	hdev->curr_pll_profile = freq;
 
 	return 1;
 }
@@ -449,19 +442,8 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
 		goto out;
 	}
 
-	mutex_lock(&hdev->fd_open_cnt_lock);
-
-	if (atomic_read(&hdev->fd_open_cnt) > 1) {
-		dev_err(hdev->dev,
-			"Failed to enable debug mode. More then a single user is using the device\n");
-		rc = -EPERM;
-		goto unlock_fd_open_lock;
-	}
-
 	hdev->in_debug = 1;
 
-unlock_fd_open_lock:
-	mutex_unlock(&hdev->fd_open_cnt_lock);
 out:
 	mutex_unlock(&hdev->debug_lock);
@@ -568,6 +550,7 @@ int hl_device_resume(struct hl_device *hdev)
 static void device_kill_open_processes(struct hl_device *hdev)
 {
 	u16 pending_total, pending_cnt;
+	struct hl_fpriv *hpriv;
 	struct task_struct *task = NULL;
 
 	if (hdev->pldm)
@@ -575,32 +558,30 @@ static void device_kill_open_processes(struct hl_device *hdev)
 	else
 		pending_total = HL_PENDING_RESET_PER_SEC;
 
-	pending_cnt = pending_total;
-
-	/* Flush all processes that are inside hl_open */
-	mutex_lock(&hdev->fd_open_cnt_lock);
-
-	while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
-
-		pending_cnt--;
-
-		dev_info(hdev->dev,
-			"Can't HARD reset, waiting for user to close FD\n");
+	/* Giving time for user to close FD, and for processes that are inside
+	 * hl_device_open to finish
+	 */
+	if (!list_empty(&hdev->fpriv_list))
 		ssleep(1);
-	}
 
-	if (atomic_read(&hdev->fd_open_cnt)) {
-		task = get_pid_task(hdev->compute_ctx->hpriv->taskpid,
-					PIDTYPE_PID);
+	mutex_lock(&hdev->fpriv_list_lock);
+
+	/* This section must be protected because we are dereferencing
+	 * pointers that are freed if the process exits
+	 */
+	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
+		task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
 		if (task) {
-			dev_info(hdev->dev, "Killing user processes\n");
+			dev_info(hdev->dev, "Killing user process\n");
 			send_sig(SIGKILL, task, 1);
-			msleep(100);
+			usleep_range(1000, 10000);
 			put_task_struct(task);
 		}
 	}
 
+	mutex_unlock(&hdev->fpriv_list_lock);
+
 	/* We killed the open users, but because the driver cleans up after the
 	 * user contexts are closed (e.g. mmu mappings), we need to wait again
 	 * to make sure the cleaning phase is finished before continuing with
@@ -609,19 +590,18 @@ static void device_kill_open_processes(struct hl_device *hdev)
 
 	pending_cnt = pending_total;
 
-	while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+	while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) {
+		dev_info(hdev->dev,
+			"Waiting for all unmap operations to finish before hard reset\n");
 
 		pending_cnt--;
 
 		ssleep(1);
 	}
 
-	if (atomic_read(&hdev->fd_open_cnt))
+	if (!list_empty(&hdev->fpriv_list))
 		dev_crit(hdev->dev,
 			"Going to hard reset with open user contexts\n");
-
-	mutex_unlock(&hdev->fd_open_cnt_lock);
 }
 
 static void device_hard_reset_pending(struct work_struct *work)
@@ -677,13 +657,16 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
 	/* This also blocks future CS/VM/JOB completion operations */
 	hdev->disabled = true;
 
-	/*
-	 * Flush anyone that is inside the critical section of enqueue
+	/* Flush anyone that is inside the critical section of enqueue
 	 * jobs to the H/W
 	 */
 	hdev->asic_funcs->hw_queues_lock(hdev);
 	hdev->asic_funcs->hw_queues_unlock(hdev);
 
+	/* Flush anyone that is inside device open */
+	mutex_lock(&hdev->fpriv_list_lock);
+	mutex_unlock(&hdev->fpriv_list_lock);
+
 	dev_err(hdev->dev, "Going to RESET device!\n");
 }
@@ -759,12 +742,16 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
 	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
 		hl_cq_reset(hdev, &hdev->completion_queue[i]);
 
+	mutex_lock(&hdev->fpriv_list_lock);
+
 	/* Make sure the context switch phase will run again */
 	if (hdev->compute_ctx) {
 		atomic_set(&hdev->compute_ctx->thread_ctx_switch_token, 1);
 		hdev->compute_ctx->thread_ctx_switch_wait_token = 0;
 	}
 
+	mutex_unlock(&hdev->fpriv_list_lock);
+
 	/* Finished tear-down, starting to re-initialize */
 
 	if (hard_reset) {
@@ -1125,13 +1112,16 @@ void hl_device_fini(struct hl_device *hdev)
 	/* Mark device as disabled */
 	hdev->disabled = true;
 
-	/*
-	 * Flush anyone that is inside the critical section of enqueue
+	/* Flush anyone that is inside the critical section of enqueue
 	 * jobs to the H/W
 	 */
 	hdev->asic_funcs->hw_queues_lock(hdev);
 	hdev->asic_funcs->hw_queues_unlock(hdev);
 
+	/* Flush anyone that is inside device open */
+	mutex_lock(&hdev->fpriv_list_lock);
+	mutex_unlock(&hdev->fpriv_list_lock);
+
 	hdev->hard_reset_pending = true;
 
 	hl_hwmon_fini(hdev);
...
@@ -254,11 +254,11 @@ static ssize_t pm_mng_profile_store(struct device *dev,
 		goto out;
 	}
 
-	mutex_lock(&hdev->fd_open_cnt_lock);
+	mutex_lock(&hdev->fpriv_list_lock);
 
-	if (atomic_read(&hdev->fd_open_cnt) > 0) {
+	if (hdev->compute_ctx) {
 		dev_err(hdev->dev,
-			"Can't change PM profile while user process is opened on the device\n");
+			"Can't change PM profile while compute context is opened on the device\n");
 		count = -EPERM;
 		goto unlock_mutex;
 	}
@@ -266,24 +266,35 @@ static ssize_t pm_mng_profile_store(struct device *dev,
 	if (strncmp("auto", buf, strlen("auto")) == 0) {
 		/* Make sure we are in LOW PLL when changing modes */
 		if (hdev->pm_mng_profile == PM_MANUAL) {
-			atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
+			hdev->curr_pll_profile = PLL_HIGH;
 			hl_device_set_frequency(hdev, PLL_LOW);
 			hdev->pm_mng_profile = PM_AUTO;
 		}
 	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
-		/* Make sure we are in LOW PLL when changing modes */
 		if (hdev->pm_mng_profile == PM_AUTO) {
-			flush_delayed_work(&hdev->work_freq);
+			/* Must release the lock because the work thread also
+			 * takes this lock. But before we release it, set
+			 * the mode to manual so nothing will change if a user
+			 * suddenly opens the device
+			 */
 			hdev->pm_mng_profile = PM_MANUAL;
+
+			mutex_unlock(&hdev->fpriv_list_lock);
+
+			/* Flush the current work so we can return to the user
+			 * knowing that he is the only one changing frequencies
+			 */
+			flush_delayed_work(&hdev->work_freq);
+
+			return count;
 		}
 	} else {
 		dev_err(hdev->dev, "value should be auto or manual\n");
 		count = -EINVAL;
-		goto unlock_mutex;
 	}
 
 unlock_mutex:
-	mutex_unlock(&hdev->fd_open_cnt_lock);
+	mutex_unlock(&hdev->fpriv_list_lock);
 
 out:
 	return count;
 }
...
@@ -914,6 +914,7 @@ struct hl_debug_params {
  * @ctx_mgr: context manager to handle multiple context for this FD.
  * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
  * @debugfs_list: list of relevant ASIC debugfs.
+ * @dev_node: node in the device list of file private data
  * @refcount: number of related contexts.
  * @restore_phase_mutex: lock for context switch and restore phase.
  */
@@ -925,6 +926,7 @@ struct hl_fpriv {
 	struct hl_ctx_mgr	ctx_mgr;
 	struct hl_cb_mgr	cb_mgr;
 	struct list_head	debugfs_list;
+	struct list_head	dev_node;
 	struct kref		refcount;
 	struct mutex		restore_phase_mutex;
 };
@@ -1178,12 +1180,6 @@ struct hl_device_reset_work {
  * @cpu_accessible_dma_pool: KMD <-> ArmCP shared memory pool.
  * @asid_bitmap: holds used/available ASIDs.
  * @asid_mutex: protects asid_bitmap.
- * @fd_open_cnt_lock: lock for updating fd_open_cnt in hl_device_open. Although
- *                    fd_open_cnt is atomic, we need this lock to serialize
- *                    the open function because the driver currently supports
- *                    only a single process at a time. In addition, we need a
- *                    lock here so we can flush user processes which are opening
- *                    the device while we are trying to hard reset it
  * @send_cpu_message_lock: enforces only one message in KMD <-> ArmCP queue.
  * @debug_lock: protects critical section of setting debug mode for device
  * @asic_prop: ASIC specific immutable properties.
@@ -1199,6 +1195,9 @@ struct hl_device_reset_work {
  * @hl_debugfs: device's debugfs manager.
  * @cb_pool: list of preallocated CBs.
  * @cb_pool_lock: protects the CB pool.
+ * @fpriv_list: list of file private data structures. Each structure is created
+ *              when a user opens the device
+ * @fpriv_list_lock: protects the fpriv_list
  * @compute_ctx: current compute context executing.
  * @dram_used_mem: current DRAM memory consumption.
  * @timeout_jiffies: device CS timeout value.
@@ -1206,10 +1205,9 @@ struct hl_device_reset_work {
  *                value is saved so in case of hard-reset, KMD will restore this
  *                value and update the F/W after the re-initialization
  * @in_reset: is device in reset flow.
- * @curr_pll_profile: current PLL profile.
- * @fd_open_cnt: number of open user processes.
  * @cs_active_cnt: number of active command submissions on this device (active
  *                 means already in H/W queues)
+ * @curr_pll_profile: current PLL profile.
  * @major: habanalabs KMD major.
 * @high_pll: high PLL profile frequency.
 * @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1228,7 +1226,7 @@ struct hl_device_reset_work {
 * @mmu_enable: is MMU enabled.
 * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
 * @dma_mask: the dma mask that was set for this device
- * @in_debug: is device under debug. This, together with fd_open_cnt, enforces
+ * @in_debug: is device under debug. This, together with fpriv_list, enforces
 *            that only a single user is configuring the debug infrastructure.
 */
 struct hl_device {
@@ -1256,8 +1254,6 @@ struct hl_device {
 	struct gen_pool		*cpu_accessible_dma_pool;
 	unsigned long		*asid_bitmap;
 	struct mutex		asid_mutex;
-	/* TODO: remove fd_open_cnt_lock for multiple process support */
-	struct mutex		fd_open_cnt_lock;
 	struct mutex		send_cpu_message_lock;
 	struct mutex		debug_lock;
 	struct asic_fixed_properties	asic_prop;
@@ -1276,15 +1272,17 @@ struct hl_device {
 	struct list_head	cb_pool;
 	spinlock_t		cb_pool_lock;
 
+	struct list_head	fpriv_list;
+	struct mutex		fpriv_list_lock;
+
 	struct hl_ctx		*compute_ctx;
 
 	atomic64_t		dram_used_mem;
 	u64			timeout_jiffies;
 	u64			max_power;
 	atomic_t		in_reset;
-	atomic_t		curr_pll_profile;
-	atomic_t		fd_open_cnt;
 	atomic_t		cs_active_cnt;
+	enum hl_pll_frequency	curr_pll_profile;
 	u32			major;
 	u32			high_pll;
 	u32			soft_reset_cnt;
...
@@ -95,80 +95,78 @@ int hl_device_open(struct inode *inode, struct file *filp)
 		return -ENXIO;
 	}
 
-	mutex_lock(&hdev->fd_open_cnt_lock);
+	hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+
+	hpriv->hdev = hdev;
+	filp->private_data = hpriv;
+	hpriv->filp = filp;
+	mutex_init(&hpriv->restore_phase_mutex);
+	kref_init(&hpriv->refcount);
+	nonseekable_open(inode, filp);
+
+	hl_cb_mgr_init(&hpriv->cb_mgr);
+	hl_ctx_mgr_init(&hpriv->ctx_mgr);
+
+	hpriv->taskpid = find_get_pid(current->pid);
+
+	mutex_lock(&hdev->fpriv_list_lock);
 
 	if (hl_device_disabled_or_in_reset(hdev)) {
 		dev_err_ratelimited(hdev->dev,
 			"Can't open %s because it is disabled or in reset\n",
 			dev_name(hdev->dev));
-		mutex_unlock(&hdev->fd_open_cnt_lock);
-		return -EPERM;
+		rc = -EPERM;
+		goto out_err;
 	}
 
 	if (hdev->in_debug) {
 		dev_err_ratelimited(hdev->dev,
 			"Can't open %s because it is being debugged by another user\n",
 			dev_name(hdev->dev));
-		mutex_unlock(&hdev->fd_open_cnt_lock);
-		return -EPERM;
+		rc = -EPERM;
+		goto out_err;
 	}
 
-	if (atomic_read(&hdev->fd_open_cnt)) {
+	if (hdev->compute_ctx) {
 		dev_info_ratelimited(hdev->dev,
 			"Can't open %s because another user is working on it\n",
 			dev_name(hdev->dev));
-		mutex_unlock(&hdev->fd_open_cnt_lock);
-		return -EBUSY;
-	}
-
-	atomic_inc(&hdev->fd_open_cnt);
-
-	mutex_unlock(&hdev->fd_open_cnt_lock);
-
-	hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
-	if (!hpriv) {
-		rc = -ENOMEM;
-		goto close_device;
+		rc = -EBUSY;
+		goto out_err;
 	}
 
-	hpriv->hdev = hdev;
-	filp->private_data = hpriv;
-	hpriv->filp = filp;
-	mutex_init(&hpriv->restore_phase_mutex);
-	kref_init(&hpriv->refcount);
-	nonseekable_open(inode, filp);
-
-	hl_cb_mgr_init(&hpriv->cb_mgr);
-	hl_ctx_mgr_init(&hpriv->ctx_mgr);
-
 	rc = hl_ctx_create(hdev, hpriv);
 	if (rc) {
-		dev_err(hdev->dev, "Failed to open FD (CTX fail)\n");
+		dev_err(hdev->dev, "Failed to create context %d\n", rc);
 		goto out_err;
 	}
 
-	hpriv->taskpid = find_get_pid(current->pid);
-
-	/*
-	 * Device is IDLE at this point so it is legal to change PLLs. There
-	 * is no need to check anything because if the PLL is already HIGH, the
-	 * set function will return without doing anything
+	/* Device is IDLE at this point so it is legal to change PLLs.
+	 * There is no need to check anything because if the PLL is
+	 * already HIGH, the set function will return without doing
+	 * anything
 	 */
 	hl_device_set_frequency(hdev, PLL_HIGH);
 
+	list_add(&hpriv->dev_node, &hdev->fpriv_list);
+	mutex_unlock(&hdev->fpriv_list_lock);
+
 	hl_debugfs_add_file(hpriv);
 
 	return 0;
 
 out_err:
-	filp->private_data = NULL;
-	hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+	mutex_unlock(&hdev->fpriv_list_lock);
+
 	hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
+	hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+	filp->private_data = NULL;
 	mutex_destroy(&hpriv->restore_phase_mutex);
-	kfree(hpriv);
+	put_pid(hpriv->taskpid);
 
-close_device:
-	atomic_dec(&hdev->fd_open_cnt);
+	kfree(hpriv);
 
 	return rc;
 }
...