Commit 959ffef1 authored by Chaitanya Kulkarni, committed by Keith Busch

nvme-fabrics: open code __nvmf_host_find()

There is no point in maintaining a separate function __nvmf_host_find()
that has only one caller, nvmf_host_add(), especially when both the
caller and the callee are small enough to merge.

Keeping them separate duplicates the error handling in caller and
callee for no reason, forces the reader to follow both functions to
establish correctness, and requires an extra lockdep assertion because
of the locking involved.

Open code __nvmf_host_find() into nvmf_host_add() with an appropriate
comment. This removes the duplicated caller/callee error checks and the
lockdep check that was only needed because of the nvmf_hosts_mutex
involvement, diffstat :-

 drivers/nvme/host/fabrics.c | 75 +++++++++++++------------------------
 1 file changed, 27 insertions(+), 48 deletions(-)
Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
parent 900095bf
drivers/nvme/host/fabrics.c

@@ -21,48 +21,6 @@ static DEFINE_MUTEX(nvmf_hosts_mutex);
 
 static struct nvmf_host *nvmf_default_host;
 
-/**
- * __nvmf_host_find() - Find a matching to a previously created host
- * @hostnqn: Host NQN to match
- * @id: Host ID to match
- *
- * We have defined a host as how it is perceived by the target.
- * Therefore, we don't allow different Host NQNs with the same Host ID.
- * Similarly, we do not allow the usage of the same Host NQN with different
- * Host IDs. This will maintain unambiguous host identification.
- *
- * Return: Returns host pointer on success, NULL in case of no match or
- *	   ERR_PTR(-EINVAL) in case of error match.
- */
-static struct nvmf_host *__nvmf_host_find(const char *hostnqn, uuid_t *id)
-{
-	struct nvmf_host *host;
-
-	lockdep_assert_held(&nvmf_hosts_mutex);
-
-	list_for_each_entry(host, &nvmf_hosts, list) {
-		bool same_hostnqn = !strcmp(host->nqn, hostnqn);
-		bool same_hostid = uuid_equal(&host->id, id);
-
-		if (same_hostnqn && same_hostid)
-			return host;
-		if (same_hostnqn) {
-			pr_err("found same hostnqn %s but different hostid %pUb\n",
-				hostnqn, id);
-			return ERR_PTR(-EINVAL);
-		}
-		if (same_hostid) {
-			pr_err("found same hostid %pUb but different hostnqn %s\n",
-				id, hostnqn);
-			return ERR_PTR(-EINVAL);
-		}
-	}
-
-	return NULL;
-}
-
 static struct nvmf_host *nvmf_host_alloc(const char *hostnqn, uuid_t *id)
 {
 	struct nvmf_host *host;
@@ -83,12 +41,33 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn, uuid_t *id)
 	struct nvmf_host *host;
 
 	mutex_lock(&nvmf_hosts_mutex);
-	host = __nvmf_host_find(hostnqn, id);
-	if (IS_ERR(host)) {
-		goto out_unlock;
-	} else if (host) {
-		kref_get(&host->ref);
-		goto out_unlock;
+
+	/*
+	 * We have defined a host as how it is perceived by the target.
+	 * Therefore, we don't allow different Host NQNs with the same Host ID.
+	 * Similarly, we do not allow the usage of the same Host NQN with
+	 * different Host IDs. This'll maintain unambiguous host identification.
+	 */
+	list_for_each_entry(host, &nvmf_hosts, list) {
+		bool same_hostnqn = !strcmp(host->nqn, hostnqn);
+		bool same_hostid = uuid_equal(&host->id, id);
+
+		if (same_hostnqn && same_hostid) {
+			kref_get(&host->ref);
+			goto out_unlock;
+		}
+		if (same_hostnqn) {
+			pr_err("found same hostnqn %s but different hostid %pUb\n",
+				hostnqn, id);
+			host = ERR_PTR(-EINVAL);
+			goto out_unlock;
+		}
+		if (same_hostid) {
+			pr_err("found same hostid %pUb but different hostnqn %s\n",
+				id, hostnqn);
+			host = ERR_PTR(-EINVAL);
+			goto out_unlock;
+		}
 	}
 
 	host = nvmf_host_alloc(hostnqn, id);
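
For reference, a sketch of how the merged nvmf_host_add() reads after this
patch. The duplicate-detection loop and its comment come straight from the
hunks above; the tail of the function (allocation-failure handling, adding
the new host to nvmf_hosts, and the out_unlock label) lies outside the
context shown, so that part follows the usual kernel pattern and is an
assumption, not a quote of the actual code.

static struct nvmf_host *nvmf_host_add(const char *hostnqn, uuid_t *id)
{
	struct nvmf_host *host;

	mutex_lock(&nvmf_hosts_mutex);

	/*
	 * We have defined a host as how it is perceived by the target.
	 * Therefore, we don't allow different Host NQNs with the same Host ID.
	 * Similarly, we do not allow the usage of the same Host NQN with
	 * different Host IDs. This'll maintain unambiguous host identification.
	 */
	list_for_each_entry(host, &nvmf_hosts, list) {
		bool same_hostnqn = !strcmp(host->nqn, hostnqn);
		bool same_hostid = uuid_equal(&host->id, id);

		/* Exact match: reuse the existing host entry. */
		if (same_hostnqn && same_hostid) {
			kref_get(&host->ref);
			goto out_unlock;
		}
		/* Same NQN with a different ID (or vice versa) is an error. */
		if (same_hostnqn) {
			pr_err("found same hostnqn %s but different hostid %pUb\n",
				hostnqn, id);
			host = ERR_PTR(-EINVAL);
			goto out_unlock;
		}
		if (same_hostid) {
			pr_err("found same hostid %pUb but different hostnqn %s\n",
				id, hostnqn);
			host = ERR_PTR(-EINVAL);
			goto out_unlock;
		}
	}

	/*
	 * No existing entry matched: allocate a new host. Everything from
	 * here to the end of the function is not part of the hunks above
	 * and is only an assumed shape.
	 */
	host = nvmf_host_alloc(hostnqn, id);
	if (!host) {
		host = ERR_PTR(-ENOMEM);
		goto out_unlock;
	}

	list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
	mutex_unlock(&nvmf_hosts_mutex);
	return host;
}

Note that folding the lookup into nvmf_host_add() also removes the
lockdep_assert_held() call, since the loop now runs in the same function
that takes nvmf_hosts_mutex.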