Commit ed55635e authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 "The highlights this merge window include:

   - Allow target fabric drivers to function as built-in.  (Roland)
   - Fix tcm_loop multi-TPG endpoint nexus bug.  (Hannes)
   - Move per device config_item_type into se_subsystem_api, allowing
     configfs attributes to be defined at module_init time.  (Jerome +
     nab)
   - Convert existing IBLOCK/FILEIO/RAMDISK/PSCSI/TCMU drivers to use
     external configfs attributes; the common wiring is sketched after
     this message.  (nab)
   - A number of iser-target fixes related to active session + network
     portal shutdown stability during extended stress testing.  (Sagi +
     Slava)
   - Dynamic allocation of T10-PI contexts for iser-target, fixing a
     potentially bogus iscsi_np->tpg_np pointer reference in >= v3.14
     code.  (Sagi)
   - iser-target performance + scalability improvements.  (Sagi)
   - Fixes for SPC-4 Persistent Reservation AllRegistrants spec
     compliance.  (Ilias + James + nab)
   - Avoid potential short kernel_sendmsg() in iscsi-target for now until
     Al's conversion to use msghdr iteration is merged post -rc1.
     (Viro)

  Also, Sagi has requested a number of iser-target patches (9) that
  address stability issues he's encountered during extended stress
  testing be considered for v3.10.y + v3.14.y code.  Given the amount of
  LOC involved, it will certainly require extra backporting effort.

  Apologies in advance to Greg-KH & Co on this.  Sagi and I will be
  working post-merge to ensure they each get applied correctly"
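
The configfs conversion called out above follows one small pattern, repeated in each converted backend's module_init (the concrete instances are in the fileio/iblock/pscsi/rd/tcmu hunks further down). A minimal sketch in C, using "myback" as a placeholder backend name and myback_template as its se_subsystem_api, rather than any real driver:

    /* Emit default show/store handlers and configfs attribute structs for
     * the se_dev_attrib fields (macros from target_core_backend_configfs.h). */
    DEF_TB_DEFAULT_ATTRIBS(myback);

    static struct configfs_attribute *myback_backend_dev_attrs[] = {
    	&myback_dev_attrib_emulate_model_alias.attr,
    	/* ... one entry per device attribute, as in the arrays below ... */
    	&myback_dev_attrib_max_write_same_len.attr,
    	NULL,
    };

    static int __init myback_module_init(void)
    {
    	struct target_backend_cits *tbc = &myback_template.tb_cits;

    	/* Populate the default per-device config_item_types, then point the
    	 * dev_attrib group at this backend's attribute list. */
    	target_core_setup_sub_cits(&myback_template);
    	tbc->tb_dev_attrib_cit.ct_attrs = myback_backend_dev_attrs;

    	return transport_subsystem_register(&myback_template);
    }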

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (53 commits)
  target: Allow AllRegistrants to re-RESERVE existing reservation
  uapi/linux/target_core_user.h: fix headers_install.sh badness
  iscsi-target: Fail connection on short sendmsg writes
  iscsi-target: nullify session in failed login sequence
  target: Avoid dropping AllRegistrants reservation during unregister
  target: Fix R_HOLDER bit usage for AllRegistrants
  iscsi-target: Drop left-over bogus iscsi_np->tpg_np
  iser-target: Fix wc->wr_id cast warning
  iser-target: Remove code duplication
  iser-target: Adjust log levels and prettify some prints
  iser-target: Use debug_level parameter to control logging level
  iser-target: Fix logout sequence
  iser-target: Don't wait for session commands from completion context
  iser-target: Reduce CQ lock contention by batch polling
  iser-target: Introduce isert_poll_budget
  iser-target: Remove an atomic operation from the IO path
  iser-target: Remove redundant call to isert_conn_terminate
  iser-target: Use single CQ for TX and RX
  iser-target: Centralize completion elements to a context
  iser-target: Cast wr_id with uintptr_t instead of unsigned long
  ...
parents 5be95b7e ae450e24
@@ -4,9 +4,37 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#define DRV_NAME "isert"
#define PFX DRV_NAME ": "
#define isert_dbg(fmt, arg...) \
do { \
if (unlikely(isert_debug_level > 2)) \
printk(KERN_DEBUG PFX "%s: " fmt,\
__func__ , ## arg); \
} while (0)
#define isert_warn(fmt, arg...) \
do { \
if (unlikely(isert_debug_level > 0)) \
pr_warn(PFX "%s: " fmt, \
__func__ , ## arg); \
} while (0)
#define isert_info(fmt, arg...) \
do { \
if (unlikely(isert_debug_level > 1)) \
pr_info(PFX "%s: " fmt, \
__func__ , ## arg); \
} while (0)
#define isert_err(fmt, arg...) \
pr_err(PFX "%s: " fmt, __func__ , ## arg)
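These macros test an isert_debug_level variable defined in ib_isert.c rather than in this header; that hunk is collapsed here, but per the "Use debug_level parameter to control logging level" commit in the shortlog it is exposed as a module parameter, presumably along these lines (a sketch, not the verbatim source):

    static int isert_debug_level;
    module_param_named(debug_level, isert_debug_level, int, 0644);
    MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default: 0)");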
#define ISERT_RDMA_LISTEN_BACKLOG 10
#define ISCSI_ISER_SG_TABLESIZE 256
#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
#define ISER_BEACON_WRID 0xfffffffffffffffeULL
enum isert_desc_type {
ISCSI_TX_CONTROL,
@@ -23,6 +51,7 @@ enum iser_ib_op_code {
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
ISER_CONN_FULL_FEATURE,
ISER_CONN_TERMINATING,
ISER_CONN_DOWN,
};
@@ -44,9 +73,6 @@ struct iser_tx_desc {
struct ib_sge tx_sg[2];
int num_sge;
struct isert_cmd *isert_cmd;
-struct llist_node *comp_llnode_batch;
-struct llist_node comp_llnode;
-bool llnode_active;
struct ib_send_wr send_wr;
} __packed;
@@ -81,6 +107,12 @@ struct isert_data_buf {
enum dma_data_direction dma_dir;
};
enum {
DATA = 0,
PROT = 1,
SIG = 2,
};
struct isert_rdma_wr {
struct list_head wr_list;
struct isert_cmd *isert_cmd;
@@ -90,6 +122,7 @@ struct isert_rdma_wr {
int send_wr_num;
struct ib_send_wr *send_wr;
struct ib_send_wr s_send_wr;
struct ib_sge ib_sg[3];
struct isert_data_buf data;
struct isert_data_buf prot;
struct fast_reg_descriptor *fr_desc;
@@ -117,14 +150,15 @@ struct isert_device;
struct isert_conn {
enum iser_conn_state state;
int post_recv_buf_count;
-atomic_t post_send_buf_count;
u32 responder_resources;
u32 initiator_depth;
bool pi_support;
u32 max_sge;
char *login_buf;
char *login_req_buf;
char *login_rsp_buf;
u64 login_req_dma;
int login_req_len;
u64 login_rsp_dma;
unsigned int conn_rx_desc_head;
struct iser_rx_desc *conn_rx_descs;
@@ -132,13 +166,13 @@ struct isert_conn {
struct iscsi_conn *conn;
struct list_head conn_accept_node;
struct completion conn_login_comp;
struct completion login_req_comp;
struct iser_tx_desc conn_login_tx_desc;
struct rdma_cm_id *conn_cm_id;
struct ib_pd *conn_pd;
struct ib_mr *conn_mr;
struct ib_qp *conn_qp;
struct isert_device *conn_device;
-struct work_struct conn_logout_work;
struct mutex conn_mutex;
struct completion conn_wait;
struct completion conn_wait_comp_err;
@@ -147,31 +181,38 @@ struct isert_conn {
int conn_fr_pool_size;
/* lock to protect fastreg pool */
spinlock_t conn_lock;
-#define ISERT_COMP_BATCH_COUNT 8
-int conn_comp_batch;
-struct llist_head conn_comp_llist;
-bool disconnect;
struct work_struct release_work;
struct ib_recv_wr beacon;
bool logout_posted;
};
#define ISERT_MAX_CQ 64
-struct isert_cq_desc {
-struct isert_device *device;
-int cq_index;
-struct work_struct cq_rx_work;
-struct work_struct cq_tx_work;
/**
* struct isert_comp - iSER completion context
*
* @device: pointer to device handle
* @cq: completion queue
* @wcs: work completion array
* @active_qps: Number of active QPs attached
* to completion context
* @work: completion work handle
*/
struct isert_comp {
struct isert_device *device;
struct ib_cq *cq;
struct ib_wc wcs[16];
int active_qps;
struct work_struct work;
};
struct isert_device {
int use_fastreg;
bool pi_capable;
-int cqs_used;
int refcount;
-int cq_active_qps[ISERT_MAX_CQ];
struct ib_device *ib_device;
-struct ib_cq *dev_rx_cq[ISERT_MAX_CQ];
-struct ib_cq *dev_tx_cq[ISERT_MAX_CQ];
-struct isert_cq_desc *cq_desc;
struct isert_comp *comps;
int comps_used;
struct list_head dev_node;
struct ib_device_attr dev_attr;
int (*reg_rdma_mem)(struct iscsi_conn *conn,
@@ -182,6 +223,7 @@ struct isert_device {
};
struct isert_np {
struct iscsi_np *np;
struct semaphore np_sem;
struct rdma_cm_id *np_cm_id;
struct mutex np_accept_mutex;
......
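The wcs[16] array and per-context work item in struct isert_comp above back the batch-polling commits in the shortlog ("Reduce CQ lock contention by batch polling", "Introduce isert_poll_budget"). The handler changes live in the collapsed ib_isert.c diff; the idea is roughly the sketch below, where isert_handle_wc and the budget value are stand-ins rather than the exact upstream names:

    static void isert_comp_drain(struct isert_comp *comp)
    {
    	int budget = 4096;	/* stand-in for isert_poll_budget */
    	int completed = 0, n, i;

    	/* Drain the CQ in chunks of ARRAY_SIZE(comp->wcs) instead of one
    	 * completion at a time, bounding the work done per invocation. */
    	while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), comp->wcs)) > 0) {
    		for (i = 0; i < n; i++)
    			isert_handle_wc(&comp->wcs[i]);	/* per-WC dispatch (stand-in) */

    		completed += n;
    		if (completed >= budget)
    			break;
    	}
    }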
@@ -609,6 +609,7 @@ static int __init iscsi_target_init_module(void)
return ret;
r2t_out:
iscsit_unregister_transport(&iscsi_target_transport);
kmem_cache_destroy(lio_r2t_cache);
ooo_out:
kmem_cache_destroy(lio_ooo_cache);
......
@@ -790,7 +790,6 @@ struct iscsi_np {
void *np_context;
struct iscsit_transport *np_transport;
struct list_head np_list;
-struct iscsi_tpg_np *tpg_np;
} ____cacheline_aligned;
struct iscsi_tpg_np {
......
@@ -281,7 +281,6 @@ static int iscsi_login_zero_tsih_s1(
{
struct iscsi_session *sess = NULL;
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
-enum target_prot_op sup_pro_ops;
int ret;
sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
@@ -343,9 +342,8 @@ static int iscsi_login_zero_tsih_s1(
kfree(sess);
return -ENOMEM;
}
-sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
-sess->se_sess = transport_init_session(sup_pro_ops);
sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -1161,6 +1159,7 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
}
kfree(conn->sess->sess_ops);
kfree(conn->sess);
conn->sess = NULL;
old_sess_out:
iscsi_stop_login_thread_timer(np);
@@ -1204,6 +1203,9 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
conn->sock = NULL;
}
if (conn->conn_transport->iscsit_wait_conn)
conn->conn_transport->iscsit_wait_conn(conn);
if (conn->conn_transport->iscsit_free_conn)
conn->conn_transport->iscsit_free_conn(conn);
@@ -1364,6 +1366,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
}
login->zero_tsih = zero_tsih;
conn->sess->se_sess->sup_prot_ops =
conn->conn_transport->iscsit_get_sup_prot_ops(conn);
tpg = conn->tpg;
if (!tpg) {
pr_err("Unable to locate struct iscsi_conn->tpg\n");
......
@@ -501,7 +501,6 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
init_completion(&tpg_np->tpg_np_comp);
kref_init(&tpg_np->tpg_np_kref);
tpg_np->tpg_np = np;
-np->tpg_np = tpg_np;
tpg_np->tpg = tpg;
spin_lock(&tpg->tpg_np_lock);
......
@@ -26,8 +26,7 @@ struct iscsit_transport *iscsit_get_transport(int type)
void iscsit_put_transport(struct iscsit_transport *t)
{
-if (t->owner)
-module_put(t->owner);
module_put(t->owner);
}
int iscsit_register_transport(struct iscsit_transport *t)
......
@@ -1356,15 +1356,15 @@ static int iscsit_do_tx_data(
struct iscsi_conn *conn,
struct iscsi_data_count *count)
{
-int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
int ret, iov_len;
struct kvec *iov_p;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
-if (data <= 0) {
-pr_err("Data length is: %d\n", data);
if (count->data_length <= 0) {
pr_err("Data length is: %d\n", count->data_length);
return -1;
}
@@ -1373,20 +1373,16 @@ static int iscsit_do_tx_data(
iov_p = count->iov;
iov_len = count->iov_count;
-while (total_tx < data) {
-tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
-(data - total_tx));
-if (tx_loop <= 0) {
-pr_debug("tx_loop: %d total_tx %d\n",
-tx_loop, total_tx);
-return tx_loop;
-}
-total_tx += tx_loop;
-pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
-tx_loop, total_tx, data);
ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
count->data_length);
if (ret != count->data_length) {
pr_err("Unexpected ret: %d send data %d\n",
ret, count->data_length);
return -EPIPE;
}
pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
-return total_tx;
return ret;
}
int rx_data(
......
@@ -138,7 +138,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
goto out_done;
}
-tl_nexus = tl_hba->tl_nexus;
tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus) {
scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
" does not exist\n");
@@ -218,16 +218,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
* to struct scsi_device
*/
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
-struct tcm_loop_nexus *tl_nexus,
int lun, int task, enum tcm_tmreq_table tmr)
{
struct se_cmd *se_cmd = NULL;
struct se_session *se_sess;
struct se_portal_group *se_tpg;
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_cmd *tl_cmd = NULL;
struct tcm_loop_tmr *tl_tmr = NULL;
int ret = TMR_FUNCTION_FAILED, rc;
/*
* Locate the tl_nexus and se_sess pointers
*/
tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus) {
pr_err("Unable to perform device reset without"
" active I_T Nexus\n");
return ret;
}
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
pr_err("Unable to allocate memory for tl_cmd\n");
@@ -243,7 +253,7 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
se_cmd = &tl_cmd->tl_se_cmd;
se_tpg = &tl_tpg->tl_se_tpg;
-se_sess = tl_nexus->se_sess;
se_sess = tl_tpg->tl_nexus->se_sess;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
@@ -288,7 +298,6 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
-struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_tpg *tl_tpg;
int ret = FAILED;
@@ -296,21 +305,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
* Locate the tcm_loop_hba_t pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
-/*
-* Locate the tl_nexus and se_sess pointers
-*/
-tl_nexus = tl_hba->tl_nexus;
-if (!tl_nexus) {
-pr_err("Unable to perform device reset without"
-" active I_T Nexus\n");
-return FAILED;
-}
-/*
-* Locate the tl_tpg pointer from TargetID in sc->device->id
-*/
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
-ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
sc->request->tag, TMR_ABORT_TASK);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
@@ -322,7 +318,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
-struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_tpg *tl_tpg;
int ret = FAILED;
@@ -330,20 +325,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
* Locate the tcm_loop_hba_t pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
-/*
-* Locate the tl_nexus and se_sess pointers
-*/
-tl_nexus = tl_hba->tl_nexus;
-if (!tl_nexus) {
-pr_err("Unable to perform device reset without"
-" active I_T Nexus\n");
-return FAILED;
-}
-/*
-* Locate the tl_tpg pointer from TargetID in sc->device->id
-*/
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
-ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
0, TMR_LUN_RESET);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
@@ -940,8 +924,8 @@ static int tcm_loop_make_nexus(
struct tcm_loop_nexus *tl_nexus;
int ret = -ENOMEM;
-if (tl_tpg->tl_hba->tl_nexus) {
-pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
if (tl_tpg->tl_nexus) {
pr_debug("tl_tpg->tl_nexus already exists\n");
return -EEXIST;
}
se_tpg = &tl_tpg->tl_se_tpg;
@@ -976,7 +960,7 @@ static int tcm_loop_make_nexus(
*/
__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
tl_nexus->se_sess, tl_nexus);
-tl_tpg->tl_hba->tl_nexus = tl_nexus;
tl_tpg->tl_nexus = tl_nexus;
pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
name);
@@ -992,12 +976,8 @@ static int tcm_loop_drop_nexus(
{
struct se_session *se_sess;
struct tcm_loop_nexus *tl_nexus;
-struct tcm_loop_hba *tl_hba = tpg->tl_hba;
-if (!tl_hba)
-return -ENODEV;
-tl_nexus = tl_hba->tl_nexus;
tl_nexus = tpg->tl_nexus;
if (!tl_nexus)
return -ENODEV;
@@ -1013,13 +993,13 @@ static int tcm_loop_drop_nexus(
}
pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
-" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
tl_nexus->se_sess->se_node_acl->initiatorname);
/*
* Release the SCSI I_T Nexus to the emulated SAS Target Port
*/
transport_deregister_session(tl_nexus->se_sess);
-tpg->tl_hba->tl_nexus = NULL;
tpg->tl_nexus = NULL;
kfree(tl_nexus);
return 0;
}
@@ -1035,7 +1015,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
struct tcm_loop_nexus *tl_nexus;
ssize_t ret;
-tl_nexus = tl_tpg->tl_hba->tl_nexus;
tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus)
return -ENODEV;
......
@@ -27,11 +27,6 @@ struct tcm_loop_tmr {
};
struct tcm_loop_nexus {
-int it_nexus_active;
-/*
-* Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
-*/
-struct scsi_host *sh;
/*
* Pointer to TCM session for I_T Nexus
*/
@@ -51,6 +46,7 @@ struct tcm_loop_tpg {
atomic_t tl_tpg_port_count;
struct se_portal_group tl_se_tpg;
struct tcm_loop_hba *tl_hba;
struct tcm_loop_nexus *tl_nexus;
};
struct tcm_loop_hba {
@@ -59,7 +55,6 @@ struct tcm_loop_hba {
struct se_hba_s *se_hba;
struct se_lun *tl_hba_lun;
struct se_port *tl_hba_lun_sep;
-struct tcm_loop_nexus *tl_nexus;
struct device dev;
struct Scsi_Host *sh;
struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
......
@@ -659,6 +659,7 @@ int se_dev_set_max_unmap_lba_count(
dev, dev->dev_attrib.max_unmap_lba_count);
return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);
int se_dev_set_max_unmap_block_desc_count(
struct se_device *dev,
@@ -670,6 +671,7 @@ int se_dev_set_max_unmap_block_desc_count(
dev, dev->dev_attrib.max_unmap_block_desc_count);
return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);
int se_dev_set_unmap_granularity(
struct se_device *dev,
@@ -680,6 +682,7 @@ int se_dev_set_unmap_granularity(
dev, dev->dev_attrib.unmap_granularity);
return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity);
int se_dev_set_unmap_granularity_alignment(
struct se_device *dev,
@@ -690,6 +693,7 @@ int se_dev_set_unmap_granularity_alignment(
dev, dev->dev_attrib.unmap_granularity_alignment);
return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);
int se_dev_set_max_write_same_len(
struct se_device *dev,
@@ -700,6 +704,7 @@ int se_dev_set_max_write_same_len(
dev, dev->dev_attrib.max_write_same_len);
return 0;
}
EXPORT_SYMBOL(se_dev_set_max_write_same_len);
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
@@ -738,6 +743,7 @@ int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
@@ -753,6 +759,7 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_dpo);
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
@@ -760,17 +767,12 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
-if (flag &&
-dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-pr_err("emulate_fua_write not supported for pSCSI\n");
-return -EINVAL;
-}
dev->dev_attrib.emulate_fua_write = flag;
pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
dev, dev->dev_attrib.emulate_fua_write);
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_write);
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
@@ -786,6 +788,7 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_read);
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
@@ -793,11 +796,6 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
-if (flag &&
-dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-pr_err("emulate_write_cache not supported for pSCSI\n");
-return -EINVAL;
-}
if (flag &&
dev->transport->get_write_cache) {
pr_err("emulate_write_cache not supported for this device\n");
@@ -809,6 +807,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
dev, dev->dev_attrib.emulate_write_cache);
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_write_cache);
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
@@ -829,6 +828,7 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
@@ -849,6 +849,7 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tas);
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
@@ -870,6 +871,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
dev, flag);
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpu);
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
@@ -891,6 +893,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
dev, flag);
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpws);
int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
@@ -904,6 +907,7 @@ int se_dev_set_emulate_caw(struct se_device *dev, int flag)
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_caw);
int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
@@ -917,6 +921,7 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_3pc);
int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
{
@@ -970,6 +975,7 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_type);
int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
{
@@ -1005,6 +1011,7 @@ int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_format);
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
@@ -1017,6 +1024,7 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
return 0;
}
EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);
int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
{
@@ -1034,6 +1042,7 @@ int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
return 0;
}
EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);
int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
@@ -1046,6 +1055,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
dev, flag);
return 0;
}
EXPORT_SYMBOL(se_dev_set_is_nonrot);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
@@ -1058,6 +1068,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
/*
* Note, this can only be called on unexported SE Device Object.
@@ -1076,31 +1087,21 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
return -EINVAL;
}
-if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-if (queue_depth > dev->dev_attrib.hw_queue_depth) {
-pr_err("dev[%p]: Passed queue_depth: %u"
-" exceeds TCM/SE_Device TCQ: %u\n",
-dev, queue_depth,
-dev->dev_attrib.hw_queue_depth);
-return -EINVAL;
-}
-} else {
-if (queue_depth > dev->dev_attrib.queue_depth) {
-if (queue_depth > dev->dev_attrib.hw_queue_depth) {
-pr_err("dev[%p]: Passed queue_depth:"
-" %u exceeds TCM/SE_Device MAX"
-" TCQ: %u\n", dev, queue_depth,
-dev->dev_attrib.hw_queue_depth);
-return -EINVAL;
-}
-}
if (queue_depth > dev->dev_attrib.queue_depth) {
if (queue_depth > dev->dev_attrib.hw_queue_depth) {
pr_err("dev[%p]: Passed queue_depth:"
" %u exceeds TCM/SE_Device MAX"
" TCQ: %u\n", dev, queue_depth,
dev->dev_attrib.hw_queue_depth);
return -EINVAL;
}
}
}
dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
}
EXPORT_SYMBOL(se_dev_set_queue_depth);
int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
@@ -1123,22 +1124,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
DA_STATUS_MAX_SECTORS_MIN);
return -EINVAL;
}
-if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
-pr_err("dev[%p]: Passed fabric_max_sectors: %u"
-" greater than TCM/SE_Device max_sectors:"
-" %u\n", dev, fabric_max_sectors,
-dev->dev_attrib.hw_max_sectors);
-return -EINVAL;
-}
-} else {
-if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
-pr_err("dev[%p]: Passed fabric_max_sectors: %u"
-" greater than DA_STATUS_MAX_SECTORS_MAX:"
-" %u\n", dev, fabric_max_sectors,
-DA_STATUS_MAX_SECTORS_MAX);
-return -EINVAL;
-}
if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
pr_err("dev[%p]: Passed fabric_max_sectors: %u"
" greater than DA_STATUS_MAX_SECTORS_MAX:"
" %u\n", dev, fabric_max_sectors,
DA_STATUS_MAX_SECTORS_MAX);
return -EINVAL;
}
/*
* Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
@@ -1155,6 +1146,7 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
dev, fabric_max_sectors);
return 0;
}
EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
@@ -1164,11 +1156,6 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
dev, dev->export_count);
return -EINVAL;
}
-if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-pr_err("dev[%p]: Passed optimal_sectors cannot be"
-" changed for TCM/pSCSI\n", dev);
-return -EINVAL;
-}
if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
" greater than fabric_max_sectors: %u\n", dev,
@@ -1181,6 +1168,7 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
dev, optimal_sectors);
return 0;
}
EXPORT_SYMBOL(se_dev_set_optimal_sectors);
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
@@ -1201,13 +1189,6 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
return -EINVAL;
}
-if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-pr_err("dev[%p]: Not allowed to change block_size for"
-" Physical Device, use for Linux/SCSI to change"
-" block_size for underlying hardware\n", dev);
-return -EINVAL;
-}
dev->dev_attrib.block_size = block_size;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
@@ -1218,6 +1199,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
return 0;
}
EXPORT_SYMBOL(se_dev_set_block_size);
struct se_lun *core_dev_add_lun(
struct se_portal_group *tpg,
......
@@ -37,6 +37,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>
#include "target_core_file.h"
@@ -934,6 +935,42 @@ fd_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &fd_sbc_ops);
}
DEF_TB_DEFAULT_ATTRIBS(fileio);
static struct configfs_attribute *fileio_backend_dev_attrs[] = {
&fileio_dev_attrib_emulate_model_alias.attr,
&fileio_dev_attrib_emulate_dpo.attr,
&fileio_dev_attrib_emulate_fua_write.attr,
&fileio_dev_attrib_emulate_fua_read.attr,
&fileio_dev_attrib_emulate_write_cache.attr,
&fileio_dev_attrib_emulate_ua_intlck_ctrl.attr,
&fileio_dev_attrib_emulate_tas.attr,
&fileio_dev_attrib_emulate_tpu.attr,
&fileio_dev_attrib_emulate_tpws.attr,
&fileio_dev_attrib_emulate_caw.attr,
&fileio_dev_attrib_emulate_3pc.attr,
&fileio_dev_attrib_pi_prot_type.attr,
&fileio_dev_attrib_hw_pi_prot_type.attr,
&fileio_dev_attrib_pi_prot_format.attr,
&fileio_dev_attrib_enforce_pr_isids.attr,
&fileio_dev_attrib_is_nonrot.attr,
&fileio_dev_attrib_emulate_rest_reord.attr,
&fileio_dev_attrib_force_pr_aptpl.attr,
&fileio_dev_attrib_hw_block_size.attr,
&fileio_dev_attrib_block_size.attr,
&fileio_dev_attrib_hw_max_sectors.attr,
&fileio_dev_attrib_fabric_max_sectors.attr,
&fileio_dev_attrib_optimal_sectors.attr,
&fileio_dev_attrib_hw_queue_depth.attr,
&fileio_dev_attrib_queue_depth.attr,
&fileio_dev_attrib_max_unmap_lba_count.attr,
&fileio_dev_attrib_max_unmap_block_desc_count.attr,
&fileio_dev_attrib_unmap_granularity.attr,
&fileio_dev_attrib_unmap_granularity_alignment.attr,
&fileio_dev_attrib_max_write_same_len.attr,
NULL,
};
static struct se_subsystem_api fileio_template = {
.name = "fileio",
.inquiry_prod = "FILEIO",
@@ -957,6 +994,11 @@ static struct se_subsystem_api fileio_template = {
static int __init fileio_module_init(void)
{
struct target_backend_cits *tbc = &fileio_template.tb_cits;
target_core_setup_sub_cits(&fileio_template);
tbc->tb_dev_attrib_cit.ct_attrs = fileio_backend_dev_attrs;
return transport_subsystem_register(&fileio_template);
}
......
@@ -36,6 +36,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "target_core_internal.h"
@@ -137,8 +138,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
return hba;
out_module_put:
-if (hba->transport->owner)
-module_put(hba->transport->owner);
module_put(hba->transport->owner);
hba->transport = NULL;
out_free_hba:
kfree(hba);
@@ -159,8 +159,7 @@ core_delete_hba(struct se_hba *hba)
pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
-if (hba->transport->owner)
-module_put(hba->transport->owner);
module_put(hba->transport->owner);
hba->transport = NULL;
kfree(hba);
......
@@ -41,6 +41,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>
#include "target_core_iblock.h"
@@ -858,6 +859,42 @@ static bool iblock_get_write_cache(struct se_device *dev)
return q->flush_flags & REQ_FLUSH;
}
DEF_TB_DEFAULT_ATTRIBS(iblock);
static struct configfs_attribute *iblock_backend_dev_attrs[] = {
&iblock_dev_attrib_emulate_model_alias.attr,
&iblock_dev_attrib_emulate_dpo.attr,
&iblock_dev_attrib_emulate_fua_write.attr,
&iblock_dev_attrib_emulate_fua_read.attr,
&iblock_dev_attrib_emulate_write_cache.attr,
&iblock_dev_attrib_emulate_ua_intlck_ctrl.attr,
&iblock_dev_attrib_emulate_tas.attr,
&iblock_dev_attrib_emulate_tpu.attr,
&iblock_dev_attrib_emulate_tpws.attr,
&iblock_dev_attrib_emulate_caw.attr,
&iblock_dev_attrib_emulate_3pc.attr,
&iblock_dev_attrib_pi_prot_type.attr,
&iblock_dev_attrib_hw_pi_prot_type.attr,
&iblock_dev_attrib_pi_prot_format.attr,
&iblock_dev_attrib_enforce_pr_isids.attr,
&iblock_dev_attrib_is_nonrot.attr,
&iblock_dev_attrib_emulate_rest_reord.attr,
&iblock_dev_attrib_force_pr_aptpl.attr,
&iblock_dev_attrib_hw_block_size.attr,
&iblock_dev_attrib_block_size.attr,
&iblock_dev_attrib_hw_max_sectors.attr,
&iblock_dev_attrib_fabric_max_sectors.attr,
&iblock_dev_attrib_optimal_sectors.attr,
&iblock_dev_attrib_hw_queue_depth.attr,
&iblock_dev_attrib_queue_depth.attr,
&iblock_dev_attrib_max_unmap_lba_count.attr,
&iblock_dev_attrib_max_unmap_block_desc_count.attr,
&iblock_dev_attrib_unmap_granularity.attr,
&iblock_dev_attrib_unmap_granularity_alignment.attr,
&iblock_dev_attrib_max_write_same_len.attr,
NULL,
};
static struct se_subsystem_api iblock_template = {
.name = "iblock",
.inquiry_prod = "IBLOCK",
@@ -883,6 +920,11 @@ static struct se_subsystem_api iblock_template = {
static int __init iblock_module_init(void)
{
struct target_backend_cits *tbc = &iblock_template.tb_cits;
target_core_setup_sub_cits(&iblock_template);
tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs;
return transport_subsystem_register(&iblock_template);
}
......
@@ -18,34 +18,6 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
-int se_dev_set_task_timeout(struct se_device *, u32);
-int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
-int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
-int se_dev_set_unmap_granularity(struct se_device *, u32);
-int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
-int se_dev_set_max_write_same_len(struct se_device *, u32);
-int se_dev_set_emulate_model_alias(struct se_device *, int);
-int se_dev_set_emulate_dpo(struct se_device *, int);
-int se_dev_set_emulate_fua_write(struct se_device *, int);
-int se_dev_set_emulate_fua_read(struct se_device *, int);
-int se_dev_set_emulate_write_cache(struct se_device *, int);
-int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
-int se_dev_set_emulate_tas(struct se_device *, int);
-int se_dev_set_emulate_tpu(struct se_device *, int);
-int se_dev_set_emulate_tpws(struct se_device *, int);
-int se_dev_set_emulate_caw(struct se_device *, int);
-int se_dev_set_emulate_3pc(struct se_device *, int);
-int se_dev_set_pi_prot_type(struct se_device *, int);
-int se_dev_set_pi_prot_format(struct se_device *, int);
-int se_dev_set_enforce_pr_isids(struct se_device *, int);
-int se_dev_set_force_pr_aptpl(struct se_device *, int);
-int se_dev_set_is_nonrot(struct se_device *, int);
-int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
-int se_dev_set_queue_depth(struct se_device *, u32);
-int se_dev_set_max_sectors(struct se_device *, u32);
-int se_dev_set_fabric_max_sectors(struct se_device *, u32);
-int se_dev_set_optimal_sectors(struct se_device *, u32);
-int se_dev_set_block_size(struct se_device *, u32);
struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
......
@@ -44,6 +44,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>
#include "target_core_alua.h"
#include "target_core_pscsi.h"
@@ -1165,6 +1166,26 @@ static void pscsi_req_done(struct request *req, int uptodate)
kfree(pt);
}
DEF_TB_DEV_ATTRIB_RO(pscsi, hw_pi_prot_type);
TB_DEV_ATTR_RO(pscsi, hw_pi_prot_type);
DEF_TB_DEV_ATTRIB_RO(pscsi, hw_block_size);
TB_DEV_ATTR_RO(pscsi, hw_block_size);
DEF_TB_DEV_ATTRIB_RO(pscsi, hw_max_sectors);
TB_DEV_ATTR_RO(pscsi, hw_max_sectors);
DEF_TB_DEV_ATTRIB_RO(pscsi, hw_queue_depth);
TB_DEV_ATTR_RO(pscsi, hw_queue_depth);
static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
&pscsi_dev_attrib_hw_pi_prot_type.attr,
&pscsi_dev_attrib_hw_block_size.attr,
&pscsi_dev_attrib_hw_max_sectors.attr,
&pscsi_dev_attrib_hw_queue_depth.attr,
NULL,
};
static struct se_subsystem_api pscsi_template = {
.name = "pscsi",
.owner = THIS_MODULE,
@@ -1185,6 +1206,11 @@ static struct se_subsystem_api pscsi_template = {
static int __init pscsi_module_init(void)
{
struct target_backend_cits *tbc = &pscsi_template.tb_cits;
target_core_setup_sub_cits(&pscsi_template);
tbc->tb_dev_attrib_cit.ct_attrs = pscsi_backend_dev_attrs;
return transport_subsystem_register(&pscsi_template);
}
......
@@ -34,6 +34,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>
#include "target_core_rd.h"
@@ -632,6 +633,42 @@ rd_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
DEF_TB_DEFAULT_ATTRIBS(rd_mcp);
static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
&rd_mcp_dev_attrib_emulate_model_alias.attr,
&rd_mcp_dev_attrib_emulate_dpo.attr,
&rd_mcp_dev_attrib_emulate_fua_write.attr,
&rd_mcp_dev_attrib_emulate_fua_read.attr,
&rd_mcp_dev_attrib_emulate_write_cache.attr,
&rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
&rd_mcp_dev_attrib_emulate_tas.attr,
&rd_mcp_dev_attrib_emulate_tpu.attr,
&rd_mcp_dev_attrib_emulate_tpws.attr,
&rd_mcp_dev_attrib_emulate_caw.attr,
&rd_mcp_dev_attrib_emulate_3pc.attr,
&rd_mcp_dev_attrib_pi_prot_type.attr,
&rd_mcp_dev_attrib_hw_pi_prot_type.attr,
&rd_mcp_dev_attrib_pi_prot_format.attr,
&rd_mcp_dev_attrib_enforce_pr_isids.attr,
&rd_mcp_dev_attrib_is_nonrot.attr,
&rd_mcp_dev_attrib_emulate_rest_reord.attr,
&rd_mcp_dev_attrib_force_pr_aptpl.attr,
&rd_mcp_dev_attrib_hw_block_size.attr,
&rd_mcp_dev_attrib_block_size.attr,
&rd_mcp_dev_attrib_hw_max_sectors.attr,
&rd_mcp_dev_attrib_fabric_max_sectors.attr,
&rd_mcp_dev_attrib_optimal_sectors.attr,
&rd_mcp_dev_attrib_hw_queue_depth.attr,
&rd_mcp_dev_attrib_queue_depth.attr,
&rd_mcp_dev_attrib_max_unmap_lba_count.attr,
&rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
&rd_mcp_dev_attrib_unmap_granularity.attr,
&rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
&rd_mcp_dev_attrib_max_write_same_len.attr,
NULL,
};
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.inquiry_prod = "RAMDISK-MCP",
@@ -653,8 +690,12 @@ static struct se_subsystem_api rd_mcp_template = {
int __init rd_module_init(void)
{
struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
int ret;
target_core_setup_sub_cits(&rd_mcp_template);
tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;
ret = transport_subsystem_register(&rd_mcp_template);
if (ret < 0) {
return ret;
......
@@ -28,6 +28,8 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>
#include <linux/target_core_user.h>
/*
@@ -1092,6 +1094,42 @@ tcmu_parse_cdb(struct se_cmd *cmd)
return ret;
}
DEF_TB_DEFAULT_ATTRIBS(tcmu);
static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
&tcmu_dev_attrib_emulate_model_alias.attr,
&tcmu_dev_attrib_emulate_dpo.attr,
&tcmu_dev_attrib_emulate_fua_write.attr,
&tcmu_dev_attrib_emulate_fua_read.attr,
&tcmu_dev_attrib_emulate_write_cache.attr,
&tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
&tcmu_dev_attrib_emulate_tas.attr,
&tcmu_dev_attrib_emulate_tpu.attr,
&tcmu_dev_attrib_emulate_tpws.attr,
&tcmu_dev_attrib_emulate_caw.attr,
&tcmu_dev_attrib_emulate_3pc.attr,
&tcmu_dev_attrib_pi_prot_type.attr,
&tcmu_dev_attrib_hw_pi_prot_type.attr,
&tcmu_dev_attrib_pi_prot_format.attr,
&tcmu_dev_attrib_enforce_pr_isids.attr,
&tcmu_dev_attrib_is_nonrot.attr,
&tcmu_dev_attrib_emulate_rest_reord.attr,
&tcmu_dev_attrib_force_pr_aptpl.attr,
&tcmu_dev_attrib_hw_block_size.attr,
&tcmu_dev_attrib_block_size.attr,
&tcmu_dev_attrib_hw_max_sectors.attr,
&tcmu_dev_attrib_fabric_max_sectors.attr,
&tcmu_dev_attrib_optimal_sectors.attr,
&tcmu_dev_attrib_hw_queue_depth.attr,
&tcmu_dev_attrib_queue_depth.attr,
&tcmu_dev_attrib_max_unmap_lba_count.attr,
&tcmu_dev_attrib_max_unmap_block_desc_count.attr,
&tcmu_dev_attrib_unmap_granularity.attr,
&tcmu_dev_attrib_unmap_granularity_alignment.attr,
&tcmu_dev_attrib_max_write_same_len.attr,
NULL,
};
static struct se_subsystem_api tcmu_template = {
.name = "user",
.inquiry_prod = "USER",
@@ -1112,6 +1150,7 @@ static struct se_subsystem_api tcmu_template = {
static int __init tcmu_module_init(void)
{
struct target_backend_cits *tbc = &tcmu_template.tb_cits;
int ret;
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1134,6 +1173,9 @@ static int __init tcmu_module_init(void)
goto out_unreg_device;
}
target_core_setup_sub_cits(&tcmu_template);
tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs;
ret = transport_subsystem_register(&tcmu_template);
if (ret)
goto out_unreg_genl;
......
@@ -5,6 +5,15 @@
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
struct target_backend_cits {
struct config_item_type tb_dev_cit;
struct config_item_type tb_dev_attrib_cit;
struct config_item_type tb_dev_pr_cit;
struct config_item_type tb_dev_wwn_cit;
struct config_item_type tb_dev_alua_tg_pt_gps_cit;
struct config_item_type tb_dev_stat_cit;
};
struct se_subsystem_api {
struct list_head sub_api_list;
@@ -44,6 +53,8 @@ struct se_subsystem_api {
int (*init_prot)(struct se_device *);
int (*format_prot)(struct se_device *);
void (*free_prot)(struct se_device *);
struct target_backend_cits tb_cits;
};
struct sbc_ops {
@@ -96,4 +107,36 @@ sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,
void array_free(void *array, int n);
/* From target_core_configfs.c to setup default backend config_item_types */
void target_core_setup_sub_cits(struct se_subsystem_api *);
/* attribute helpers from target_core_device.c for backend drivers */
int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
int se_dev_set_unmap_granularity(struct se_device *, u32);
int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
int se_dev_set_max_write_same_len(struct se_device *, u32);
int se_dev_set_emulate_model_alias(struct se_device *, int);
int se_dev_set_emulate_dpo(struct se_device *, int);
int se_dev_set_emulate_fua_write(struct se_device *, int);
int se_dev_set_emulate_fua_read(struct se_device *, int);
int se_dev_set_emulate_write_cache(struct se_device *, int);
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
int se_dev_set_emulate_tas(struct se_device *, int);
int se_dev_set_emulate_tpu(struct se_device *, int);
int se_dev_set_emulate_tpws(struct se_device *, int);
int se_dev_set_emulate_caw(struct se_device *, int);
int se_dev_set_emulate_3pc(struct se_device *, int);
int se_dev_set_pi_prot_type(struct se_device *, int);
int se_dev_set_pi_prot_format(struct se_device *, int);
int se_dev_set_enforce_pr_isids(struct se_device *, int);
int se_dev_set_force_pr_aptpl(struct se_device *, int);
int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
int se_dev_set_queue_depth(struct se_device *, u32);
int se_dev_set_max_sectors(struct se_device *, u32);
int se_dev_set_fabric_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32);
#endif /* TARGET_CORE_BACKEND_H */
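For other backends being converted, the intended calling pattern for these helpers is sketched below; it mirrors the tcmu hunk earlier in this diff. The backend name "foo", its foo_template and the foo_backend_dev_attrs[] array are hypothetical placeholders assumed to be defined elsewhere (a matching attribute-array sketch follows the configfs header further down).
/*
 * Hypothetical sketch, not part of this series: a backend "foo" wires up
 * the new per-backend config_item_types at module_init time.
 */
static int __init foo_module_init(void)
{
	struct target_backend_cits *tbc = &foo_template.tb_cits;

	/* Install the default config_item_types for this backend. */
	target_core_setup_sub_cits(&foo_template);
	/* Point the dev_attrib group at the backend's own attribute list. */
	tbc->tb_dev_attrib_cit.ct_attrs = foo_backend_dev_attrs;

	return transport_subsystem_register(&foo_template);
}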
#ifndef TARGET_CORE_BACKEND_CONFIGFS_H
#define TARGET_CORE_BACKEND_CONFIGFS_H
#include <target/configfs_macros.h>
#define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name) \
static ssize_t _backend##_dev_show_attr_##_name( \
struct se_dev_attrib *da, \
char *page) \
{ \
return snprintf(page, PAGE_SIZE, "%u\n", \
(u32)da->da_dev->dev_attrib._name); \
}
#define DEF_TB_DEV_ATTRIB_STORE(_backend, _name) \
static ssize_t _backend##_dev_store_attr_##_name( \
struct se_dev_attrib *da, \
const char *page, \
size_t count) \
{ \
unsigned long val; \
int ret; \
\
ret = kstrtoul(page, 0, &val); \
if (ret < 0) { \
pr_err("kstrtoul() failed with ret: %d\n", ret); \
return -EINVAL; \
} \
ret = se_dev_set_##_name(da->da_dev, (u32)val); \
\
return (!ret) ? count : -EINVAL; \
}
#define DEF_TB_DEV_ATTRIB(_backend, _name) \
DEF_TB_DEV_ATTRIB_SHOW(_backend, _name); \
DEF_TB_DEV_ATTRIB_STORE(_backend, _name);
#define DEF_TB_DEV_ATTRIB_RO(_backend, name) \
DEF_TB_DEV_ATTRIB_SHOW(_backend, name);
CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
#define TB_DEV_ATTR(_backend, _name, _mode) \
static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_backend##_dev_show_attr_##_name, \
_backend##_dev_store_attr_##_name);
#define TB_DEV_ATTR_RO(_backend, _name) \
static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
__CONFIGFS_EATTR_RO(_name, \
_backend##_dev_show_attr_##_name);
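As a reading aid only: for a hypothetical backend "foo", one DEF_TB_DEV_ATTRIB()/TB_DEV_ATTR() pair (block_size chosen as the example attribute) roughly expands to the following show/store wrappers around the se_dev_set_*() helpers exported above.
static ssize_t foo_dev_show_attr_block_size(struct se_dev_attrib *da,
					    char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n",
			(u32)da->da_dev->dev_attrib.block_size);
}
static ssize_t foo_dev_store_attr_block_size(struct se_dev_attrib *da,
					     const char *page, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	/* Validate and apply the value via the core helper. */
	ret = se_dev_set_block_size(da->da_dev, (u32)val);

	return (!ret) ? count : -EINVAL;
}
/* TB_DEV_ATTR(foo, block_size, S_IRUGO | S_IWUSR) then declares: */
static struct target_backend_dev_attrib_attribute foo_dev_attrib_block_size =
	__CONFIGFS_EATTR(block_size, S_IRUGO | S_IWUSR,
			foo_dev_show_attr_block_size,
			foo_dev_store_attr_block_size);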
/*
* Default list of target backend device attributes as defined by
* struct se_dev_attrib
*/
#define DEF_TB_DEFAULT_ATTRIBS(_backend) \
DEF_TB_DEV_ATTRIB(_backend, emulate_model_alias); \
TB_DEV_ATTR(_backend, emulate_model_alias, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_dpo); \
TB_DEV_ATTR(_backend, emulate_dpo, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_fua_write); \
TB_DEV_ATTR(_backend, emulate_fua_write, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_fua_read); \
TB_DEV_ATTR(_backend, emulate_fua_read, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_write_cache); \
TB_DEV_ATTR(_backend, emulate_write_cache, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_ua_intlck_ctrl); \
TB_DEV_ATTR(_backend, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_tas); \
TB_DEV_ATTR(_backend, emulate_tas, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_tpu); \
TB_DEV_ATTR(_backend, emulate_tpu, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_tpws); \
TB_DEV_ATTR(_backend, emulate_tpws, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_caw); \
TB_DEV_ATTR(_backend, emulate_caw, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_3pc); \
TB_DEV_ATTR(_backend, emulate_3pc, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, pi_prot_type); \
TB_DEV_ATTR(_backend, pi_prot_type, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_pi_prot_type); \
TB_DEV_ATTR_RO(_backend, hw_pi_prot_type); \
DEF_TB_DEV_ATTRIB(_backend, pi_prot_format); \
TB_DEV_ATTR(_backend, pi_prot_format, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, enforce_pr_isids); \
TB_DEV_ATTR(_backend, enforce_pr_isids, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, is_nonrot); \
TB_DEV_ATTR(_backend, is_nonrot, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_rest_reord); \
TB_DEV_ATTR(_backend, emulate_rest_reord, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, force_pr_aptpl); \
TB_DEV_ATTR(_backend, force_pr_aptpl, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_block_size); \
TB_DEV_ATTR_RO(_backend, hw_block_size); \
DEF_TB_DEV_ATTRIB(_backend, block_size); \
TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \
TB_DEV_ATTR_RO(_backend, hw_max_sectors); \
DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \
TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \
TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
TB_DEV_ATTR_RO(_backend, hw_queue_depth); \
DEF_TB_DEV_ATTRIB(_backend, queue_depth); \
TB_DEV_ATTR(_backend, queue_depth, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, max_unmap_lba_count); \
TB_DEV_ATTR(_backend, max_unmap_lba_count, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, max_unmap_block_desc_count); \
TB_DEV_ATTR(_backend, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, unmap_granularity); \
TB_DEV_ATTR(_backend, unmap_granularity, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, unmap_granularity_alignment); \
TB_DEV_ATTR(_backend, unmap_granularity_alignment, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, max_write_same_len); \
TB_DEV_ATTR(_backend, max_write_same_len, S_IRUGO | S_IWUSR);
#endif /* TARGET_CORE_BACKEND_CONFIGFS_H */
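Tying the two headers together: a backend pulls in the default attribute definitions with DEF_TB_DEFAULT_ATTRIBS() and lists the generated attributes in a NULL-terminated configfs_attribute array, exactly as the tcmu hunk at the top of this diff does. The abbreviated sketch below again uses the hypothetical "foo" backend and shows only a few of the entries.
/* Hypothetical sketch: generate show/store handlers + attributes for "foo". */
DEF_TB_DEFAULT_ATTRIBS(foo);

static struct configfs_attribute *foo_backend_dev_attrs[] = {
	&foo_dev_attrib_emulate_model_alias.attr,
	&foo_dev_attrib_block_size.attr,
	&foo_dev_attrib_queue_depth.attr,
	/* ... remaining default attributes, as in the tcmu array above ... */
	NULL,
};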
...@@ -6,10 +6,6 @@
#include <linux/types.h>
#include <linux/uio.h>
#ifndef __packed
#define __packed __attribute__((packed))
#endif
#define TCMU_VERSION "1.0"
/*
...