Commit dd0e19f3 authored by Scott Teel's avatar Scott Teel Committed by James Bottomley

[SCSI] hpsa: add controller-based data-at-rest encryption compatibility to ioaccel2

Add controller-based data-at-rest encryption compatibility
to ioaccel2 path (HP SSD Smart Path).

Encryption feature requires driver to supply additional fields
for encryption enable, tweak index, and data encryption key index
in the ioaccel2 request structure.

Encryption enable flag and data encryption key index come from
raid_map data structure from raid offload command.

During ioaccel2 submission, check device structure's raid map to see if
encryption is enabled for the device. If so, call new function below.

Add function set_encrypt_ioaccel2 to set encryption flag, data encryption key
index, and calculate tweak value from request's logical block address.
Signed-off-by: default avatarScott Teel <scott.teel@hp.com>
Signed-off-by: default avatarStephen M. Cameron <scameron@beardog.cce.hp.com>
Signed-off-by: default avatarJames Bottomley <JBottomley@Parallels.com>
parent 51c35139
...@@ -2027,6 +2027,14 @@ static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, ...@@ -2027,6 +2027,14 @@ static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
le16_to_cpu(map_buff->row_cnt)); le16_to_cpu(map_buff->row_cnt));
dev_info(&h->pdev->dev, "layout_map_count = %u\n", dev_info(&h->pdev->dev, "layout_map_count = %u\n",
le16_to_cpu(map_buff->layout_map_count)); le16_to_cpu(map_buff->layout_map_count));
dev_info(&h->pdev->dev, "flags = %u\n",
le16_to_cpu(map_buff->flags));
if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
dev_info(&h->pdev->dev, "encryption = ON\n");
else
dev_info(&h->pdev->dev, "encryption = OFF\n");
dev_info(&h->pdev->dev, "dekindex = %u\n",
le16_to_cpu(map_buff->dekindex));
map_cnt = le16_to_cpu(map_buff->layout_map_count); map_cnt = le16_to_cpu(map_buff->layout_map_count);
for (map = 0; map < map_cnt; map++) { for (map = 0; map < map_cnt; map++) {
...@@ -2967,6 +2975,128 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, ...@@ -2967,6 +2975,128 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
cmd->cmnd, cmd->cmd_len, dev->scsi3addr); cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
} }
/*
* Set encryption parameters for the ioaccel2 request
*/
static void set_encrypt_ioaccel2(struct ctlr_info *h,
struct CommandList *c, struct io_accel2_cmd *cp)
{
struct scsi_cmnd *cmd = c->scsi_cmd;
struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
struct raid_map_data *map = &dev->raid_map;
u64 first_block;
BUG_ON(!(dev->offload_config && dev->offload_enabled));
/* Are we doing encryption on this device */
if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
return;
/* Set the data encryption key index. */
cp->dekindex = map->dekindex;
/* Set the encryption enable flag, encoded into direction field. */
cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
/* Set encryption tweak values based on logical block address
* If block size is 512, tweak value is LBA.
* For other block sizes, tweak is (LBA * block size)/ 512)
*/
switch (cmd->cmnd[0]) {
/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
case WRITE_6:
case READ_6:
if (map->volume_blk_size == 512) {
cp->tweak_lower =
(((u32) cmd->cmnd[2]) << 8) |
cmd->cmnd[3];
cp->tweak_upper = 0;
} else {
first_block =
(((u64) cmd->cmnd[2]) << 8) |
cmd->cmnd[3];
first_block = (first_block * map->volume_blk_size)/512;
cp->tweak_lower = (u32)first_block;
cp->tweak_upper = (u32)(first_block >> 32);
}
break;
case WRITE_10:
case READ_10:
if (map->volume_blk_size == 512) {
cp->tweak_lower =
(((u32) cmd->cmnd[2]) << 24) |
(((u32) cmd->cmnd[3]) << 16) |
(((u32) cmd->cmnd[4]) << 8) |
cmd->cmnd[5];
cp->tweak_upper = 0;
} else {
first_block =
(((u64) cmd->cmnd[2]) << 24) |
(((u64) cmd->cmnd[3]) << 16) |
(((u64) cmd->cmnd[4]) << 8) |
cmd->cmnd[5];
first_block = (first_block * map->volume_blk_size)/512;
cp->tweak_lower = (u32)first_block;
cp->tweak_upper = (u32)(first_block >> 32);
}
break;
/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
case WRITE_12:
case READ_12:
if (map->volume_blk_size == 512) {
cp->tweak_lower =
(((u32) cmd->cmnd[2]) << 24) |
(((u32) cmd->cmnd[3]) << 16) |
(((u32) cmd->cmnd[4]) << 8) |
cmd->cmnd[5];
cp->tweak_upper = 0;
} else {
first_block =
(((u64) cmd->cmnd[2]) << 24) |
(((u64) cmd->cmnd[3]) << 16) |
(((u64) cmd->cmnd[4]) << 8) |
cmd->cmnd[5];
first_block = (first_block * map->volume_blk_size)/512;
cp->tweak_lower = (u32)first_block;
cp->tweak_upper = (u32)(first_block >> 32);
}
break;
case WRITE_16:
case READ_16:
if (map->volume_blk_size == 512) {
cp->tweak_lower =
(((u32) cmd->cmnd[6]) << 24) |
(((u32) cmd->cmnd[7]) << 16) |
(((u32) cmd->cmnd[8]) << 8) |
cmd->cmnd[9];
cp->tweak_upper =
(((u32) cmd->cmnd[2]) << 24) |
(((u32) cmd->cmnd[3]) << 16) |
(((u32) cmd->cmnd[4]) << 8) |
cmd->cmnd[5];
} else {
first_block =
(((u64) cmd->cmnd[2]) << 56) |
(((u64) cmd->cmnd[3]) << 48) |
(((u64) cmd->cmnd[4]) << 40) |
(((u64) cmd->cmnd[5]) << 32) |
(((u64) cmd->cmnd[6]) << 24) |
(((u64) cmd->cmnd[7]) << 16) |
(((u64) cmd->cmnd[8]) << 8) |
cmd->cmnd[9];
first_block = (first_block * map->volume_blk_size)/512;
cp->tweak_lower = (u32)first_block;
cp->tweak_upper = (u32)(first_block >> 32);
}
break;
default:
dev_err(&h->pdev->dev,
"ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
__func__);
BUG();
break;
}
}
static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
u8 *scsi3addr) u8 *scsi3addr)
...@@ -3016,13 +3146,16 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, ...@@ -3016,13 +3146,16 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
switch (cmd->sc_data_direction) { switch (cmd->sc_data_direction) {
case DMA_TO_DEVICE: case DMA_TO_DEVICE:
cp->direction = IOACCEL2_DIR_DATA_OUT; cp->direction &= ~IOACCEL2_DIRECTION_MASK;
cp->direction |= IOACCEL2_DIR_DATA_OUT;
break; break;
case DMA_FROM_DEVICE: case DMA_FROM_DEVICE:
cp->direction = IOACCEL2_DIR_DATA_IN; cp->direction &= ~IOACCEL2_DIRECTION_MASK;
cp->direction |= IOACCEL2_DIR_DATA_IN;
break; break;
case DMA_NONE: case DMA_NONE:
cp->direction = IOACCEL2_DIR_NO_DATA; cp->direction &= ~IOACCEL2_DIRECTION_MASK;
cp->direction |= IOACCEL2_DIR_NO_DATA;
break; break;
default: default:
dev_err(&h->pdev->dev, "unknown data direction: %d\n", dev_err(&h->pdev->dev, "unknown data direction: %d\n",
...@@ -3031,10 +3164,15 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, ...@@ -3031,10 +3164,15 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
break; break;
} }
} else { } else {
cp->direction = IOACCEL2_DIR_NO_DATA; cp->direction &= ~IOACCEL2_DIRECTION_MASK;
cp->direction |= IOACCEL2_DIR_NO_DATA;
} }
/* Set encryption parameters, if necessary */
set_encrypt_ioaccel2(h, c, cp);
cp->scsi_nexus = ioaccel_handle; cp->scsi_nexus = ioaccel_handle;
cp->Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT) | cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
DIRECT_LOOKUP_BIT; DIRECT_LOOKUP_BIT;
memcpy(cp->cdb, cdb, sizeof(cp->cdb)); memcpy(cp->cdb, cdb, sizeof(cp->cdb));
memset(cp->cciss_lun, 0, sizeof(cp->cciss_lun)); memset(cp->cciss_lun, 0, sizeof(cp->cciss_lun));
...@@ -3792,8 +3930,9 @@ static void hpsa_get_tag(struct ctlr_info *h, ...@@ -3792,8 +3930,9 @@ static void hpsa_get_tag(struct ctlr_info *h,
if (c->cmd_type == CMD_IOACCEL2) { if (c->cmd_type == CMD_IOACCEL2) {
struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
&h->ioaccel2_cmd_pool[c->cmdindex]; &h->ioaccel2_cmd_pool[c->cmdindex];
*tagupper = cm2->Tag.upper; /* upper tag not used in ioaccel2 mode */
*taglower = cm2->Tag.lower; memset(tagupper, 0, sizeof(*tagupper));
*taglower = cm2->Tag;
return; return;
} }
*tagupper = c->Header.Tag.upper; *tagupper = c->Header.Tag.upper;
...@@ -3841,8 +3980,8 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, ...@@ -3841,8 +3980,8 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
break; break;
} }
cmd_special_free(h, c); cmd_special_free(h, c);
dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
abort->Header.Tag.upper, abort->Header.Tag.lower); __func__, tagupper, taglower);
return rc; return rc;
} }
...@@ -6970,6 +7109,28 @@ static void __exit hpsa_cleanup(void) ...@@ -6970,6 +7109,28 @@ static void __exit hpsa_cleanup(void)
static void __attribute__((unused)) verify_offsets(void) static void __attribute__((unused)) verify_offsets(void)
{ {
#define VERIFY_OFFSET(member, offset) \
BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
VERIFY_OFFSET(structure_size, 0);
VERIFY_OFFSET(volume_blk_size, 4);
VERIFY_OFFSET(volume_blk_cnt, 8);
VERIFY_OFFSET(phys_blk_shift, 16);
VERIFY_OFFSET(parity_rotation_shift, 17);
VERIFY_OFFSET(strip_size, 18);
VERIFY_OFFSET(disk_starting_blk, 20);
VERIFY_OFFSET(disk_blk_cnt, 28);
VERIFY_OFFSET(data_disks_per_row, 36);
VERIFY_OFFSET(metadata_disks_per_row, 38);
VERIFY_OFFSET(row_cnt, 40);
VERIFY_OFFSET(layout_map_count, 42);
VERIFY_OFFSET(flags, 44);
VERIFY_OFFSET(dekindex, 46);
/* VERIFY_OFFSET(reserved, 48 */
VERIFY_OFFSET(data, 64);
#undef VERIFY_OFFSET
#define VERIFY_OFFSET(member, offset) \ #define VERIFY_OFFSET(member, offset) \
BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset) BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
......
...@@ -209,7 +209,10 @@ struct raid_map_data { ...@@ -209,7 +209,10 @@ struct raid_map_data {
u16 row_cnt; /* rows in each layout map */ u16 row_cnt; /* rows in each layout map */
u16 layout_map_count; /* layout maps (1 map per mirror/parity u16 layout_map_count; /* layout maps (1 map per mirror/parity
* group) */ * group) */
u8 reserved[20]; u16 flags; /* Bit 0 set if encryption enabled */
#define RAID_MAP_FLAG_ENCRYPT_ON 0x01
u16 dekindex; /* Data encryption key index. */
u8 reserved[16];
struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES]; struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
}; };
...@@ -502,11 +505,17 @@ struct io_accel2_scsi_response { ...@@ -502,11 +505,17 @@ struct io_accel2_scsi_response {
*/ */
struct io_accel2_cmd { struct io_accel2_cmd {
u8 IU_type; /* IU Type */ u8 IU_type; /* IU Type */
u8 direction; /* Transfer direction, 2 bits */ u8 direction; /* direction, memtype, and encryption */
#define IOACCEL2_DIRECTION_MASK 0x03 /* bits 0,1: direction */
#define IOACCEL2_DIRECTION_MEMTYPE_MASK 0x04 /* bit 2: memtype source/dest */
/* 0b=PCIe, 1b=DDR */
#define IOACCEL2_DIRECTION_ENCRYPT_MASK 0x08 /* bit 3: encryption flag */
/* 0=off, 1=on */
u8 reply_queue; /* Reply Queue ID */ u8 reply_queue; /* Reply Queue ID */
u8 reserved1; /* Reserved */ u8 reserved1; /* Reserved */
u32 scsi_nexus; /* Device Handle */ u32 scsi_nexus; /* Device Handle */
struct vals32 Tag; /* cciss tag */ u32 Tag; /* cciss tag, lower 4 bytes only */
u32 tweak_lower; /* Encryption tweak, lower 4 bytes */
u8 cdb[16]; /* SCSI Command Descriptor Block */ u8 cdb[16]; /* SCSI Command Descriptor Block */
u8 cciss_lun[8]; /* 8 byte SCSI address */ u8 cciss_lun[8]; /* 8 byte SCSI address */
u32 data_len; /* Total bytes to transfer */ u32 data_len; /* Total bytes to transfer */
...@@ -514,10 +523,10 @@ struct io_accel2_cmd { ...@@ -514,10 +523,10 @@ struct io_accel2_cmd {
#define IOACCEL2_PRIORITY_MASK 0x78 #define IOACCEL2_PRIORITY_MASK 0x78
#define IOACCEL2_ATTR_MASK 0x07 #define IOACCEL2_ATTR_MASK 0x07
u8 sg_count; /* Number of sg elements */ u8 sg_count; /* Number of sg elements */
u8 reserved3[2]; /* Reserved */ u16 dekindex; /* Data encryption key index */
u64 err_ptr; /* Error Pointer */ u64 err_ptr; /* Error Pointer */
u32 err_len; /* Error Length*/ u32 err_len; /* Error Length*/
u8 reserved4[4]; /* Reserved */ u32 tweak_upper; /* Encryption tweak, upper 4 bytes */
struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
struct io_accel2_scsi_response error_data; struct io_accel2_scsi_response error_data;
u8 pad[IOACCEL2_PAD]; u8 pad[IOACCEL2_PAD];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment