Commit 9806884d authored by Stephen Boyd, committed by Andy Gross

soc: qcom: smem: Handle big endian CPUs

The contents of smem are always in little endian, but the smem
driver is not capable of being used on big endian CPUs. Annotate
the little endian data members and update the code to do the
proper byte swapping.

Cc: Bjorn Andersson <bjorn.andersson@sonymobile.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Reviewed-by: Bjorn Andersson <bjorn.andersson@sonymobile.com>
Signed-off-by: Andy Gross <agross@codeaurora.org>
parent 1a03964d
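
For readers new to the idiom, here is a minimal, self-contained sketch of the pattern the patch applies throughout the driver. It is illustrative only and not part of the commit; struct example_hdr and the example_* helpers are hypothetical names. Fields that live in shared memory are declared with the __le16/__le32 types so sparse can flag unannotated accesses, readers convert with le16_to_cpu()/le32_to_cpu(), and writers convert back with cpu_to_le16()/cpu_to_le32(), or use le32_add_cpu() for in-place arithmetic:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Shared-memory layout: always little endian, so annotate the fields. */
struct example_hdr {
        __le16 item;
        __le16 flags;
        __le32 size;
};

/* Readers byte-swap from little endian into CPU endianness. */
static inline u32 example_hdr_size(const struct example_hdr *hdr)
{
        return le32_to_cpu(hdr->size);
}

/* Writers swap back before storing into the shared region. */
static inline void example_hdr_set_size(struct example_hdr *hdr, u32 size)
{
        hdr->size = cpu_to_le32(size);
}

/* In-place adjustment of a little-endian counter, as the patch does for
 * the partition free offsets, can use le32_add_cpu(). */
static inline void example_hdr_grow(struct example_hdr *hdr, u32 bytes)
{
        le32_add_cpu(&hdr->size, bytes);
}

On a big endian CPU these helpers perform the byte swap; on a little endian CPU they compile away, so the annotated code behaves identically on both.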
@@ -92,9 +92,9 @@
  * @params: parameters to the command
  */
 struct smem_proc_comm {
-        u32 command;
-        u32 status;
-        u32 params[2];
+        __le32 command;
+        __le32 status;
+        __le32 params[2];
 };
 
 /**
@@ -106,10 +106,10 @@ struct smem_proc_comm {
  * the default region. bits 0,1 are reserved
  */
 struct smem_global_entry {
-        u32 allocated;
-        u32 offset;
-        u32 size;
-        u32 aux_base; /* bits 1:0 reserved */
+        __le32 allocated;
+        __le32 offset;
+        __le32 size;
+        __le32 aux_base; /* bits 1:0 reserved */
 };
 #define AUX_BASE_MASK           0xfffffffc
@@ -125,11 +125,11 @@ struct smem_global_entry {
  */
 struct smem_header {
         struct smem_proc_comm proc_comm[4];
-        u32 version[32];
-        u32 initialized;
-        u32 free_offset;
-        u32 available;
-        u32 reserved;
+        __le32 version[32];
+        __le32 initialized;
+        __le32 free_offset;
+        __le32 available;
+        __le32 reserved;
         struct smem_global_entry toc[SMEM_ITEM_COUNT];
 };
@@ -143,12 +143,12 @@ struct smem_header {
  * @reserved: reserved entries for later use
  */
 struct smem_ptable_entry {
-        u32 offset;
-        u32 size;
-        u32 flags;
-        u16 host0;
-        u16 host1;
-        u32 reserved[8];
+        __le32 offset;
+        __le32 size;
+        __le32 flags;
+        __le16 host0;
+        __le16 host1;
+        __le32 reserved[8];
 };
 
 /**
@@ -160,13 +160,14 @@ struct smem_ptable_entry {
  * @entry: list of @smem_ptable_entry for the @num_entries partitions
  */
 struct smem_ptable {
-        u32 magic;
-        u32 version;
-        u32 num_entries;
-        u32 reserved[5];
+        u8 magic[4];
+        __le32 version;
+        __le32 num_entries;
+        __le32 reserved[5];
         struct smem_ptable_entry entry[];
 };
-#define SMEM_PTABLE_MAGIC       0x434f5424 /* "$TOC" */
+
+static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
 
 /**
  * struct smem_partition_header - header of the partitions
@@ -181,15 +182,16 @@ struct smem_ptable {
  * @reserved: for now reserved entries
  */
 struct smem_partition_header {
-        u32 magic;
-        u16 host0;
-        u16 host1;
-        u32 size;
-        u32 offset_free_uncached;
-        u32 offset_free_cached;
-        u32 reserved[3];
+        u8 magic[4];
+        __le16 host0;
+        __le16 host1;
+        __le32 size;
+        __le32 offset_free_uncached;
+        __le32 offset_free_cached;
+        __le32 reserved[3];
 };
-#define SMEM_PART_MAGIC         0x54525024 /* "$PRT" */
+
+static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
 
 /**
  * struct smem_private_entry - header of each item in the private partition
@@ -201,12 +203,12 @@ struct smem_partition_header {
  * @reserved: for now reserved entry
  */
 struct smem_private_entry {
-        u16 canary;
-        u16 item;
-        u32 size; /* includes padding bytes */
-        u16 padding_data;
-        u16 padding_hdr;
-        u32 reserved;
+        u16 canary; /* bytes are the same so no swapping needed */
+        __le16 item;
+        __le32 size; /* includes padding bytes */
+        __le16 padding_data;
+        __le16 padding_hdr;
+        __le32 reserved;
 };
 #define SMEM_PRIVATE_CANARY     0xa5a5
@@ -242,6 +244,45 @@ struct qcom_smem {
         struct smem_region regions[0];
 };
 
+static struct smem_private_entry *
+phdr_to_last_private_entry(struct smem_partition_header *phdr)
+{
+        void *p = phdr;
+
+        return p + le32_to_cpu(phdr->offset_free_uncached);
+}
+
+static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
+{
+        void *p = phdr;
+
+        return p + le32_to_cpu(phdr->offset_free_cached);
+}
+
+static struct smem_private_entry *
+phdr_to_first_private_entry(struct smem_partition_header *phdr)
+{
+        void *p = phdr;
+
+        return p + sizeof(*phdr);
+}
+
+static struct smem_private_entry *
+private_entry_next(struct smem_private_entry *e)
+{
+        void *p = e;
+
+        return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
+               le32_to_cpu(e->size);
+}
+
+static void *entry_to_item(struct smem_private_entry *e)
+{
+        void *p = e;
+
+        return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
+}
+
 /* Pointer to the one and only smem handle */
 static struct qcom_smem *__smem;
@@ -254,16 +295,16 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
                                    size_t size)
 {
         struct smem_partition_header *phdr;
-        struct smem_private_entry *hdr;
+        struct smem_private_entry *hdr, *end;
         size_t alloc_size;
-        void *p;
+        void *cached;
 
         phdr = smem->partitions[host];
+        hdr = phdr_to_first_private_entry(phdr);
+        end = phdr_to_last_private_entry(phdr);
+        cached = phdr_to_first_cached_entry(phdr);
 
-        p = (void *)phdr + sizeof(*phdr);
-        while (p < (void *)phdr + phdr->offset_free_uncached) {
-                hdr = p;
-
+        while (hdr < end) {
                 if (hdr->canary != SMEM_PRIVATE_CANARY) {
                         dev_err(smem->dev,
                                 "Found invalid canary in host %d partition\n",
@@ -271,24 +312,23 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
                         return -EINVAL;
                 }
 
-                if (hdr->item == item)
+                if (le16_to_cpu(hdr->item) == item)
                         return -EEXIST;
 
-                p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
+                hdr = private_entry_next(hdr);
         }
 
         /* Check that we don't grow into the cached region */
         alloc_size = sizeof(*hdr) + ALIGN(size, 8);
-        if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) {
+        if ((void *)hdr + alloc_size >= cached) {
                 dev_err(smem->dev, "Out of memory\n");
                 return -ENOSPC;
         }
 
-        hdr = p;
         hdr->canary = SMEM_PRIVATE_CANARY;
-        hdr->item = item;
-        hdr->size = ALIGN(size, 8);
-        hdr->padding_data = hdr->size - size;
+        hdr->item = cpu_to_le16(item);
+        hdr->size = cpu_to_le32(ALIGN(size, 8));
+        hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
         hdr->padding_hdr = 0;
 
         /*
@@ -297,7 +337,7 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
          * gets a consistent view of the linked list.
          */
         wmb();
-        phdr->offset_free_uncached += alloc_size;
+        le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
 
         return 0;
 }
@@ -318,11 +358,11 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
                 return -EEXIST;
 
         size = ALIGN(size, 8);
-        if (WARN_ON(size > header->available))
+        if (WARN_ON(size > le32_to_cpu(header->available)))
                 return -ENOMEM;
 
         entry->offset = header->free_offset;
-        entry->size = size;
+        entry->size = cpu_to_le32(size);
 
         /*
          * Ensure the header is consistent before we mark the item allocated,
@@ -330,10 +370,10 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
          * even though they do not take the spinlock on read.
          */
         wmb();
-        entry->allocated = 1;
+        entry->allocated = cpu_to_le32(1);
 
-        header->free_offset += size;
-        header->available -= size;
+        le32_add_cpu(&header->free_offset, size);
+        le32_add_cpu(&header->available, -size);
 
         return 0;
 }
@@ -396,15 +436,15 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
         if (!entry->allocated)
                 return ERR_PTR(-ENXIO);
 
-        aux_base = entry->aux_base & AUX_BASE_MASK;
+        aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
 
         for (i = 0; i < smem->num_regions; i++) {
                 area = &smem->regions[i];
 
                 if (area->aux_base == aux_base || !aux_base) {
                         if (size != NULL)
-                                *size = entry->size;
+                                *size = le32_to_cpu(entry->size);
 
-                        return area->virt_base + entry->offset;
+                        return area->virt_base + le32_to_cpu(entry->offset);
                 }
         }
@@ -417,30 +457,29 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
                                    size_t *size)
 {
         struct smem_partition_header *phdr;
-        struct smem_private_entry *hdr;
-        void *p;
+        struct smem_private_entry *e, *end;
 
         phdr = smem->partitions[host];
+        e = phdr_to_first_private_entry(phdr);
+        end = phdr_to_last_private_entry(phdr);
 
-        p = (void *)phdr + sizeof(*phdr);
-        while (p < (void *)phdr + phdr->offset_free_uncached) {
-                hdr = p;
-
-                if (hdr->canary != SMEM_PRIVATE_CANARY) {
+        while (e < end) {
+                if (e->canary != SMEM_PRIVATE_CANARY) {
                         dev_err(smem->dev,
                                 "Found invalid canary in host %d partition\n",
                                 host);
                         return ERR_PTR(-EINVAL);
                 }
 
-                if (hdr->item == item) {
+                if (le16_to_cpu(e->item) == item) {
                         if (size != NULL)
-                                *size = hdr->size - hdr->padding_data;
+                                *size = le32_to_cpu(e->size) -
+                                        le16_to_cpu(e->padding_data);
 
-                        return p + sizeof(*hdr) + hdr->padding_hdr;
+                        return entry_to_item(e);
                 }
 
-                p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
+                e = private_entry_next(e);
         }
 
         return ERR_PTR(-ENOENT);
@@ -500,10 +539,11 @@ int qcom_smem_get_free_space(unsigned host)
         if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
                 phdr = __smem->partitions[host];
 
-                ret = phdr->offset_free_cached - phdr->offset_free_uncached;
+                ret = le32_to_cpu(phdr->offset_free_cached) -
+                      le32_to_cpu(phdr->offset_free_uncached);
         } else {
                 header = __smem->regions[0].virt_base;
-                ret = header->available;
+                ret = le32_to_cpu(header->available);
         }
 
         return ret;
@@ -512,7 +552,7 @@ EXPORT_SYMBOL(qcom_smem_get_free_space);
 static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
 {
-        unsigned *versions;
+        __le32 *versions;
         size_t size;
 
         versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
@@ -526,7 +566,7 @@ static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
                 return -EINVAL;
         }
 
-        return versions[SMEM_MASTER_SBL_VERSION_INDEX];
+        return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
 }
 
 static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
@@ -536,35 +576,38 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
         struct smem_ptable_entry *entry;
         struct smem_ptable *ptable;
         unsigned remote_host;
+        u32 version, host0, host1;
         int i;
 
         ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
-        if (ptable->magic != SMEM_PTABLE_MAGIC)
+        if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
                 return 0;
 
-        if (ptable->version != 1) {
+        version = le32_to_cpu(ptable->version);
+        if (version != 1) {
                 dev_err(smem->dev,
-                        "Unsupported partition header version %d\n",
-                        ptable->version);
+                        "Unsupported partition header version %d\n", version);
                 return -EINVAL;
         }
 
-        for (i = 0; i < ptable->num_entries; i++) {
+        for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
                 entry = &ptable->entry[i];
+                host0 = le16_to_cpu(entry->host0);
+                host1 = le16_to_cpu(entry->host1);
 
-                if (entry->host0 != local_host && entry->host1 != local_host)
+                if (host0 != local_host && host1 != local_host)
                         continue;
 
-                if (!entry->offset)
+                if (!le32_to_cpu(entry->offset))
                         continue;
 
-                if (!entry->size)
+                if (!le32_to_cpu(entry->size))
                         continue;
 
-                if (entry->host0 == local_host)
-                        remote_host = entry->host1;
+                if (host0 == local_host)
+                        remote_host = host1;
                 else
-                        remote_host = entry->host0;
+                        remote_host = host0;
 
                 if (remote_host >= SMEM_HOST_COUNT) {
                         dev_err(smem->dev,
@@ -580,21 +623,24 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
                         return -EINVAL;
                 }
 
-                header = smem->regions[0].virt_base + entry->offset;
+                header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+                host0 = le16_to_cpu(header->host0);
+                host1 = le16_to_cpu(header->host1);
 
-                if (header->magic != SMEM_PART_MAGIC) {
+                if (memcmp(header->magic, SMEM_PART_MAGIC,
+                           sizeof(header->magic))) {
                         dev_err(smem->dev,
                                 "Partition %d has invalid magic\n", i);
                         return -EINVAL;
                 }
 
-                if (header->host0 != local_host && header->host1 != local_host) {
+                if (host0 != local_host && host1 != local_host) {
                         dev_err(smem->dev,
                                 "Partition %d hosts are invalid\n", i);
                         return -EINVAL;
                 }
 
-                if (header->host0 != remote_host && header->host1 != remote_host) {
+                if (host0 != remote_host && host1 != remote_host) {
                         dev_err(smem->dev,
                                 "Partition %d hosts are invalid\n", i);
                         return -EINVAL;
@@ -606,7 +652,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
                         return -EINVAL;
                 }
 
-                if (header->offset_free_uncached > header->size) {
+                if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
                         dev_err(smem->dev,
                                 "Partition %d has invalid free pointer\n", i);
                         return -EINVAL;
@@ -690,7 +736,8 @@ static int qcom_smem_probe(struct platform_device *pdev)
         }
 
         header = smem->regions[0].virt_base;
-        if (header->initialized != 1 || header->reserved) {
+        if (le32_to_cpu(header->initialized) != 1 ||
+                        le32_to_cpu(header->reserved)) {
                 dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
                 return -EINVAL;
         }