Commit ead67421 authored by Arnd Bergmann's avatar Arnd Bergmann

Merge tag 'qcom-soc-for-4.4' of git://codeaurora.org/quic/kernel/agross-msm into next/drivers

Pull "Qualcomm ARM Based SoC Updates for 4.4" from Andy Gross:

* Implement id_table driver matching in SMD
* Avoid NULL pointer exception on remove of SMEM
* Reorder SMEM/SMD configs
* Make qcom_smem_get() return a pointer
* Handle big endian CPUs correctly in SMEM
* Represent SMD channel layout in structures
* Use __iowrite32_copy() in SMD
* Remove use of VLAIs in SMD
* Handle big endian CPUs correctly in SMD/RPM
* Handle big endian CPUs correctly in SMD
* Reject sending SMD packets that are too large
* Fix endianness issue in SCM __qcom_scm_is_call_available
* Add missing prototype for qcom_scm_is_available()
* Correct SMEM items for upper channels
* Use architecture level to build SCM correctly
* Delete unneeded of_node_put in SMD
* Correct active/sleep state flagging in SMD/RPM
* Move RPM message ram out of SMEM DT node

* tag 'qcom-soc-for-4.4' of git://codeaurora.org/quic/kernel/agross-msm:
  soc: qcom: smem: Move RPM message ram out of smem DT node
  soc: qcom: smd-rpm: Correct the active vs sleep state flagging
  soc: qcom: smd: delete unneeded of_node_put
  firmware: qcom-scm: build for correct architecture level
  soc: qcom: smd: Correct SMEM items for upper channels
  qcom-scm: add missing prototype for qcom_scm_is_available()
  qcom-scm: fix endianess issue in __qcom_scm_is_call_available
  soc: qcom: smd: Reject send of too big packets
  soc: qcom: smd: Handle big endian CPUs
  soc: qcom: smd_rpm: Handle big endian CPUs
  soc: qcom: smd: Remove use of VLAIS
  soc: qcom: smd: Use __iowrite32_copy() instead of open-coding it
  soc: qcom: smd: Represent channel layout in structures
  soc: qcom: smem: Handle big endian CPUs
  soc: qcom: Make qcom_smem_get() return a pointer
  soc: qcom: Reorder SMEM/SMD configs
  soc: qcom: smem: Avoid NULL pointer exception on remove
  soc: qcom: smd: Implement id_table driver matching
parents 41e602e8 d0bfd7c9
...@@ -100,6 +100,15 @@ timer { ...@@ -100,6 +100,15 @@ timer {
clock-frequency = <19200000>; clock-frequency = <19200000>;
}; };
smem {
compatible = "qcom,smem";
memory-region = <&smem_region>;
qcom,rpm-msg-ram = <&rpm_msg_ram>;
hwlocks = <&tcsr_mutex 3>;
};
soc: soc { soc: soc {
#address-cells = <1>; #address-cells = <1>;
#size-cells = <1>; #size-cells = <1>;
...@@ -250,13 +259,9 @@ tcsr_mutex: tcsr-mutex { ...@@ -250,13 +259,9 @@ tcsr_mutex: tcsr-mutex {
#hwlock-cells = <1>; #hwlock-cells = <1>;
}; };
smem@fa00000 { rpm_msg_ram: memory@fc428000 {
compatible = "qcom,smem"; compatible = "qcom,rpm-msg-ram";
memory-region = <&smem_region>;
reg = <0xfc428000 0x4000>; reg = <0xfc428000 0x4000>;
hwlocks = <&tcsr_mutex 3>;
}; };
blsp1_uart2: serial@f991e000 { blsp1_uart2: serial@f991e000 {
......
...@@ -16,7 +16,7 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o ...@@ -16,7 +16,7 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_QCOM_SCM) += qcom_scm.o obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
obj-y += broadcom/ obj-y += broadcom/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
......
...@@ -480,15 +480,15 @@ void __qcom_scm_cpu_power_down(u32 flags) ...@@ -480,15 +480,15 @@ void __qcom_scm_cpu_power_down(u32 flags)
int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id) int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
{ {
int ret; int ret;
u32 svc_cmd = (svc_id << 10) | cmd_id; __le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
u32 ret_val = 0; __le32 ret_val = 0;
ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &svc_cmd, ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &svc_cmd,
sizeof(svc_cmd), &ret_val, sizeof(ret_val)); sizeof(svc_cmd), &ret_val, sizeof(ret_val));
if (ret) if (ret)
return ret; return ret;
return ret_val; return le32_to_cpu(ret_val);
} }
int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
......
...@@ -19,6 +19,14 @@ config QCOM_PM ...@@ -19,6 +19,14 @@ config QCOM_PM
modes. It interface with various system drivers to put the cores in modes. It interface with various system drivers to put the cores in
low power modes. low power modes.
config QCOM_SMEM
tristate "Qualcomm Shared Memory Manager (SMEM)"
depends on ARCH_QCOM
help
Say y here to enable support for the Qualcomm Shared Memory Manager.
The driver provides an interface to items in a heap shared among all
processors in a Qualcomm platform.
config QCOM_SMD config QCOM_SMD
tristate "Qualcomm Shared Memory Driver (SMD)" tristate "Qualcomm Shared Memory Driver (SMD)"
depends on QCOM_SMEM depends on QCOM_SMEM
...@@ -40,11 +48,3 @@ config QCOM_SMD_RPM ...@@ -40,11 +48,3 @@ config QCOM_SMD_RPM
Say M here if you want to include support for the Qualcomm RPM as a Say M here if you want to include support for the Qualcomm RPM as a
module. This will build a module called "qcom-smd-rpm". module. This will build a module called "qcom-smd-rpm".
config QCOM_SMEM
tristate "Qualcomm Shared Memory Manager (SMEM)"
depends on ARCH_QCOM
help
Say y here to enable support for the Qualcomm Shared Memory Manager.
The driver provides an interface to items in a heap shared among all
processors in a Qualcomm platform.
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smd.h> #include <linux/soc/qcom/smd.h>
#include <linux/soc/qcom/smd-rpm.h> #include <linux/soc/qcom/smd-rpm.h>
...@@ -44,8 +45,8 @@ struct qcom_smd_rpm { ...@@ -44,8 +45,8 @@ struct qcom_smd_rpm {
* @length: length of the payload * @length: length of the payload
*/ */
struct qcom_rpm_header { struct qcom_rpm_header {
u32 service_type; __le32 service_type;
u32 length; __le32 length;
}; };
/** /**
...@@ -57,11 +58,11 @@ struct qcom_rpm_header { ...@@ -57,11 +58,11 @@ struct qcom_rpm_header {
* @data_len: length of the payload following this header * @data_len: length of the payload following this header
*/ */
struct qcom_rpm_request { struct qcom_rpm_request {
u32 msg_id; __le32 msg_id;
u32 flags; __le32 flags;
u32 type; __le32 type;
u32 id; __le32 id;
u32 data_len; __le32 data_len;
}; };
/** /**
...@@ -74,10 +75,10 @@ struct qcom_rpm_request { ...@@ -74,10 +75,10 @@ struct qcom_rpm_request {
* Multiple of these messages can be stacked in an rpm message. * Multiple of these messages can be stacked in an rpm message.
*/ */
struct qcom_rpm_message { struct qcom_rpm_message {
u32 msg_type; __le32 msg_type;
u32 length; __le32 length;
union { union {
u32 msg_id; __le32 msg_id;
u8 message[0]; u8 message[0];
}; };
}; };
...@@ -104,30 +105,34 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, ...@@ -104,30 +105,34 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
static unsigned msg_id = 1; static unsigned msg_id = 1;
int left; int left;
int ret; int ret;
struct { struct {
struct qcom_rpm_header hdr; struct qcom_rpm_header hdr;
struct qcom_rpm_request req; struct qcom_rpm_request req;
u8 payload[count]; u8 payload[];
} pkt; } *pkt;
size_t size = sizeof(*pkt) + count;
/* SMD packets to the RPM may not exceed 256 bytes */ /* SMD packets to the RPM may not exceed 256 bytes */
if (WARN_ON(sizeof(pkt) >= 256)) if (WARN_ON(size >= 256))
return -EINVAL; return -EINVAL;
pkt = kmalloc(size, GFP_KERNEL);
if (!pkt)
return -ENOMEM;
mutex_lock(&rpm->lock); mutex_lock(&rpm->lock);
pkt.hdr.service_type = RPM_SERVICE_TYPE_REQUEST; pkt->hdr.service_type = cpu_to_le32(RPM_SERVICE_TYPE_REQUEST);
pkt.hdr.length = sizeof(struct qcom_rpm_request) + count; pkt->hdr.length = cpu_to_le32(sizeof(struct qcom_rpm_request) + count);
pkt.req.msg_id = msg_id++; pkt->req.msg_id = cpu_to_le32(msg_id++);
pkt.req.flags = BIT(state); pkt->req.flags = cpu_to_le32(state);
pkt.req.type = type; pkt->req.type = cpu_to_le32(type);
pkt.req.id = id; pkt->req.id = cpu_to_le32(id);
pkt.req.data_len = count; pkt->req.data_len = cpu_to_le32(count);
memcpy(pkt.payload, buf, count); memcpy(pkt->payload, buf, count);
ret = qcom_smd_send(rpm->rpm_channel, &pkt, sizeof(pkt)); ret = qcom_smd_send(rpm->rpm_channel, pkt, sizeof(*pkt));
if (ret) if (ret)
goto out; goto out;
...@@ -138,6 +143,7 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, ...@@ -138,6 +143,7 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
ret = rpm->ack_status; ret = rpm->ack_status;
out: out:
kfree(pkt);
mutex_unlock(&rpm->lock); mutex_unlock(&rpm->lock);
return ret; return ret;
} }
...@@ -148,27 +154,29 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev, ...@@ -148,27 +154,29 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
size_t count) size_t count)
{ {
const struct qcom_rpm_header *hdr = data; const struct qcom_rpm_header *hdr = data;
size_t hdr_length = le32_to_cpu(hdr->length);
const struct qcom_rpm_message *msg; const struct qcom_rpm_message *msg;
struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev); struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev);
const u8 *buf = data + sizeof(struct qcom_rpm_header); const u8 *buf = data + sizeof(struct qcom_rpm_header);
const u8 *end = buf + hdr->length; const u8 *end = buf + hdr_length;
char msgbuf[32]; char msgbuf[32];
int status = 0; int status = 0;
u32 len; u32 len, msg_length;
if (hdr->service_type != RPM_SERVICE_TYPE_REQUEST || if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST ||
hdr->length < sizeof(struct qcom_rpm_message)) { hdr_length < sizeof(struct qcom_rpm_message)) {
dev_err(&qsdev->dev, "invalid request\n"); dev_err(&qsdev->dev, "invalid request\n");
return 0; return 0;
} }
while (buf < end) { while (buf < end) {
msg = (struct qcom_rpm_message *)buf; msg = (struct qcom_rpm_message *)buf;
switch (msg->msg_type) { msg_length = le32_to_cpu(msg->length);
switch (le32_to_cpu(msg->msg_type)) {
case RPM_MSG_TYPE_MSG_ID: case RPM_MSG_TYPE_MSG_ID:
break; break;
case RPM_MSG_TYPE_ERR: case RPM_MSG_TYPE_ERR:
len = min_t(u32, ALIGN(msg->length, 4), sizeof(msgbuf)); len = min_t(u32, ALIGN(msg_length, 4), sizeof(msgbuf));
memcpy_fromio(msgbuf, msg->message, len); memcpy_fromio(msgbuf, msg->message, len);
msgbuf[len - 1] = 0; msgbuf[len - 1] = 0;
...@@ -179,7 +187,7 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev, ...@@ -179,7 +187,7 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
break; break;
} }
buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg->length, 4); buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg_length, 4);
} }
rpm->ack_status = status; rpm->ack_status = status;
......
...@@ -65,7 +65,9 @@ ...@@ -65,7 +65,9 @@
*/ */
struct smd_channel_info; struct smd_channel_info;
struct smd_channel_info_pair;
struct smd_channel_info_word; struct smd_channel_info_word;
struct smd_channel_info_word_pair;
#define SMD_ALLOC_TBL_COUNT 2 #define SMD_ALLOC_TBL_COUNT 2
#define SMD_ALLOC_TBL_SIZE 64 #define SMD_ALLOC_TBL_SIZE 64
...@@ -85,8 +87,8 @@ static const struct { ...@@ -85,8 +87,8 @@ static const struct {
.fifo_base_id = 338 .fifo_base_id = 338
}, },
{ {
.alloc_tbl_id = 14, .alloc_tbl_id = 266,
.info_base_id = 266, .info_base_id = 138,
.fifo_base_id = 202, .fifo_base_id = 202,
}, },
}; };
...@@ -151,10 +153,8 @@ enum smd_channel_state { ...@@ -151,10 +153,8 @@ enum smd_channel_state {
* @name: name of the channel * @name: name of the channel
* @state: local state of the channel * @state: local state of the channel
* @remote_state: remote state of the channel * @remote_state: remote state of the channel
* @tx_info: byte aligned outgoing channel info * @info: byte aligned outgoing/incoming channel info
* @rx_info: byte aligned incoming channel info * @info_word: word aligned outgoing/incoming channel info
* @tx_info_word: word aligned outgoing channel info
* @rx_info_word: word aligned incoming channel info
* @tx_lock: lock to make writes to the channel mutually exclusive * @tx_lock: lock to make writes to the channel mutually exclusive
* @fblockread_event: wakeup event tied to tx fBLOCKREADINTR * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR
* @tx_fifo: pointer to the outgoing ring buffer * @tx_fifo: pointer to the outgoing ring buffer
...@@ -175,11 +175,8 @@ struct qcom_smd_channel { ...@@ -175,11 +175,8 @@ struct qcom_smd_channel {
enum smd_channel_state state; enum smd_channel_state state;
enum smd_channel_state remote_state; enum smd_channel_state remote_state;
struct smd_channel_info *tx_info; struct smd_channel_info_pair *info;
struct smd_channel_info *rx_info; struct smd_channel_info_word_pair *info_word;
struct smd_channel_info_word *tx_info_word;
struct smd_channel_info_word *rx_info_word;
struct mutex tx_lock; struct mutex tx_lock;
wait_queue_head_t fblockread_event; wait_queue_head_t fblockread_event;
...@@ -215,7 +212,7 @@ struct qcom_smd { ...@@ -215,7 +212,7 @@ struct qcom_smd {
* Format of the smd_info smem items, for byte aligned channels. * Format of the smd_info smem items, for byte aligned channels.
*/ */
struct smd_channel_info { struct smd_channel_info {
u32 state; __le32 state;
u8 fDSR; u8 fDSR;
u8 fCTS; u8 fCTS;
u8 fCD; u8 fCD;
...@@ -224,46 +221,104 @@ struct smd_channel_info { ...@@ -224,46 +221,104 @@ struct smd_channel_info {
u8 fTAIL; u8 fTAIL;
u8 fSTATE; u8 fSTATE;
u8 fBLOCKREADINTR; u8 fBLOCKREADINTR;
u32 tail; __le32 tail;
u32 head; __le32 head;
};
struct smd_channel_info_pair {
struct smd_channel_info tx;
struct smd_channel_info rx;
}; };
/* /*
* Format of the smd_info smem items, for word aligned channels. * Format of the smd_info smem items, for word aligned channels.
*/ */
struct smd_channel_info_word { struct smd_channel_info_word {
u32 state; __le32 state;
u32 fDSR; __le32 fDSR;
u32 fCTS; __le32 fCTS;
u32 fCD; __le32 fCD;
u32 fRI; __le32 fRI;
u32 fHEAD; __le32 fHEAD;
u32 fTAIL; __le32 fTAIL;
u32 fSTATE; __le32 fSTATE;
u32 fBLOCKREADINTR; __le32 fBLOCKREADINTR;
u32 tail; __le32 tail;
u32 head; __le32 head;
}; };
#define GET_RX_CHANNEL_INFO(channel, param) \ struct smd_channel_info_word_pair {
(channel->rx_info_word ? \ struct smd_channel_info_word tx;
channel->rx_info_word->param : \ struct smd_channel_info_word rx;
channel->rx_info->param) };
#define SET_RX_CHANNEL_INFO(channel, param, value) \
(channel->rx_info_word ? \
(channel->rx_info_word->param = value) : \
(channel->rx_info->param = value))
#define GET_TX_CHANNEL_INFO(channel, param) \
(channel->tx_info_word ? \
channel->tx_info_word->param : \
channel->tx_info->param)
#define SET_TX_CHANNEL_INFO(channel, param, value) \ #define GET_RX_CHANNEL_FLAG(channel, param) \
(channel->tx_info_word ? \ ({ \
(channel->tx_info_word->param = value) : \ BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
(channel->tx_info->param = value)) channel->info_word ? \
le32_to_cpu(channel->info_word->rx.param) : \
channel->info->rx.param; \
})
#define GET_RX_CHANNEL_INFO(channel, param) \
({ \
BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
le32_to_cpu(channel->info_word ? \
channel->info_word->rx.param : \
channel->info->rx.param); \
})
#define SET_RX_CHANNEL_FLAG(channel, param, value) \
({ \
BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
if (channel->info_word) \
channel->info_word->rx.param = cpu_to_le32(value); \
else \
channel->info->rx.param = value; \
})
#define SET_RX_CHANNEL_INFO(channel, param, value) \
({ \
BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
if (channel->info_word) \
channel->info_word->rx.param = cpu_to_le32(value); \
else \
channel->info->rx.param = cpu_to_le32(value); \
})
#define GET_TX_CHANNEL_FLAG(channel, param) \
({ \
BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
channel->info_word ? \
le32_to_cpu(channel->info_word->tx.param) : \
channel->info->tx.param; \
})
#define GET_TX_CHANNEL_INFO(channel, param) \
({ \
BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
le32_to_cpu(channel->info_word ? \
channel->info_word->tx.param : \
channel->info->tx.param); \
})
#define SET_TX_CHANNEL_FLAG(channel, param, value) \
({ \
BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
if (channel->info_word) \
channel->info_word->tx.param = cpu_to_le32(value); \
else \
channel->info->tx.param = value; \
})
#define SET_TX_CHANNEL_INFO(channel, param, value) \
({ \
BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
if (channel->info_word) \
channel->info_word->tx.param = cpu_to_le32(value); \
else \
channel->info->tx.param = cpu_to_le32(value); \
})
/** /**
* struct qcom_smd_alloc_entry - channel allocation entry * struct qcom_smd_alloc_entry - channel allocation entry
...@@ -274,9 +329,9 @@ struct smd_channel_info_word { ...@@ -274,9 +329,9 @@ struct smd_channel_info_word {
*/ */
struct qcom_smd_alloc_entry { struct qcom_smd_alloc_entry {
u8 name[20]; u8 name[20];
u32 cid; __le32 cid;
u32 flags; __le32 flags;
u32 ref_count; __le32 ref_count;
} __packed; } __packed;
#define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff #define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff
...@@ -305,14 +360,14 @@ static void qcom_smd_signal_channel(struct qcom_smd_channel *channel) ...@@ -305,14 +360,14 @@ static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
static void qcom_smd_channel_reset(struct qcom_smd_channel *channel) static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
{ {
SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
SET_TX_CHANNEL_INFO(channel, fDSR, 0); SET_TX_CHANNEL_FLAG(channel, fDSR, 0);
SET_TX_CHANNEL_INFO(channel, fCTS, 0); SET_TX_CHANNEL_FLAG(channel, fCTS, 0);
SET_TX_CHANNEL_INFO(channel, fCD, 0); SET_TX_CHANNEL_FLAG(channel, fCD, 0);
SET_TX_CHANNEL_INFO(channel, fRI, 0); SET_TX_CHANNEL_FLAG(channel, fRI, 0);
SET_TX_CHANNEL_INFO(channel, fHEAD, 0); SET_TX_CHANNEL_FLAG(channel, fHEAD, 0);
SET_TX_CHANNEL_INFO(channel, fTAIL, 0); SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
SET_TX_CHANNEL_INFO(channel, fSTATE, 1); SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1); SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
SET_TX_CHANNEL_INFO(channel, head, 0); SET_TX_CHANNEL_INFO(channel, head, 0);
SET_TX_CHANNEL_INFO(channel, tail, 0); SET_TX_CHANNEL_INFO(channel, tail, 0);
...@@ -350,12 +405,12 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel, ...@@ -350,12 +405,12 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state); dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);
SET_TX_CHANNEL_INFO(channel, fDSR, is_open); SET_TX_CHANNEL_FLAG(channel, fDSR, is_open);
SET_TX_CHANNEL_INFO(channel, fCTS, is_open); SET_TX_CHANNEL_FLAG(channel, fCTS, is_open);
SET_TX_CHANNEL_INFO(channel, fCD, is_open); SET_TX_CHANNEL_FLAG(channel, fCD, is_open);
SET_TX_CHANNEL_INFO(channel, state, state); SET_TX_CHANNEL_INFO(channel, state, state);
SET_TX_CHANNEL_INFO(channel, fSTATE, 1); SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
channel->state = state; channel->state = state;
qcom_smd_signal_channel(channel); qcom_smd_signal_channel(channel);
...@@ -364,20 +419,15 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel, ...@@ -364,20 +419,15 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
/* /*
* Copy count bytes of data using 32bit accesses, if that's required. * Copy count bytes of data using 32bit accesses, if that's required.
*/ */
static void smd_copy_to_fifo(void __iomem *_dst, static void smd_copy_to_fifo(void __iomem *dst,
const void *_src, const void *src,
size_t count, size_t count,
bool word_aligned) bool word_aligned)
{ {
u32 *dst = (u32 *)_dst;
u32 *src = (u32 *)_src;
if (word_aligned) { if (word_aligned) {
count /= sizeof(u32); __iowrite32_copy(dst, src, count / sizeof(u32));
while (count--)
writel_relaxed(*src++, dst++);
} else { } else {
memcpy_toio(_dst, _src, count); memcpy_toio(dst, src, count);
} }
} }
...@@ -395,7 +445,7 @@ static void smd_copy_from_fifo(void *_dst, ...@@ -395,7 +445,7 @@ static void smd_copy_from_fifo(void *_dst,
if (word_aligned) { if (word_aligned) {
count /= sizeof(u32); count /= sizeof(u32);
while (count--) while (count--)
*dst++ = readl_relaxed(src++); *dst++ = __raw_readl(src++);
} else { } else {
memcpy_fromio(_dst, _src, count); memcpy_fromio(_dst, _src, count);
} }
...@@ -412,7 +462,7 @@ static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel, ...@@ -412,7 +462,7 @@ static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
unsigned tail; unsigned tail;
size_t len; size_t len;
word_aligned = channel->rx_info_word != NULL; word_aligned = channel->info_word;
tail = GET_RX_CHANNEL_INFO(channel, tail); tail = GET_RX_CHANNEL_INFO(channel, tail);
len = min_t(size_t, count, channel->fifo_size - tail); len = min_t(size_t, count, channel->fifo_size - tail);
...@@ -491,7 +541,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) ...@@ -491,7 +541,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
{ {
bool need_state_scan = false; bool need_state_scan = false;
int remote_state; int remote_state;
u32 pktlen; __le32 pktlen;
int avail; int avail;
int ret; int ret;
...@@ -502,10 +552,10 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) ...@@ -502,10 +552,10 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
need_state_scan = true; need_state_scan = true;
} }
/* Indicate that we have seen any state change */ /* Indicate that we have seen any state change */
SET_RX_CHANNEL_INFO(channel, fSTATE, 0); SET_RX_CHANNEL_FLAG(channel, fSTATE, 0);
/* Signal waiting qcom_smd_send() about the interrupt */ /* Signal waiting qcom_smd_send() about the interrupt */
if (!GET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR)) if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR))
wake_up_interruptible(&channel->fblockread_event); wake_up_interruptible(&channel->fblockread_event);
/* Don't consume any data until we've opened the channel */ /* Don't consume any data until we've opened the channel */
...@@ -513,7 +563,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) ...@@ -513,7 +563,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
goto out; goto out;
/* Indicate that we've seen the new data */ /* Indicate that we've seen the new data */
SET_RX_CHANNEL_INFO(channel, fHEAD, 0); SET_RX_CHANNEL_FLAG(channel, fHEAD, 0);
/* Consume data */ /* Consume data */
for (;;) { for (;;) {
...@@ -522,7 +572,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) ...@@ -522,7 +572,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) { if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen)); qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN); qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
channel->pkt_size = pktlen; channel->pkt_size = le32_to_cpu(pktlen);
} else if (channel->pkt_size && avail >= channel->pkt_size) { } else if (channel->pkt_size && avail >= channel->pkt_size) {
ret = qcom_smd_channel_recv_single(channel); ret = qcom_smd_channel_recv_single(channel);
if (ret) if (ret)
...@@ -533,10 +583,10 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) ...@@ -533,10 +583,10 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
} }
/* Indicate that we have seen and updated tail */ /* Indicate that we have seen and updated tail */
SET_RX_CHANNEL_INFO(channel, fTAIL, 1); SET_RX_CHANNEL_FLAG(channel, fTAIL, 1);
/* Signal the remote that we've consumed the data (if requested) */ /* Signal the remote that we've consumed the data (if requested) */
if (!GET_RX_CHANNEL_INFO(channel, fBLOCKREADINTR)) { if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) {
/* Ensure ordering of channel info updates */ /* Ensure ordering of channel info updates */
wmb(); wmb();
...@@ -627,7 +677,7 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel, ...@@ -627,7 +677,7 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
unsigned head; unsigned head;
size_t len; size_t len;
word_aligned = channel->tx_info_word != NULL; word_aligned = channel->info_word;
head = GET_TX_CHANNEL_INFO(channel, head); head = GET_TX_CHANNEL_INFO(channel, head);
len = min_t(size_t, count, channel->fifo_size - head); len = min_t(size_t, count, channel->fifo_size - head);
...@@ -665,12 +715,16 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel, ...@@ -665,12 +715,16 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
*/ */
int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len) int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
{ {
u32 hdr[5] = {len,}; __le32 hdr[5] = { cpu_to_le32(len), };
int tlen = sizeof(hdr) + len; int tlen = sizeof(hdr) + len;
int ret; int ret;
/* Word aligned channels only accept word size aligned data */ /* Word aligned channels only accept word size aligned data */
if (channel->rx_info_word != NULL && len % 4) if (channel->info_word && len % 4)
return -EINVAL;
/* Reject packets that are too big */
if (tlen >= channel->fifo_size)
return -EINVAL; return -EINVAL;
ret = mutex_lock_interruptible(&channel->tx_lock); ret = mutex_lock_interruptible(&channel->tx_lock);
...@@ -683,7 +737,7 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len) ...@@ -683,7 +737,7 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
goto out; goto out;
} }
SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0); SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0);
ret = wait_event_interruptible(channel->fblockread_event, ret = wait_event_interruptible(channel->fblockread_event,
qcom_smd_get_tx_avail(channel) >= tlen || qcom_smd_get_tx_avail(channel) >= tlen ||
...@@ -691,15 +745,15 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len) ...@@ -691,15 +745,15 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
if (ret) if (ret)
goto out; goto out;
SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1); SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
} }
SET_TX_CHANNEL_INFO(channel, fTAIL, 0); SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
qcom_smd_write_fifo(channel, hdr, sizeof(hdr)); qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
qcom_smd_write_fifo(channel, data, len); qcom_smd_write_fifo(channel, data, len);
SET_TX_CHANNEL_INFO(channel, fHEAD, 1); SET_TX_CHANNEL_FLAG(channel, fHEAD, 1);
/* Ensure ordering of channel info updates */ /* Ensure ordering of channel info updates */
wmb(); wmb();
...@@ -727,6 +781,19 @@ static struct qcom_smd_driver *to_smd_driver(struct device *dev) ...@@ -727,6 +781,19 @@ static struct qcom_smd_driver *to_smd_driver(struct device *dev)
static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv) static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
{ {
struct qcom_smd_device *qsdev = to_smd_device(dev);
struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver);
const struct qcom_smd_id *match = qsdrv->smd_match_table;
const char *name = qsdev->channel->name;
if (match) {
while (match->name[0]) {
if (!strcmp(match->name, name))
return 1;
match++;
}
}
return of_driver_match_device(dev, drv); return of_driver_match_device(dev, drv);
} }
...@@ -854,10 +921,8 @@ static struct device_node *qcom_smd_match_channel(struct device_node *edge_node, ...@@ -854,10 +921,8 @@ static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
for_each_available_child_of_node(edge_node, child) { for_each_available_child_of_node(edge_node, child) {
key = "qcom,smd-channels"; key = "qcom,smd-channels";
ret = of_property_read_string(child, key, &name); ret = of_property_read_string(child, key, &name);
if (ret) { if (ret)
of_node_put(child);
continue; continue;
}
if (strcmp(name, channel) == 0) if (strcmp(name, channel) == 0)
return child; return child;
...@@ -880,19 +945,17 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel) ...@@ -880,19 +945,17 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
if (channel->qsdev) if (channel->qsdev)
return -EEXIST; return -EEXIST;
node = qcom_smd_match_channel(edge->of_node, channel->name);
if (!node) {
dev_dbg(smd->dev, "no match for '%s'\n", channel->name);
return -ENXIO;
}
dev_dbg(smd->dev, "registering '%s'\n", channel->name); dev_dbg(smd->dev, "registering '%s'\n", channel->name);
qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
if (!qsdev) if (!qsdev)
return -ENOMEM; return -ENOMEM;
dev_set_name(&qsdev->dev, "%s.%s", edge->of_node->name, node->name); node = qcom_smd_match_channel(edge->of_node, channel->name);
dev_set_name(&qsdev->dev, "%s.%s",
edge->of_node->name,
node ? node->name : channel->name);
qsdev->dev.parent = smd->dev; qsdev->dev.parent = smd->dev;
qsdev->dev.bus = &qcom_smd_bus; qsdev->dev.bus = &qcom_smd_bus;
qsdev->dev.release = qcom_smd_release_device; qsdev->dev.release = qcom_smd_release_device;
...@@ -978,21 +1041,20 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed ...@@ -978,21 +1041,20 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
spin_lock_init(&channel->recv_lock); spin_lock_init(&channel->recv_lock);
init_waitqueue_head(&channel->fblockread_event); init_waitqueue_head(&channel->fblockread_event);
ret = qcom_smem_get(edge->remote_pid, smem_info_item, (void **)&info, info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size);
&info_size); if (IS_ERR(info)) {
if (ret) ret = PTR_ERR(info);
goto free_name_and_channel; goto free_name_and_channel;
}
/* /*
* Use the size of the item to figure out which channel info struct to * Use the size of the item to figure out which channel info struct to
* use. * use.
*/ */
if (info_size == 2 * sizeof(struct smd_channel_info_word)) { if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
channel->tx_info_word = info; channel->info_word = info;
channel->rx_info_word = info + sizeof(struct smd_channel_info_word);
} else if (info_size == 2 * sizeof(struct smd_channel_info)) { } else if (info_size == 2 * sizeof(struct smd_channel_info)) {
channel->tx_info = info; channel->info = info;
channel->rx_info = info + sizeof(struct smd_channel_info);
} else { } else {
dev_err(smd->dev, dev_err(smd->dev,
"channel info of size %zu not supported\n", info_size); "channel info of size %zu not supported\n", info_size);
...@@ -1000,10 +1062,11 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed ...@@ -1000,10 +1062,11 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
goto free_name_and_channel; goto free_name_and_channel;
} }
ret = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_base, fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size);
&fifo_size); if (IS_ERR(fifo_base)) {
if (ret) ret = PTR_ERR(fifo_base);
goto free_name_and_channel; goto free_name_and_channel;
}
/* The channel consist of a rx and tx fifo of equal size */ /* The channel consist of a rx and tx fifo of equal size */
fifo_size /= 2; fifo_size /= 2;
...@@ -1040,20 +1103,19 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge) ...@@ -1040,20 +1103,19 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
unsigned long flags; unsigned long flags;
unsigned fifo_id; unsigned fifo_id;
unsigned info_id; unsigned info_id;
int ret;
int tbl; int tbl;
int i; int i;
u32 eflags, cid;
for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) { for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
ret = qcom_smem_get(edge->remote_pid, alloc_tbl = qcom_smem_get(edge->remote_pid,
smem_items[tbl].alloc_tbl_id, smem_items[tbl].alloc_tbl_id, NULL);
(void **)&alloc_tbl, if (IS_ERR(alloc_tbl))
NULL);
if (ret < 0)
continue; continue;
for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) { for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
entry = &alloc_tbl[i]; entry = &alloc_tbl[i];
eflags = le32_to_cpu(entry->flags);
if (test_bit(i, edge->allocated[tbl])) if (test_bit(i, edge->allocated[tbl]))
continue; continue;
...@@ -1063,14 +1125,15 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge) ...@@ -1063,14 +1125,15 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
if (!entry->name[0]) if (!entry->name[0])
continue; continue;
if (!(entry->flags & SMD_CHANNEL_FLAGS_PACKET)) if (!(eflags & SMD_CHANNEL_FLAGS_PACKET))
continue; continue;
if ((entry->flags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id) if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
continue; continue;
info_id = smem_items[tbl].info_base_id + entry->cid; cid = le32_to_cpu(entry->cid);
fifo_id = smem_items[tbl].fifo_base_id + entry->cid; info_id = smem_items[tbl].info_base_id + cid;
fifo_id = smem_items[tbl].fifo_base_id + cid;
channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name); channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
if (IS_ERR(channel)) if (IS_ERR(channel))
...@@ -1227,11 +1290,12 @@ static int qcom_smd_probe(struct platform_device *pdev) ...@@ -1227,11 +1290,12 @@ static int qcom_smd_probe(struct platform_device *pdev)
int num_edges; int num_edges;
int ret; int ret;
int i = 0; int i = 0;
void *p;
/* Wait for smem */ /* Wait for smem */
ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL, NULL); p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL);
if (ret == -EPROBE_DEFER) if (PTR_ERR(p) == -EPROBE_DEFER)
return ret; return PTR_ERR(p);
num_edges = of_get_available_child_count(pdev->dev.of_node); num_edges = of_get_available_child_count(pdev->dev.of_node);
array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge); array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
......
...@@ -92,9 +92,9 @@ ...@@ -92,9 +92,9 @@
* @params: parameters to the command * @params: parameters to the command
*/ */
struct smem_proc_comm { struct smem_proc_comm {
u32 command; __le32 command;
u32 status; __le32 status;
u32 params[2]; __le32 params[2];
}; };
/** /**
...@@ -106,10 +106,10 @@ struct smem_proc_comm { ...@@ -106,10 +106,10 @@ struct smem_proc_comm {
* the default region. bits 0,1 are reserved * the default region. bits 0,1 are reserved
*/ */
struct smem_global_entry { struct smem_global_entry {
u32 allocated; __le32 allocated;
u32 offset; __le32 offset;
u32 size; __le32 size;
u32 aux_base; /* bits 1:0 reserved */ __le32 aux_base; /* bits 1:0 reserved */
}; };
#define AUX_BASE_MASK 0xfffffffc #define AUX_BASE_MASK 0xfffffffc
...@@ -125,11 +125,11 @@ struct smem_global_entry { ...@@ -125,11 +125,11 @@ struct smem_global_entry {
*/ */
struct smem_header { struct smem_header {
struct smem_proc_comm proc_comm[4]; struct smem_proc_comm proc_comm[4];
u32 version[32]; __le32 version[32];
u32 initialized; __le32 initialized;
u32 free_offset; __le32 free_offset;
u32 available; __le32 available;
u32 reserved; __le32 reserved;
struct smem_global_entry toc[SMEM_ITEM_COUNT]; struct smem_global_entry toc[SMEM_ITEM_COUNT];
}; };
...@@ -143,12 +143,12 @@ struct smem_header { ...@@ -143,12 +143,12 @@ struct smem_header {
* @reserved: reserved entries for later use * @reserved: reserved entries for later use
*/ */
struct smem_ptable_entry { struct smem_ptable_entry {
u32 offset; __le32 offset;
u32 size; __le32 size;
u32 flags; __le32 flags;
u16 host0; __le16 host0;
u16 host1; __le16 host1;
u32 reserved[8]; __le32 reserved[8];
}; };
/** /**
...@@ -160,13 +160,14 @@ struct smem_ptable_entry { ...@@ -160,13 +160,14 @@ struct smem_ptable_entry {
* @entry: list of @smem_ptable_entry for the @num_entries partitions * @entry: list of @smem_ptable_entry for the @num_entries partitions
*/ */
struct smem_ptable { struct smem_ptable {
u32 magic; u8 magic[4];
u32 version; __le32 version;
u32 num_entries; __le32 num_entries;
u32 reserved[5]; __le32 reserved[5];
struct smem_ptable_entry entry[]; struct smem_ptable_entry entry[];
}; };
#define SMEM_PTABLE_MAGIC 0x434f5424 /* "$TOC" */
static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
/** /**
* struct smem_partition_header - header of the partitions * struct smem_partition_header - header of the partitions
...@@ -181,15 +182,16 @@ struct smem_ptable { ...@@ -181,15 +182,16 @@ struct smem_ptable {
* @reserved: for now reserved entries * @reserved: for now reserved entries
*/ */
struct smem_partition_header { struct smem_partition_header {
u32 magic; u8 magic[4];
u16 host0; __le16 host0;
u16 host1; __le16 host1;
u32 size; __le32 size;
u32 offset_free_uncached; __le32 offset_free_uncached;
u32 offset_free_cached; __le32 offset_free_cached;
u32 reserved[3]; __le32 reserved[3];
}; };
#define SMEM_PART_MAGIC 0x54525024 /* "$PRT" */
static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
/** /**
* struct smem_private_entry - header of each item in the private partition * struct smem_private_entry - header of each item in the private partition
...@@ -201,12 +203,12 @@ struct smem_partition_header { ...@@ -201,12 +203,12 @@ struct smem_partition_header {
* @reserved: for now reserved entry * @reserved: for now reserved entry
*/ */
struct smem_private_entry { struct smem_private_entry {
u16 canary; u16 canary; /* bytes are the same so no swapping needed */
u16 item; __le16 item;
u32 size; /* includes padding bytes */ __le32 size; /* includes padding bytes */
u16 padding_data; __le16 padding_data;
u16 padding_hdr; __le16 padding_hdr;
u32 reserved; __le32 reserved;
}; };
#define SMEM_PRIVATE_CANARY 0xa5a5 #define SMEM_PRIVATE_CANARY 0xa5a5
...@@ -242,6 +244,45 @@ struct qcom_smem { ...@@ -242,6 +244,45 @@ struct qcom_smem {
struct smem_region regions[0]; struct smem_region regions[0];
}; };
static struct smem_private_entry *
phdr_to_last_private_entry(struct smem_partition_header *phdr)
{
void *p = phdr;
return p + le32_to_cpu(phdr->offset_free_uncached);
}
static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
{
void *p = phdr;
return p + le32_to_cpu(phdr->offset_free_cached);
}
static struct smem_private_entry *
phdr_to_first_private_entry(struct smem_partition_header *phdr)
{
void *p = phdr;
return p + sizeof(*phdr);
}
static struct smem_private_entry *
private_entry_next(struct smem_private_entry *e)
{
void *p = e;
return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
le32_to_cpu(e->size);
}
static void *entry_to_item(struct smem_private_entry *e)
{
void *p = e;
return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}
/* Pointer to the one and only smem handle */ /* Pointer to the one and only smem handle */
static struct qcom_smem *__smem; static struct qcom_smem *__smem;
...@@ -254,16 +295,16 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem, ...@@ -254,16 +295,16 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
size_t size) size_t size)
{ {
struct smem_partition_header *phdr; struct smem_partition_header *phdr;
struct smem_private_entry *hdr; struct smem_private_entry *hdr, *end;
size_t alloc_size; size_t alloc_size;
void *p; void *cached;
phdr = smem->partitions[host]; phdr = smem->partitions[host];
hdr = phdr_to_first_private_entry(phdr);
end = phdr_to_last_private_entry(phdr);
cached = phdr_to_first_cached_entry(phdr);
p = (void *)phdr + sizeof(*phdr); while (hdr < end) {
while (p < (void *)phdr + phdr->offset_free_uncached) {
hdr = p;
if (hdr->canary != SMEM_PRIVATE_CANARY) { if (hdr->canary != SMEM_PRIVATE_CANARY) {
dev_err(smem->dev, dev_err(smem->dev,
"Found invalid canary in host %d partition\n", "Found invalid canary in host %d partition\n",
...@@ -271,24 +312,23 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem, ...@@ -271,24 +312,23 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
return -EINVAL; return -EINVAL;
} }
if (hdr->item == item) if (le16_to_cpu(hdr->item) == item)
return -EEXIST; return -EEXIST;
p += sizeof(*hdr) + hdr->padding_hdr + hdr->size; hdr = private_entry_next(hdr);
} }
/* Check that we don't grow into the cached region */ /* Check that we don't grow into the cached region */
alloc_size = sizeof(*hdr) + ALIGN(size, 8); alloc_size = sizeof(*hdr) + ALIGN(size, 8);
if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) { if ((void *)hdr + alloc_size >= cached) {
dev_err(smem->dev, "Out of memory\n"); dev_err(smem->dev, "Out of memory\n");
return -ENOSPC; return -ENOSPC;
} }
hdr = p;
hdr->canary = SMEM_PRIVATE_CANARY; hdr->canary = SMEM_PRIVATE_CANARY;
hdr->item = item; hdr->item = cpu_to_le16(item);
hdr->size = ALIGN(size, 8); hdr->size = cpu_to_le32(ALIGN(size, 8));
hdr->padding_data = hdr->size - size; hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
hdr->padding_hdr = 0; hdr->padding_hdr = 0;
/* /*
...@@ -297,7 +337,7 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem, ...@@ -297,7 +337,7 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
* gets a consistent view of the linked list. * gets a consistent view of the linked list.
*/ */
wmb(); wmb();
phdr->offset_free_uncached += alloc_size; le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
return 0; return 0;
} }
...@@ -318,11 +358,11 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem, ...@@ -318,11 +358,11 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
return -EEXIST; return -EEXIST;
size = ALIGN(size, 8); size = ALIGN(size, 8);
if (WARN_ON(size > header->available)) if (WARN_ON(size > le32_to_cpu(header->available)))
return -ENOMEM; return -ENOMEM;
entry->offset = header->free_offset; entry->offset = header->free_offset;
entry->size = size; entry->size = cpu_to_le32(size);
/* /*
* Ensure the header is consistent before we mark the item allocated, * Ensure the header is consistent before we mark the item allocated,
...@@ -330,10 +370,10 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem, ...@@ -330,10 +370,10 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
* even though they do not take the spinlock on read. * even though they do not take the spinlock on read.
*/ */
wmb(); wmb();
entry->allocated = 1; entry->allocated = cpu_to_le32(1);
header->free_offset += size; le32_add_cpu(&header->free_offset, size);
header->available -= size; le32_add_cpu(&header->available, -size);
return 0; return 0;
} }
...@@ -378,10 +418,9 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size) ...@@ -378,10 +418,9 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
} }
EXPORT_SYMBOL(qcom_smem_alloc); EXPORT_SYMBOL(qcom_smem_alloc);
static int qcom_smem_get_global(struct qcom_smem *smem, static void *qcom_smem_get_global(struct qcom_smem *smem,
unsigned item, unsigned item,
void **ptr, size_t *size)
size_t *size)
{ {
struct smem_header *header; struct smem_header *header;
struct smem_region *area; struct smem_region *area;
...@@ -390,100 +429,94 @@ static int qcom_smem_get_global(struct qcom_smem *smem, ...@@ -390,100 +429,94 @@ static int qcom_smem_get_global(struct qcom_smem *smem,
unsigned i; unsigned i;
if (WARN_ON(item >= SMEM_ITEM_COUNT)) if (WARN_ON(item >= SMEM_ITEM_COUNT))
return -EINVAL; return ERR_PTR(-EINVAL);
header = smem->regions[0].virt_base; header = smem->regions[0].virt_base;
entry = &header->toc[item]; entry = &header->toc[item];
if (!entry->allocated) if (!entry->allocated)
return -ENXIO; return ERR_PTR(-ENXIO);
if (ptr != NULL) { aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
aux_base = entry->aux_base & AUX_BASE_MASK;
for (i = 0; i < smem->num_regions; i++) { for (i = 0; i < smem->num_regions; i++) {
area = &smem->regions[i]; area = &smem->regions[i];
if (area->aux_base == aux_base || !aux_base) { if (area->aux_base == aux_base || !aux_base) {
*ptr = area->virt_base + entry->offset; if (size != NULL)
break; *size = le32_to_cpu(entry->size);
} return area->virt_base + le32_to_cpu(entry->offset);
} }
} }
if (size != NULL)
*size = entry->size;
return 0; return ERR_PTR(-ENOENT);
} }
static int qcom_smem_get_private(struct qcom_smem *smem, static void *qcom_smem_get_private(struct qcom_smem *smem,
unsigned host, unsigned host,
unsigned item, unsigned item,
void **ptr, size_t *size)
size_t *size)
{ {
struct smem_partition_header *phdr; struct smem_partition_header *phdr;
struct smem_private_entry *hdr; struct smem_private_entry *e, *end;
void *p;
phdr = smem->partitions[host]; phdr = smem->partitions[host];
e = phdr_to_first_private_entry(phdr);
end = phdr_to_last_private_entry(phdr);
p = (void *)phdr + sizeof(*phdr); while (e < end) {
while (p < (void *)phdr + phdr->offset_free_uncached) { if (e->canary != SMEM_PRIVATE_CANARY) {
hdr = p;
if (hdr->canary != SMEM_PRIVATE_CANARY) {
dev_err(smem->dev, dev_err(smem->dev,
"Found invalid canary in host %d partition\n", "Found invalid canary in host %d partition\n",
host); host);
return -EINVAL; return ERR_PTR(-EINVAL);
} }
if (hdr->item == item) { if (le16_to_cpu(e->item) == item) {
if (ptr != NULL)
*ptr = p + sizeof(*hdr) + hdr->padding_hdr;
if (size != NULL) if (size != NULL)
*size = hdr->size - hdr->padding_data; *size = le32_to_cpu(e->size) -
le16_to_cpu(e->padding_data);
return 0; return entry_to_item(e);
} }
p += sizeof(*hdr) + hdr->padding_hdr + hdr->size; e = private_entry_next(e);
} }
return -ENOENT; return ERR_PTR(-ENOENT);
} }
/** /**
* qcom_smem_get() - resolve ptr of size of a smem item * qcom_smem_get() - resolve ptr of size of a smem item
* @host: the remote processor, or -1 * @host: the remote processor, or -1
* @item: smem item handle * @item: smem item handle
* @ptr: pointer to be filled out with address of the item
* @size: pointer to be filled out with size of the item * @size: pointer to be filled out with size of the item
* *
* Looks up pointer and size of a smem item. * Looks up smem item and returns pointer to it. Size of smem
* item is returned in @size.
*/ */
int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size) void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{ {
unsigned long flags; unsigned long flags;
int ret; int ret;
void *ptr = ERR_PTR(-EPROBE_DEFER);
if (!__smem) if (!__smem)
return -EPROBE_DEFER; return ptr;
ret = hwspin_lock_timeout_irqsave(__smem->hwlock, ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
HWSPINLOCK_TIMEOUT, HWSPINLOCK_TIMEOUT,
&flags); &flags);
if (ret) if (ret)
return ret; return ERR_PTR(ret);
if (host < SMEM_HOST_COUNT && __smem->partitions[host]) if (host < SMEM_HOST_COUNT && __smem->partitions[host])
ret = qcom_smem_get_private(__smem, host, item, ptr, size); ptr = qcom_smem_get_private(__smem, host, item, size);
else else
ret = qcom_smem_get_global(__smem, item, ptr, size); ptr = qcom_smem_get_global(__smem, item, size);
hwspin_unlock_irqrestore(__smem->hwlock, &flags); hwspin_unlock_irqrestore(__smem->hwlock, &flags);
return ret;
return ptr;
} }
EXPORT_SYMBOL(qcom_smem_get); EXPORT_SYMBOL(qcom_smem_get);
...@@ -506,10 +539,11 @@ int qcom_smem_get_free_space(unsigned host) ...@@ -506,10 +539,11 @@ int qcom_smem_get_free_space(unsigned host)
if (host < SMEM_HOST_COUNT && __smem->partitions[host]) { if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
phdr = __smem->partitions[host]; phdr = __smem->partitions[host];
ret = phdr->offset_free_cached - phdr->offset_free_uncached; ret = le32_to_cpu(phdr->offset_free_cached) -
le32_to_cpu(phdr->offset_free_uncached);
} else { } else {
header = __smem->regions[0].virt_base; header = __smem->regions[0].virt_base;
ret = header->available; ret = le32_to_cpu(header->available);
} }
return ret; return ret;
...@@ -518,13 +552,11 @@ EXPORT_SYMBOL(qcom_smem_get_free_space); ...@@ -518,13 +552,11 @@ EXPORT_SYMBOL(qcom_smem_get_free_space);
static int qcom_smem_get_sbl_version(struct qcom_smem *smem) static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{ {
unsigned *versions; __le32 *versions;
size_t size; size_t size;
int ret;
ret = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
(void **)&versions, &size); if (IS_ERR(versions)) {
if (ret < 0) {
dev_err(smem->dev, "Unable to read the version item\n"); dev_err(smem->dev, "Unable to read the version item\n");
return -ENOENT; return -ENOENT;
} }
...@@ -534,7 +566,7 @@ static int qcom_smem_get_sbl_version(struct qcom_smem *smem) ...@@ -534,7 +566,7 @@ static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
return -EINVAL; return -EINVAL;
} }
return versions[SMEM_MASTER_SBL_VERSION_INDEX]; return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
} }
static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
...@@ -544,35 +576,38 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, ...@@ -544,35 +576,38 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
struct smem_ptable_entry *entry; struct smem_ptable_entry *entry;
struct smem_ptable *ptable; struct smem_ptable *ptable;
unsigned remote_host; unsigned remote_host;
u32 version, host0, host1;
int i; int i;
ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K; ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
if (ptable->magic != SMEM_PTABLE_MAGIC) if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
return 0; return 0;
if (ptable->version != 1) { version = le32_to_cpu(ptable->version);
if (version != 1) {
dev_err(smem->dev, dev_err(smem->dev,
"Unsupported partition header version %d\n", "Unsupported partition header version %d\n", version);
ptable->version);
return -EINVAL; return -EINVAL;
} }
for (i = 0; i < ptable->num_entries; i++) { for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
entry = &ptable->entry[i]; entry = &ptable->entry[i];
host0 = le16_to_cpu(entry->host0);
host1 = le16_to_cpu(entry->host1);
if (entry->host0 != local_host && entry->host1 != local_host) if (host0 != local_host && host1 != local_host)
continue; continue;
if (!entry->offset) if (!le32_to_cpu(entry->offset))
continue; continue;
if (!entry->size) if (!le32_to_cpu(entry->size))
continue; continue;
if (entry->host0 == local_host) if (host0 == local_host)
remote_host = entry->host1; remote_host = host1;
else else
remote_host = entry->host0; remote_host = host0;
if (remote_host >= SMEM_HOST_COUNT) { if (remote_host >= SMEM_HOST_COUNT) {
dev_err(smem->dev, dev_err(smem->dev,
...@@ -588,21 +623,24 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, ...@@ -588,21 +623,24 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
return -EINVAL; return -EINVAL;
} }
header = smem->regions[0].virt_base + entry->offset; header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
host0 = le16_to_cpu(header->host0);
host1 = le16_to_cpu(header->host1);
if (header->magic != SMEM_PART_MAGIC) { if (memcmp(header->magic, SMEM_PART_MAGIC,
sizeof(header->magic))) {
dev_err(smem->dev, dev_err(smem->dev,
"Partition %d has invalid magic\n", i); "Partition %d has invalid magic\n", i);
return -EINVAL; return -EINVAL;
} }
if (header->host0 != local_host && header->host1 != local_host) { if (host0 != local_host && host1 != local_host) {
dev_err(smem->dev, dev_err(smem->dev,
"Partition %d hosts are invalid\n", i); "Partition %d hosts are invalid\n", i);
return -EINVAL; return -EINVAL;
} }
if (header->host0 != remote_host && header->host1 != remote_host) { if (host0 != remote_host && host1 != remote_host) {
dev_err(smem->dev, dev_err(smem->dev,
"Partition %d hosts are invalid\n", i); "Partition %d hosts are invalid\n", i);
return -EINVAL; return -EINVAL;
...@@ -614,7 +652,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, ...@@ -614,7 +652,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
return -EINVAL; return -EINVAL;
} }
if (header->offset_free_uncached > header->size) { if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
dev_err(smem->dev, dev_err(smem->dev,
"Partition %d has invalid free pointer\n", i); "Partition %d has invalid free pointer\n", i);
return -EINVAL; return -EINVAL;
...@@ -626,37 +664,47 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, ...@@ -626,37 +664,47 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
return 0; return 0;
} }
static int qcom_smem_count_mem_regions(struct platform_device *pdev) static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
const char *name, int i)
{ {
struct resource *res; struct device_node *np;
int num_regions = 0; struct resource r;
int i; int ret;
for (i = 0; i < pdev->num_resources; i++) {
res = &pdev->resource[i];
if (resource_type(res) == IORESOURCE_MEM) np = of_parse_phandle(dev->of_node, name, 0);
num_regions++; if (!np) {
dev_err(dev, "No %s specified\n", name);
return -EINVAL;
} }
return num_regions; ret = of_address_to_resource(np, 0, &r);
of_node_put(np);
if (ret)
return ret;
smem->regions[i].aux_base = (u32)r.start;
smem->regions[i].size = resource_size(&r);
smem->regions[i].virt_base = devm_ioremap_nocache(dev, r.start,
resource_size(&r));
if (!smem->regions[i].virt_base)
return -ENOMEM;
return 0;
} }
static int qcom_smem_probe(struct platform_device *pdev) static int qcom_smem_probe(struct platform_device *pdev)
{ {
struct smem_header *header; struct smem_header *header;
struct device_node *np;
struct qcom_smem *smem; struct qcom_smem *smem;
struct resource *res;
struct resource r;
size_t array_size; size_t array_size;
int num_regions = 0; int num_regions;
int hwlock_id; int hwlock_id;
u32 version; u32 version;
int ret; int ret;
int i;
num_regions = qcom_smem_count_mem_regions(pdev) + 1; num_regions = 1;
if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
num_regions++;
array_size = num_regions * sizeof(struct smem_region); array_size = num_regions * sizeof(struct smem_region);
smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL); smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
...@@ -666,39 +714,17 @@ static int qcom_smem_probe(struct platform_device *pdev) ...@@ -666,39 +714,17 @@ static int qcom_smem_probe(struct platform_device *pdev)
smem->dev = &pdev->dev; smem->dev = &pdev->dev;
smem->num_regions = num_regions; smem->num_regions = num_regions;
np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0); ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
if (!np) {
dev_err(&pdev->dev, "No memory-region specified\n");
return -EINVAL;
}
ret = of_address_to_resource(np, 0, &r);
of_node_put(np);
if (ret) if (ret)
return ret; return ret;
smem->regions[0].aux_base = (u32)r.start; if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev,
smem->regions[0].size = resource_size(&r); "qcom,rpm-msg-ram", 1)))
smem->regions[0].virt_base = devm_ioremap_nocache(&pdev->dev, return ret;
r.start,
resource_size(&r));
if (!smem->regions[0].virt_base)
return -ENOMEM;
for (i = 1; i < num_regions; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i - 1);
smem->regions[i].aux_base = (u32)res->start;
smem->regions[i].size = resource_size(res);
smem->regions[i].virt_base = devm_ioremap_nocache(&pdev->dev,
res->start,
resource_size(res));
if (!smem->regions[i].virt_base)
return -ENOMEM;
}
header = smem->regions[0].virt_base; header = smem->regions[0].virt_base;
if (header->initialized != 1 || header->reserved) { if (le32_to_cpu(header->initialized) != 1 ||
le32_to_cpu(header->reserved)) {
dev_err(&pdev->dev, "SMEM is not initialized by SBL\n"); dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
return -EINVAL; return -EINVAL;
} }
...@@ -730,8 +756,8 @@ static int qcom_smem_probe(struct platform_device *pdev) ...@@ -730,8 +756,8 @@ static int qcom_smem_probe(struct platform_device *pdev)
static int qcom_smem_remove(struct platform_device *pdev) static int qcom_smem_remove(struct platform_device *pdev)
{ {
__smem = NULL;
hwspin_lock_free(__smem->hwlock); hwspin_lock_free(__smem->hwlock);
__smem = NULL;
return 0; return 0;
} }
......
...@@ -23,6 +23,8 @@ struct qcom_scm_hdcp_req { ...@@ -23,6 +23,8 @@ struct qcom_scm_hdcp_req {
u32 val; u32 val;
}; };
extern bool qcom_scm_is_available(void);
extern bool qcom_scm_hdcp_available(void); extern bool qcom_scm_hdcp_available(void);
extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp); u32 *resp);
......
...@@ -8,6 +8,14 @@ struct qcom_smd; ...@@ -8,6 +8,14 @@ struct qcom_smd;
struct qcom_smd_channel; struct qcom_smd_channel;
struct qcom_smd_lookup; struct qcom_smd_lookup;
/**
* struct qcom_smd_id - struct used for matching a smd device
* @name: name of the channel
*/
struct qcom_smd_id {
char name[20];
};
/** /**
* struct qcom_smd_device - smd device struct * struct qcom_smd_device - smd device struct
* @dev: the device struct * @dev: the device struct
...@@ -21,6 +29,7 @@ struct qcom_smd_device { ...@@ -21,6 +29,7 @@ struct qcom_smd_device {
/** /**
* struct qcom_smd_driver - smd driver struct * struct qcom_smd_driver - smd driver struct
* @driver: underlying device driver * @driver: underlying device driver
* @smd_match_table: static channel match table
* @probe: invoked when the smd channel is found * @probe: invoked when the smd channel is found
* @remove: invoked when the smd channel is closed * @remove: invoked when the smd channel is closed
* @callback: invoked when an inbound message is received on the channel, * @callback: invoked when an inbound message is received on the channel,
...@@ -29,6 +38,8 @@ struct qcom_smd_device { ...@@ -29,6 +38,8 @@ struct qcom_smd_device {
*/ */
struct qcom_smd_driver { struct qcom_smd_driver {
struct device_driver driver; struct device_driver driver;
const struct qcom_smd_id *smd_match_table;
int (*probe)(struct qcom_smd_device *dev); int (*probe)(struct qcom_smd_device *dev);
void (*remove)(struct qcom_smd_device *dev); void (*remove)(struct qcom_smd_device *dev);
int (*callback)(struct qcom_smd_device *, const void *, size_t); int (*callback)(struct qcom_smd_device *, const void *, size_t);
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
#define QCOM_SMEM_HOST_ANY -1 #define QCOM_SMEM_HOST_ANY -1
int qcom_smem_alloc(unsigned host, unsigned item, size_t size); int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size); void *qcom_smem_get(unsigned host, unsigned item, size_t *size);
int qcom_smem_get_free_space(unsigned host); int qcom_smem_get_free_space(unsigned host);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment