Commit 7d549995 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Nothing very exciting here, mainly just a bunch of irdma fixes. irdma
  is a new driver this cycle so it to be expected.

   - Many more irdma fixups from bots/etc

   - bnxt_re regression in their counters from a FW upgrade

   - User-triggerable memory leak in rxe"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/irdma: Change returned type of irdma_setup_virt_qp to void
  RDMA/irdma: Change the returned type of irdma_set_hw_rsrc to void
  RDMA/irdma: change the returned type of irdma_sc_repost_aeq_entries to void
  RDMA/irdma: Check vsi pointer before using it
  RDMA/rxe: Fix memory leak in error path code
  RDMA/irdma: Change the returned type to void
  RDMA/irdma: Make spdxcheck.py happy
  RDMA/irdma: Fix unused variable total_size warning
  RDMA/bnxt_re: Fix stats counters
parents 51bbe7eb dc6afef7
@@ -120,6 +120,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
 	if (!chip_ctx)
 		return -ENOMEM;
 	chip_ctx->chip_num = bp->chip_num;
+	chip_ctx->hw_stats_size = bp->hw_ring_stats_size;
 	rdev->chip_ctx = chip_ctx;
 	/* rest members to follow eventually */
@@ -550,6 +551,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
 				       dma_addr_t dma_map,
 				       u32 *fw_stats_ctx_id)
 {
+	struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
 	struct hwrm_stat_ctx_alloc_output resp = {0};
 	struct hwrm_stat_ctx_alloc_input req = {0};
 	struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -566,7 +568,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
 	req.update_period_ms = cpu_to_le32(1000);
 	req.stats_dma_addr = cpu_to_le64(dma_map);
-	req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
+	req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
 	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
 			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
......
@@ -56,6 +56,7 @@
 static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
 				      struct bnxt_qplib_stats *stats);
 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+				      struct bnxt_qplib_chip_ctx *cctx,
 				      struct bnxt_qplib_stats *stats);
 /* PBL */
@@ -559,7 +560,7 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
 			goto fail;
 stats_alloc:
 	/* Stats */
-	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
+	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
 	if (rc)
 		goto fail;
@@ -889,15 +890,12 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
 }

 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+				      struct bnxt_qplib_chip_ctx *cctx,
 				      struct bnxt_qplib_stats *stats)
 {
 	memset(stats, 0, sizeof(*stats));
 	stats->fw_id = -1;
-	/* 128 byte aligned context memory is required only for 57500.
-	 * However making this unconditional, it does not harm previous
-	 * generation.
-	 */
-	stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
+	stats->size = cctx->hw_stats_size;
 	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
 					&stats->dma_map, GFP_KERNEL);
 	if (!stats->dma) {
......
@@ -54,6 +54,7 @@ struct bnxt_qplib_chip_ctx {
 	u16 chip_num;
 	u8 chip_rev;
 	u8 chip_metal;
+	u16 hw_stats_size;
 	struct bnxt_qplib_drv_modes modes;
};
......
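The bnxt_re hunks above are the counters regression called out in the merge message: the RoCE driver sized its stats buffer from a compile-time sizeof(), so a firmware upgrade that grew the stats ring left the DMA buffer and the length reported to firmware out of sync. The shape of the fix, reduced to a minimal standalone C sketch (struct and function names below are illustrative stand-ins, not the driver's): capture the size the lower-level driver reports once at setup, then derive both the allocation and the request length from that single cached value.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for what the Ethernet driver reports at runtime. */
struct en_dev {
        uint16_t hw_ring_stats_size;    /* set by firmware, may grow */
};

/* Stand-in for the chip context: cache the size once at setup. */
struct chip_ctx {
        uint16_t hw_stats_size;
};

static void setup_chip_ctx(struct chip_ctx *cctx, const struct en_dev *en)
{
        cctx->hw_stats_size = en->hw_ring_stats_size;
}

/* Both the buffer and the length handed to "firmware" come from the one
 * cached value, so they can never disagree the way a hardcoded
 * sizeof(struct ctx_hw_stats_ext) could after a FW upgrade. */
static void *alloc_stats(const struct chip_ctx *cctx, uint16_t *dma_len)
{
        *dma_len = cctx->hw_stats_size;
        return calloc(1, cctx->hw_stats_size);
}

int main(void)
{
        struct en_dev en = { .hw_ring_stats_size = 1024 };
        struct chip_ctx cctx;
        uint16_t dma_len;
        void *stats;

        setup_chip_ctx(&cctx, &en);
        stats = alloc_stats(&cctx, &dma_len);
        printf("stats buffer: %u bytes\n", dma_len);
        free(stats);
        return 0;
}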
...@@ -2845,7 +2845,7 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf, ...@@ -2845,7 +2845,7 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
* parses fpm commit info and copy base value * parses fpm commit info and copy base value
* of hmc objects in hmc_info * of hmc objects in hmc_info
*/ */
static enum irdma_status_code static void
irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf, irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
struct irdma_hmc_obj_info *info, u32 *sd) struct irdma_hmc_obj_info *info, u32 *sd)
{ {
...@@ -2915,7 +2915,6 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf, ...@@ -2915,7 +2915,6 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
else else
*sd = (u32)(size >> 21); *sd = (u32)(size >> 21);
return 0;
} }
/** /**
@@ -4187,11 +4186,9 @@ enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
  * @dev: sc device struct
  * @count: allocate count
  */
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
 {
 	writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
-
-	return 0;
 }

 /**
@@ -4434,9 +4431,9 @@ static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
 	ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
 					   &commit_fpm_mem, true, wait_type);
 	if (!ret_code)
-		ret_code = irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
-							 hmc_info->hmc_obj,
-							 &hmc_info->sd_table.sd_cnt);
+		irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
+					      hmc_info->hmc_obj,
+					      &hmc_info->sd_table.sd_cnt);
 	print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
 			     8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
 			     false);
......
@@ -1920,7 +1920,7 @@ enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf)
  * irdma_set_hw_rsrc - set hw memory resources.
  * @rf: RDMA PCI function
  */
-static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
+static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
 {
 	rf->allocated_qps = (void *)(rf->mem_rsrc +
 		    (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
@@ -1937,8 +1937,6 @@ static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
 	spin_lock_init(&rf->arp_lock);
 	spin_lock_init(&rf->qptable_lock);
 	spin_lock_init(&rf->qh_list_lock);
-
-	return 0;
 }

 /**
@@ -2000,9 +1998,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
 	rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;

-	ret = irdma_set_hw_rsrc(rf);
-	if (ret)
-		goto set_hw_rsrc_fail;
+	irdma_set_hw_rsrc(rf);

 	set_bit(0, rf->allocated_mrs);
 	set_bit(0, rf->allocated_qps);
@@ -2025,9 +2021,6 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)

 	return 0;

-set_hw_rsrc_fail:
-	kfree(rf->mem_rsrc);
-	rf->mem_rsrc = NULL;
 mem_rsrc_kzalloc_fail:
 	kfree(rf->allocated_ws_nodes);
 	rf->allocated_ws_nodes = NULL;
......
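Several of the irdma patches in this pull follow one cleanup pattern, visible above: a function whose status return is always zero is converted to return void, and each caller then drops its dead "if (ret)" arm together with any cleanup label that was only reachable through it (as set_hw_rsrc_fail was here). A minimal standalone C sketch of the before/after shape, with hypothetical names:

#include <stdio.h>
#include <string.h>

struct rsrc {
        int table[8];
};

/* Before (sketch): the function claimed it could fail but only ever
 * returned 0, so callers carried an unreachable error path:
 *
 *      ret = set_hw_rsrc(r);
 *      if (ret)
 *              goto set_hw_rsrc_fail;
 */

/* After: infallible work returns void; the caller has no status to
 * check and no cleanup label to keep alive. */
static void set_hw_rsrc(struct rsrc *r)
{
        memset(r->table, 0, sizeof(r->table));  /* cannot fail */
}

int main(void)
{
        struct rsrc r;

        set_hw_rsrc(&r);        /* straight-line call, no error branch */
        printf("resources initialized\n");
        return 0;
}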
@@ -215,10 +215,10 @@ static void irdma_remove(struct auxiliary_device *aux_dev)
 	pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
 }

-static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf)
+static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf,
+				   struct ice_vsi *vsi)
 {
 	struct irdma_pci_f *rf = iwdev->rf;
-	struct ice_vsi *vsi = ice_get_main_vsi(pf);

 	rf->cdev = pf;
 	rf->gen_ops.register_qset = irdma_lan_register_qset;
@@ -253,12 +253,15 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
 						  struct iidc_auxiliary_dev,
 						  adev);
 	struct ice_pf *pf = iidc_adev->pf;
+	struct ice_vsi *vsi = ice_get_main_vsi(pf);
 	struct iidc_qos_params qos_info = {};
 	struct irdma_device *iwdev;
 	struct irdma_pci_f *rf;
 	struct irdma_l2params l2params = {};
 	int err;

+	if (!vsi)
+		return -EIO;
+
 	iwdev = ib_alloc_device(irdma_device, ibdev);
 	if (!iwdev)
 		return -ENOMEM;
@@ -268,7 +271,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
 		return -ENOMEM;
 	}

-	irdma_fill_device_info(iwdev, pf);
+	irdma_fill_device_info(iwdev, pf, vsi);
 	rf = iwdev->rf;

 	if (irdma_ctrl_init_hw(rf)) {
......
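The vsi fix above hoists the ice_get_main_vsi() lookup out of the helper and into probe, so the pointer is validated once before anything dereferences it. A minimal standalone C sketch of that pattern, with hypothetical names (not the ice/irdma API): validate a derived pointer at the entry point, then pass it down rather than re-deriving it unchecked in each helper.

#include <errno.h>
#include <stdio.h>

struct pf { int id; };
struct vsi { int num; };

/* Stand-in for ice_get_main_vsi(): may legitimately return NULL early
 * in bring-up (hypothetical logic). */
static struct vsi *get_main_vsi(struct pf *pf)
{
        static struct vsi main_vsi = { .num = 7 };
        return pf ? &main_vsi : NULL;
}

/* The helper takes the already-validated pointer, so the NULL check
 * lives in exactly one place. */
static void fill_device_info(struct pf *pf, struct vsi *vsi)
{
        printf("PF %d bound to VSI %d\n", pf->id, vsi->num);
}

static int probe(struct pf *pf)
{
        struct vsi *vsi = get_main_vsi(pf);

        if (!vsi)               /* validate once, before any use */
                return -EIO;

        fill_device_info(pf, vsi);
        return 0;
}

int main(void)
{
        struct pf pf = { .id = 0 };

        return probe(&pf) ? 1 : 0;
}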
@@ -1222,8 +1222,7 @@ enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
 					 struct irdma_aeq_init_info *info);
 enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
 					      struct irdma_aeqe_info *info);
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev,
-						   u32 count);
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
 void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
 		      int abi_ver);
......
@@ -931,7 +931,7 @@ enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
 enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
 					     struct irdma_post_rq_info *info)
 {
-	u32 total_size = 0, wqe_idx, i, byte_off;
+	u32 wqe_idx, i, byte_off;
 	u32 addl_frag_cnt;
 	__le64 *wqe;
 	u64 hdr;
@@ -939,9 +939,6 @@ enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
 	if (qp->max_rq_frag_cnt < info->num_sges)
 		return IRDMA_ERR_INVALID_FRAG_COUNT;

-	for (i = 0; i < info->num_sges; i++)
-		total_size += info->sg_list[i].len;
-
 	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
 	if (!wqe)
 		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
......
@@ -557,7 +557,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
  * @iwqp: qp ptr
  * @init_info: initialize info to return
  */
-static int irdma_setup_virt_qp(struct irdma_device *iwdev,
-			       struct irdma_qp *iwqp,
-			       struct irdma_qp_init_info *init_info)
+static void irdma_setup_virt_qp(struct irdma_device *iwdev,
+				struct irdma_qp *iwqp,
+				struct irdma_qp_init_info *init_info)
 {
@@ -574,8 +574,6 @@ static int irdma_setup_virt_qp(struct irdma_device *iwdev,
 		init_info->sq_pa = qpmr->sq_pbl.addr;
 		init_info->rq_pa = qpmr->rq_pbl.addr;
 	}
-
-	return 0;
 }

 /**
@@ -914,7 +912,7 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
 			}
 		}
 		init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
-		err_code = irdma_setup_virt_qp(iwdev, iwqp, &init_info);
+		irdma_setup_virt_qp(iwdev, iwqp, &init_info);
 	} else {
 		init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
 		err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
......
@@ -113,13 +113,14 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	int num_buf;
 	void *vaddr;
 	int err;
+	int i;

 	umem = ib_umem_get(pd->ibpd.device, start, length, access);
 	if (IS_ERR(umem)) {
-		pr_warn("err %d from rxe_umem_get\n",
-			(int)PTR_ERR(umem));
+		pr_warn("%s: Unable to pin memory region err = %d\n",
+			__func__, (int)PTR_ERR(umem));
 		err = PTR_ERR(umem);
-		goto err1;
+		goto err_out;
 	}

 	mr->umem = umem;
@@ -129,9 +130,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,

 	err = rxe_mr_alloc(mr, num_buf);
 	if (err) {
-		pr_warn("err %d from rxe_mr_alloc\n", err);
-		ib_umem_release(umem);
-		goto err1;
+		pr_warn("%s: Unable to allocate memory for map\n",
+			__func__);
+		goto err_release_umem;
 	}

 	mr->page_shift = PAGE_SHIFT;
@@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 			vaddr = page_address(sg_page_iter_page(&sg_iter));
 			if (!vaddr) {
-				pr_warn("null vaddr\n");
-				ib_umem_release(umem);
+				pr_warn("%s: Unable to get virtual address\n",
+					__func__);
 				err = -ENOMEM;
-				goto err1;
+				goto err_cleanup_map;
 			}

 			buf->addr = (uintptr_t)vaddr;
@@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,

 	return 0;

-err1:
+err_cleanup_map:
+	for (i = 0; i < mr->num_map; i++)
+		kfree(mr->map[i]);
+	kfree(mr->map);
+err_release_umem:
+	ib_umem_release(umem);
+err_out:
 	return err;
 }
......
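The rxe patch closes the user-triggerable leak by adopting the kernel's usual labeled-unwind error handling: each failure jumps to a label that releases exactly what has been built so far, in reverse order of acquisition, so the map table can no longer leak on a late failure. A minimal userspace C sketch of the idiom under hypothetical names (not the rxe code itself):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct mr {
        void **map;     /* table of separately allocated chunks */
        int num_map;
};

static int mr_init(struct mr *mr, int num_map, size_t chunk)
{
        int i, err;

        mr->map = calloc(num_map, sizeof(*mr->map));
        if (!mr->map) {
                err = -ENOMEM;
                goto err_out;           /* nothing to undo yet */
        }
        mr->num_map = num_map;

        for (i = 0; i < num_map; i++) {
                mr->map[i] = malloc(chunk);
                if (!mr->map[i]) {
                        err = -ENOMEM;
                        goto err_cleanup_map;   /* undo partial progress */
                }
        }
        return 0;

err_cleanup_map:
        while (--i >= 0)        /* reverse order: free only what exists */
                free(mr->map[i]);
        free(mr->map);
        mr->map = NULL;
err_out:
        return err;
}

int main(void)
{
        struct mr mr = { 0 };
        int i;

        if (mr_init(&mr, 8, 64) != 0)
                return 1;
        printf("mr initialized with %d chunks\n", mr.num_map);
        for (i = 0; i < mr.num_map; i++)
                free(mr.map[i]);
        free(mr.map);
        return 0;
}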
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB */
 /*
  * Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
  * Copyright (c) 2005 Topspin Communications. All rights reserved.
......