Commit 778e1cdd authored by Kees Cook

treewide: kvzalloc() -> kvcalloc()

The kvzalloc() function has a 2-factor argument form, kvcalloc(). This
patch replaces cases of:

        kvzalloc(a * b, gfp)

with:

        kvcalloc(a, b, gfp)

as well as handling cases of:

        kvzalloc(a * b * c, gfp)

with:

        kvzalloc(array3_size(a, b, c), gfp)

as it's slightly less ugly than:

        kvcalloc(array_size(a, b), c, gfp)
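
As an illustration (the struct and variable names here are made up for this
example, not taken from any converted call site), the resulting patterns look
like:

        struct foo *tbl;        /* hypothetical type and variables */

        /* before: the multiplication can silently wrap around size_t */
        tbl = kvzalloc(nentries * sizeof(*tbl), GFP_KERNEL);

        /* after: kvcalloc() checks the count * size product and returns
         * NULL on overflow instead of under-allocating
         */
        tbl = kvcalloc(nentries, sizeof(*tbl), GFP_KERNEL);

        /* 3-factor products keep kvzalloc(), but the size goes through
         * array3_size(), which saturates to SIZE_MAX on overflow so the
         * allocation fails cleanly
         */
        tbl = kvzalloc(array3_size(nbanks, nentries, sizeof(*tbl)),
                       GFP_KERNEL);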

This does, however, attempt to ignore constant size factors like:

        kvzalloc(4 * 1024, gfp)

though any constants defined via macros get caught up in the conversion.

Any factors with a sizeof() of "unsigned char", "char", or "u8" were
dropped, since they're redundant (each of those is 1 byte).
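
For instance, a call like this (with "len" standing in for whatever count
variable a hypothetical call site uses):

        kvzalloc(sizeof(u8) * len, gfp)

is simply reduced to:

        kvzalloc(len, gfp)

rather than being converted to kvcalloc().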

The Coccinelle script used for this was:

// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@

(
  kvzalloc(
-	(sizeof(TYPE)) * E
+	sizeof(TYPE) * E
  , ...)
|
  kvzalloc(
-	(sizeof(THING)) * E
+	sizeof(THING) * E
  , ...)
)

// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@

(
  kvzalloc(
-	sizeof(u8) * (COUNT)
+	COUNT
  , ...)
|
  kvzalloc(
-	sizeof(__u8) * (COUNT)
+	COUNT
  , ...)
|
  kvzalloc(
-	sizeof(char) * (COUNT)
+	COUNT
  , ...)
|
  kvzalloc(
-	sizeof(unsigned char) * (COUNT)
+	COUNT
  , ...)
|
  kvzalloc(
-	sizeof(u8) * COUNT
+	COUNT
  , ...)
|
  kvzalloc(
-	sizeof(__u8) * COUNT
+	COUNT
  , ...)
|
  kvzalloc(
-	sizeof(char) * COUNT
+	COUNT
  , ...)
|
  kvzalloc(
-	sizeof(unsigned char) * COUNT
+	COUNT
  , ...)
)

// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@

(
- kvzalloc
+ kvcalloc
  (
-	sizeof(TYPE) * (COUNT_ID)
+	COUNT_ID, sizeof(TYPE)
  , ...)
|
- kvzalloc
+ kvcalloc
  (
-	sizeof(TYPE) * COUNT_ID
+	COUNT_ID, sizeof(TYPE)
  , ...)
|
- kvzalloc
+ kvcalloc
  (
-	sizeof(TYPE) * (COUNT_CONST)
+	COUNT_CONST, sizeof(TYPE)
  , ...)
|
- kvzalloc
+ kvcalloc
  (
-	sizeof(TYPE) * COUNT_CONST
+	COUNT_CONST, sizeof(TYPE)
  , ...)
|
- kvzalloc
+ kvcalloc
  (
-	sizeof(THING) * (COUNT_ID)
+	COUNT_ID, sizeof(THING)
  , ...)
|
- kvzalloc
+ kvcalloc
  (
-	sizeof(THING) * COUNT_ID
+	COUNT_ID, sizeof(THING)
  , ...)
|
- kvzalloc
+ kvcalloc
  (
-	sizeof(THING) * (COUNT_CONST)
+	COUNT_CONST, sizeof(THING)
  , ...)
|
- kvzalloc
+ kvcalloc
  (
-	sizeof(THING) * COUNT_CONST
+	COUNT_CONST, sizeof(THING)
  , ...)
)

// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@

- kvzalloc
+ kvcalloc
  (
-	SIZE * COUNT
+	COUNT, SIZE
  , ...)

// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@

(
  kvzalloc(
-	sizeof(TYPE) * (COUNT) * (STRIDE)
+	array3_size(COUNT, STRIDE, sizeof(TYPE))
  , ...)
|
  kvzalloc(
-	sizeof(TYPE) * (COUNT) * STRIDE
+	array3_size(COUNT, STRIDE, sizeof(TYPE))
  , ...)
|
  kvzalloc(
-	sizeof(TYPE) * COUNT * (STRIDE)
+	array3_size(COUNT, STRIDE, sizeof(TYPE))
  , ...)
|
  kvzalloc(
-	sizeof(TYPE) * COUNT * STRIDE
+	array3_size(COUNT, STRIDE, sizeof(TYPE))
  , ...)
|
  kvzalloc(
-	sizeof(THING) * (COUNT) * (STRIDE)
+	array3_size(COUNT, STRIDE, sizeof(THING))
  , ...)
|
  kvzalloc(
-	sizeof(THING) * (COUNT) * STRIDE
+	array3_size(COUNT, STRIDE, sizeof(THING))
  , ...)
|
  kvzalloc(
-	sizeof(THING) * COUNT * (STRIDE)
+	array3_size(COUNT, STRIDE, sizeof(THING))
  , ...)
|
  kvzalloc(
-	sizeof(THING) * COUNT * STRIDE
+	array3_size(COUNT, STRIDE, sizeof(THING))
  , ...)
)

// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@

(
  kvzalloc(
-	sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+	array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
  , ...)
|
  kvzalloc(
-	sizeof(TYPE1) * sizeof(TYPE2) * (COUNT)
+	array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
  , ...)
|
  kvzalloc(
-	sizeof(THING1) * sizeof(THING2) * COUNT
+	array3_size(COUNT, sizeof(THING1), sizeof(THING2))
  , ...)
|
  kvzalloc(
-	sizeof(THING1) * sizeof(THING2) * (COUNT)
+	array3_size(COUNT, sizeof(THING1), sizeof(THING2))
  , ...)
|
  kvzalloc(
-	sizeof(TYPE1) * sizeof(THING2) * COUNT
+	array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
  , ...)
|
  kvzalloc(
-	sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+	array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
  , ...)
)

// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@

(
  kvzalloc(
-	(COUNT) * STRIDE * SIZE
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kvzalloc(
-	COUNT * (STRIDE) * SIZE
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kvzalloc(
-	COUNT * STRIDE * (SIZE)
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kvzalloc(
-	(COUNT) * (STRIDE) * SIZE
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kvzalloc(
-	COUNT * (STRIDE) * (SIZE)
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kvzalloc(
-	(COUNT) * STRIDE * (SIZE)
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kvzalloc(
-	(COUNT) * (STRIDE) * (SIZE)
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kvzalloc(
-	COUNT * STRIDE * SIZE
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
)

// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@

(
  kvzalloc(C1 * C2 * C3, ...)
|
  kvzalloc(
-	(E1) * E2 * E3
+	array3_size(E1, E2, E3)
  , ...)
|
  kvzalloc(
-	(E1) * (E2) * E3
+	array3_size(E1, E2, E3)
  , ...)
|
  kvzalloc(
-	(E1) * (E2) * (E3)
+	array3_size(E1, E2, E3)
  , ...)
|
  kvzalloc(
-	E1 * E2 * E3
+	array3_size(E1, E2, E3)
  , ...)
)

// And then all remaining 2-factor products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@

(
  kvzalloc(sizeof(THING) * C2, ...)
|
  kvzalloc(sizeof(TYPE) * C2, ...)
|
  kvzalloc(C1 * C2 * C3, ...)
|
  kvzalloc(C1 * C2, ...)
|
- kvzalloc
+ kvcalloc
  (
-	sizeof(TYPE) * (E2)
+	E2, sizeof(TYPE)
  , ...)
|
- kvzalloc
+ kvcalloc
  (
-	sizeof(TYPE) * E2
+	E2, sizeof(TYPE)
  , ...)
|
- kvzalloc
+ kvcalloc
  (
-	sizeof(THING) * (E2)
+	E2, sizeof(THING)
  , ...)
|
- kvzalloc
+ kvcalloc
  (
-	sizeof(THING) * E2
+	E2, sizeof(THING)
  , ...)
|
- kvzalloc
+ kvcalloc
  (
-	(E1) * E2
+	E1, E2
  , ...)
|
- kvzalloc
+ kvcalloc
  (
-	(E1) * (E2)
+	E1, E2
  , ...)
|
- kvzalloc
+ kvcalloc
  (
-	E1 * E2
+	E1, E2
  , ...)
)
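
For context on why the 2-factor form is preferred: kvcalloc() is a thin
wrapper around kvmalloc_array(), which refuses the allocation outright when
the count * size product overflows. Roughly (a simplified sketch for
illustration, not a verbatim copy of the in-tree helpers):

        static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
        {
                size_t bytes;

                /* Refuse the allocation if n * size would overflow. */
                if (unlikely(check_mul_overflow(n, size, &bytes)))
                        return NULL;

                return kvmalloc(bytes, flags);
        }

        static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
        {
                return kvmalloc_array(n, size, flags | __GFP_ZERO);
        }

The array3_size() helper takes the other route: it saturates to SIZE_MAX on
overflow, so kvzalloc(array3_size(a, b, c), gfp) fails the allocation instead
of returning a buffer smaller than the caller computed.
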
Signed-off-by: Kees Cook <keescook@chromium.org>
parent 344476e1
@@ -40,8 +40,9 @@ int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
         int i;
         for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
-                slot->arch.gfn_track[i] = kvzalloc(npages *
-                                        sizeof(*slot->arch.gfn_track[i]), GFP_KERNEL);
+                slot->arch.gfn_track[i] =
+                        kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
+                                 GFP_KERNEL);
                 if (!slot->arch.gfn_track[i])
                         goto track_free;
         }
......
@@ -8871,13 +8871,14 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                                       slot->base_gfn, level) + 1;
                 slot->arch.rmap[i] =
-                        kvzalloc(lpages * sizeof(*slot->arch.rmap[i]), GFP_KERNEL);
+                        kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
+                                 GFP_KERNEL);
                 if (!slot->arch.rmap[i])
                         goto out_free;
                 if (i == 0)
                         continue;
-                linfo = kvzalloc(lpages * sizeof(*linfo), GFP_KERNEL);
+                linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL);
                 if (!linfo)
                         goto out_free;
......
@@ -1274,19 +1274,22 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
         output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
-        rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+        rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
+                            sizeof(*rgb_user),
                             GFP_KERNEL);
         if (!rgb_user)
                 goto rgb_user_alloc_fail;
-        rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
+        rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+                               sizeof(*rgb_regamma),
                                GFP_KERNEL);
         if (!rgb_regamma)
                 goto rgb_regamma_alloc_fail;
-        axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
+        axix_x = kvcalloc(ramp->num_entries + 3, sizeof(*axix_x),
                           GFP_KERNEL);
         if (!axix_x)
                 goto axix_x_alloc_fail;
-        coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+        coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
+                         GFP_KERNEL);
         if (!coeff)
                 goto coeff_alloc_fail;

@@ -1482,19 +1485,21 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
         input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
-        rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+        rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
+                            sizeof(*rgb_user),
                             GFP_KERNEL);
         if (!rgb_user)
                 goto rgb_user_alloc_fail;
-        curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
+        curve = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*curve),
                          GFP_KERNEL);
         if (!curve)
                 goto curve_alloc_fail;
-        axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
+        axix_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axix_x),
                           GFP_KERNEL);
         if (!axix_x)
                 goto axix_x_alloc_fail;
-        coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+        coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
+                         GFP_KERNEL);
         if (!coeff)
                 goto coeff_alloc_fail;

@@ -1571,8 +1576,8 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
                 }
                 ret = true;
         } else if (trans == TRANSFER_FUNCTION_PQ) {
-                rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
-                                       (MAX_HW_POINTS + _EXTRA_POINTS),
+                rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+                                       sizeof(*rgb_regamma),
                                        GFP_KERNEL);
                 if (!rgb_regamma)
                         goto rgb_regamma_alloc_fail;

@@ -1596,8 +1601,8 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
                 kvfree(rgb_regamma);
         } else if (trans == TRANSFER_FUNCTION_SRGB ||
                    trans == TRANSFER_FUNCTION_BT709) {
-                rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
-                                       (MAX_HW_POINTS + _EXTRA_POINTS),
+                rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+                                       sizeof(*rgb_regamma),
                                        GFP_KERNEL);
                 if (!rgb_regamma)
                         goto rgb_regamma_alloc_fail;

@@ -1640,8 +1645,8 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
                 }
                 ret = true;
         } else if (trans == TRANSFER_FUNCTION_PQ) {
-                rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
-                                       (MAX_HW_POINTS + _EXTRA_POINTS),
+                rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+                                       sizeof(*rgb_degamma),
                                        GFP_KERNEL);
                 if (!rgb_degamma)
                         goto rgb_degamma_alloc_fail;

@@ -1660,8 +1665,8 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
                 kvfree(rgb_degamma);
         } else if (trans == TRANSFER_FUNCTION_SRGB ||
                    trans == TRANSFER_FUNCTION_BT709) {
-                rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
-                                       (MAX_HW_POINTS + _EXTRA_POINTS),
+                rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+                                       sizeof(*rgb_degamma),
                                        GFP_KERNEL);
                 if (!rgb_degamma)
                         goto rgb_degamma_alloc_fail;
......
@@ -59,7 +59,7 @@ nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
         pgt->sparse = sparse;
         if (desc->type == PGD) {
-                pgt->pde = kvzalloc(sizeof(*pgt->pde) * pten, GFP_KERNEL);
+                pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);
                 if (!pgt->pde) {
                         kfree(pgt);
                         return NULL;
......
@@ -127,7 +127,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
                 goto err_umem;
         }
-        in->pas = kvzalloc(sizeof(*in->pas) * ncont, GFP_KERNEL);
+        in->pas = kvcalloc(ncont, sizeof(*in->pas), GFP_KERNEL);
         if (!in->pas) {
                 err = -ENOMEM;
                 goto err_umem;

@@ -189,7 +189,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
         }
         mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
-        in->pas = kvzalloc(sizeof(*in->pas) * srq->buf.npages, GFP_KERNEL);
+        in->pas = kvcalloc(srq->buf.npages, sizeof(*in->pas), GFP_KERNEL);
         if (!in->pas) {
                 err = -ENOMEM;
                 goto err_buf;
......
@@ -797,8 +797,9 @@ static int verity_alloc_most_once(struct dm_verity *v)
                 return -E2BIG;
         }
-        v->validated_blocks = kvzalloc(BITS_TO_LONGS(v->data_blocks) *
-                                       sizeof(unsigned long), GFP_KERNEL);
+        v->validated_blocks = kvcalloc(BITS_TO_LONGS(v->data_blocks),
+                                       sizeof(unsigned long),
+                                       GFP_KERNEL);
         if (!v->validated_blocks) {
                 ti->error = "failed to allocate bitset for check_at_most_once";
                 return -ENOMEM;
......
@@ -304,7 +304,7 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
         for (i = 0; i < ctbl->clipt_size; ++i)
                 INIT_LIST_HEAD(&ctbl->hash_list[i]);
-        cl_list = kvzalloc(clipt_size*sizeof(struct clip_entry), GFP_KERNEL);
+        cl_list = kvcalloc(clipt_size, sizeof(struct clip_entry), GFP_KERNEL);
         if (!cl_list) {
                 kvfree(ctbl);
                 return NULL;
......
@@ -5646,8 +5646,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                 adapter->params.offload = 0;
         }
-        adapter->mps_encap = kvzalloc(sizeof(struct mps_encap_entry) *
-                                      adapter->params.arch.mps_tcam_size,
+        adapter->mps_encap = kvcalloc(adapter->params.arch.mps_tcam_size,
+                                      sizeof(struct mps_encap_entry),
                                       GFP_KERNEL);
         if (!adapter->mps_encap)
                 dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n");
......
@@ -457,7 +457,8 @@ struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
                 unsigned int bmap_size;
                 bmap_size = BITS_TO_LONGS(max_tids);
-                link->tid_map = kvzalloc(sizeof(unsigned long) * bmap_size, GFP_KERNEL);
+                link->tid_map = kvcalloc(bmap_size, sizeof(unsigned long),
+                                         GFP_KERNEL);
                 if (!link->tid_map)
                         goto out_no_mem;
                 bitmap_zero(link->tid_map, max_tids);
......
@@ -408,7 +408,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                 return -EINVAL;
         num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
-        table->icm = kvzalloc(num_icm * sizeof(*table->icm), GFP_KERNEL);
+        table->icm = kvcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
         if (!table->icm)
                 return -ENOMEM;
         table->virt = virt;
......
@@ -549,15 +549,17 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
         if (err)
                 goto out;
-        conn->qp.rq.bufs = kvzalloc(sizeof(conn->qp.rq.bufs[0]) *
-                                    conn->qp.rq.size, GFP_KERNEL);
+        conn->qp.rq.bufs = kvcalloc(conn->qp.rq.size,
+                                    sizeof(conn->qp.rq.bufs[0]),
+                                    GFP_KERNEL);
         if (!conn->qp.rq.bufs) {
                 err = -ENOMEM;
                 goto err_wq;
         }
-        conn->qp.sq.bufs = kvzalloc(sizeof(conn->qp.sq.bufs[0]) *
-                                    conn->qp.sq.size, GFP_KERNEL);
+        conn->qp.sq.bufs = kvcalloc(conn->qp.sq.size,
+                                    sizeof(conn->qp.sq.bufs[0]),
+                                    GFP_KERNEL);
         if (!conn->qp.sq.bufs) {
                 err = -ENOMEM;
                 goto err_rq_bufs;
......
@@ -590,7 +590,7 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
         alink->id = id;
         alink->parent = TC_H_ROOT;
         alink->total_queues = alink->vnic->max_rx_rings;
-        alink->qdiscs = kvzalloc(sizeof(*alink->qdiscs) * alink->total_queues,
+        alink->qdiscs = kvcalloc(alink->total_queues, sizeof(*alink->qdiscs),
                                  GFP_KERNEL);
         if (!alink->qdiscs) {
                 err = -ENOMEM;
......
@@ -2576,7 +2576,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
          * the array. */
         if (items)
                 num_arrays++;
-        q->pool = kvzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+        q->pool = kvcalloc(num_arrays * max, sizeof(void *), GFP_KERNEL);
         if (q->pool == NULL)
                 return -ENOMEM;
......
@@ -608,7 +608,7 @@ static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
                 new_size = min_t(u32, BTF_MAX_TYPE,
                                  btf->types_size + expand_by);
-                new_types = kvzalloc(new_size * sizeof(*new_types),
+                new_types = kvcalloc(new_size, sizeof(*new_types),
                                      GFP_KERNEL | __GFP_NOWARN);
                 if (!new_types)
                         return -ENOMEM;

@@ -698,17 +698,17 @@ static int env_resolve_init(struct btf_verifier_env *env)
         u8 *visit_states = NULL;
         /* +1 for btf_void */
-        resolved_sizes = kvzalloc((nr_types + 1) * sizeof(*resolved_sizes),
+        resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
                                   GFP_KERNEL | __GFP_NOWARN);
         if (!resolved_sizes)
                 goto nomem;
-        resolved_ids = kvzalloc((nr_types + 1) * sizeof(*resolved_ids),
+        resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
                                 GFP_KERNEL | __GFP_NOWARN);
         if (!resolved_ids)
                 goto nomem;
-        visit_states = kvzalloc((nr_types + 1) * sizeof(*visit_states),
+        visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
                                 GFP_KERNEL | __GFP_NOWARN);
         if (!visit_states)
                 goto nomem;
......
@@ -23,7 +23,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
         struct page **pages;
         nr_pages = gup->size / PAGE_SIZE;
-        pages = kvzalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
+        pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
         if (!pages)
                 return -ENOMEM;
......
@@ -122,12 +122,12 @@ static int alloc_swap_slot_cache(unsigned int cpu)
          * as kvzalloc could trigger reclaim and get_swap_page,
          * which can lock swap_slots_cache_mutex.
          */
-        slots = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
+        slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
                          GFP_KERNEL);
         if (!slots)
                 return -ENOMEM;
-        slots_ret = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
+        slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
                              GFP_KERNEL);
         if (!slots_ret) {
                 kvfree(slots);
......
@@ -620,7 +620,7 @@ int init_swap_address_space(unsigned int type, unsigned long nr_pages)
         unsigned int i, nr;
         nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
-        spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
+        spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
         if (!spaces)
                 return -ENOMEM;
         for (i = 0; i < nr; i++) {
......
@@ -3196,7 +3196,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                 p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
         nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
-        cluster_info = kvzalloc(nr_cluster * sizeof(*cluster_info),
+        cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
                                 GFP_KERNEL);
         if (!cluster_info) {
                 error = -ENOMEM;

@@ -3233,7 +3233,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
         }
         /* frontswap enabled? set up bit-per-page map for frontswap */
         if (IS_ENABLED(CONFIG_FRONTSWAP))
-                frontswap_map = kvzalloc(BITS_TO_LONGS(maxpages) * sizeof(long),
+                frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages),
+                                         sizeof(long),
                                          GFP_KERNEL);
         if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
......
@@ -489,11 +489,12 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
                 return err;
         if (!q->flows) {
-                q->flows = kvzalloc(q->flows_cnt *
-                                    sizeof(struct fq_codel_flow), GFP_KERNEL);
+                q->flows = kvcalloc(q->flows_cnt,
+                                    sizeof(struct fq_codel_flow),
+                                    GFP_KERNEL);
                 if (!q->flows)
                         return -ENOMEM;
-                q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
+                q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
                 if (!q->backlogs)
                         return -ENOMEM;
                 for (i = 0; i < q->flows_cnt; i++) {
......
@@ -599,8 +599,8 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
         if (!q->hh_flows) {
                 /* Initialize heavy-hitter flow table. */
-                q->hh_flows = kvzalloc(HH_FLOWS_CNT *
-                                       sizeof(struct list_head), GFP_KERNEL);
+                q->hh_flows = kvcalloc(HH_FLOWS_CNT, sizeof(struct list_head),
+                                       GFP_KERNEL);
                 if (!q->hh_flows)
                         return -ENOMEM;
                 for (i = 0; i < HH_FLOWS_CNT; i++)

@@ -614,8 +614,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
                 /* Initialize heavy-hitter filter arrays. */
                 for (i = 0; i < HHF_ARRAYS_CNT; i++) {
-                        q->hhf_arrays[i] = kvzalloc(HHF_ARRAYS_LEN *
-                                                    sizeof(u32), GFP_KERNEL);
+                        q->hhf_arrays[i] = kvcalloc(HHF_ARRAYS_LEN,
+                                                    sizeof(u32),
+                                                    GFP_KERNEL);
                         if (!q->hhf_arrays[i]) {
                                 /* Note: hhf_destroy() will be called
                                  * by our caller.
......