{
int i;
- omap_mcbsp_devices = kzalloc_objs(struct platform_device *, size,
- GFP_KERNEL);
+ omap_mcbsp_devices = kzalloc_objs(struct platform_device *, size);
if (!omap_mcbsp_devices) {
printk(KERN_ERR "Could not register McBSP devices\n");
return;
vdso_info[abi].vdso_code_start) >>
PAGE_SHIFT;
- vdso_pagelist = kzalloc_objs(struct page *, vdso_info[abi].vdso_pages,
- GFP_KERNEL);
+ vdso_pagelist = kzalloc_objs(struct page *, vdso_info[abi].vdso_pages);
if (vdso_pagelist == NULL)
return -ENOMEM;
vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start);
vdso_info.code_mapping.pages =
- kzalloc_objs(struct page *, vdso_info.size / PAGE_SIZE,
- GFP_KERNEL);
+ kzalloc_objs(struct page *, vdso_info.size / PAGE_SIZE);
if (!vdso_info.code_mapping.pages)
return -ENOMEM;
vcpu->arch.book3s = vcpu_book3s;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
- vcpu->arch.shadow_vcpu = kzalloc_obj(*vcpu->arch.shadow_vcpu,
- GFP_KERNEL);
+ vcpu->arch.shadow_vcpu = kzalloc_obj(*vcpu->arch.shadow_vcpu);
if (!vcpu->arch.shadow_vcpu)
goto free_vcpu3s;
#endif
{
int index;
- mm->context.hash_context = kmalloc_obj(struct hash_mm_context,
- GFP_KERNEL);
+ mm->context.hash_context = kmalloc_obj(struct hash_mm_context);
if (!mm->context.hash_context)
return -ENOMEM;
#ifdef CONFIG_PPC_SUBPAGE_PROT
/* inherit subpage prot details if we have one. */
if (current->mm->context.hash_context->spt) {
- mm->context.hash_context->spt = kmalloc_obj(struct subpage_prot_table,
- GFP_KERNEL);
+ mm->context.hash_context->spt = kmalloc_obj(struct subpage_prot_table);
if (!mm->context.hash_context->spt) {
kfree(mm->context.hash_context);
return -ENOMEM;
goto e_event_attrs;
}
- event_long_descs = kmalloc_objs(*event_long_descs, event_idx + 1,
- GFP_KERNEL);
+ event_long_descs = kmalloc_objs(*event_long_descs, event_idx + 1);
if (!event_long_descs) {
ret = -ENOMEM;
goto e_event_descs;
{
int nid, i, cpu;
- nest_imc_refc = kzalloc_objs(*nest_imc_refc, num_possible_nodes(),
- GFP_KERNEL);
+ nest_imc_refc = kzalloc_objs(*nest_imc_refc, num_possible_nodes());
if (!nest_imc_refc)
return -ENOMEM;
goto err;
nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
- pmu_ptr->mem_info = kzalloc_objs(struct imc_mem_info, nr_cores,
- GFP_KERNEL);
+ pmu_ptr->mem_info = kzalloc_objs(struct imc_mem_info, nr_cores);
if (!pmu_ptr->mem_info)
goto err;
- core_imc_refc = kzalloc_objs(struct imc_pmu_ref, nr_cores,
- GFP_KERNEL);
+ core_imc_refc = kzalloc_objs(struct imc_pmu_ref, nr_cores);
if (!core_imc_refc) {
kfree(pmu_ptr->mem_info);
return -ENOMEM;
nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
- trace_imc_refc = kzalloc_objs(struct imc_pmu_ref, nr_cores,
- GFP_KERNEL);
+ trace_imc_refc = kzalloc_objs(struct imc_pmu_ref, nr_cores);
if (!trace_imc_refc)
return -ENOMEM;
nr_idle_states = of_property_count_u32_elems(np,
"ibm,cpu-idle-state-flags");
- pnv_idle_states = kzalloc_objs(*pnv_idle_states, nr_idle_states,
- GFP_KERNEL);
+ pnv_idle_states = kzalloc_objs(*pnv_idle_states, nr_idle_states);
temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL);
temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL);
temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL);
u32 nid;
u64 m;
- memtrace_array = kzalloc_objs(struct memtrace_entry, num_online_nodes(),
- GFP_KERNEL);
+ memtrace_array = kzalloc_objs(struct memtrace_entry, num_online_nodes());
if (!memtrace_array) {
pr_err("Failed to allocate memtrace_array\n");
return -EINVAL;
nr_chips))
goto error;
- pmu_ptr->mem_info = kzalloc_objs(*pmu_ptr->mem_info, nr_chips + 1,
- GFP_KERNEL);
+ pmu_ptr->mem_info = kzalloc_objs(*pmu_ptr->mem_info, nr_chips + 1);
if (!pmu_ptr->mem_info)
goto error;
has_cur = true;
}
- pcaps[i].pattrs = kzalloc_objs(struct powercap_attr, j,
- GFP_KERNEL);
+ pcaps[i].pattrs = kzalloc_objs(struct powercap_attr, j);
if (!pcaps[i].pattrs)
goto out_pcaps_pattrs;
- pcaps[i].pg.attrs = kzalloc_objs(struct attribute *, j + 1,
- GFP_KERNEL);
+ pcaps[i].pg.attrs = kzalloc_objs(struct attribute *, j + 1);
if (!pcaps[i].pg.attrs) {
kfree(pcaps[i].pattrs);
goto out_pcaps_pattrs;
return;
}
- psr_attrs = kzalloc_objs(*psr_attrs, of_get_child_count(psr),
- GFP_KERNEL);
+ psr_attrs = kzalloc_objs(*psr_attrs, of_get_child_count(psr));
if (!psr_attrs)
goto out_put_psr;
if (!nr_attrs)
continue;
- sgs[i].sgattrs = kzalloc_objs(*sgs[i].sgattrs, nr_attrs,
- GFP_KERNEL);
+ sgs[i].sgattrs = kzalloc_objs(*sgs[i].sgattrs, nr_attrs);
if (!sgs[i].sgattrs)
goto out_sgs_sgattrs;
- sgs[i].sg.attrs = kzalloc_objs(*sgs[i].sg.attrs, nr_attrs + 1,
- GFP_KERNEL);
+ sgs[i].sg.attrs = kzalloc_objs(*sgs[i].sg.attrs, nr_attrs + 1);
if (!sgs[i].sg.attrs) {
kfree(sgs[i].sgattrs);
#ifdef CONFIG_PM
/* allocate memory to save mpic state */
- mpic->save_data = kmalloc_objs(*mpic->save_data, mpic->num_sources,
- GFP_KERNEL);
+ mpic->save_data = kmalloc_objs(*mpic->save_data, mpic->num_sources);
BUG_ON(mpic->save_data == NULL);
#endif
dev_info(&dev->dev, "Found %d message registers\n",
mpic_msgr_count);
- mpic_msgrs = kzalloc_objs(*mpic_msgrs, mpic_msgr_count,
- GFP_KERNEL);
+ mpic_msgrs = kzalloc_objs(*mpic_msgrs, mpic_msgr_count);
if (!mpic_msgrs) {
dev_err(&dev->dev,
"No memory for message register blocks\n");
vdso_info->vdso_code_start) >>
PAGE_SHIFT;
- vdso_pagelist = kzalloc_objs(struct page *, vdso_info->vdso_pages,
- GFP_KERNEL);
+ vdso_pagelist = kzalloc_objs(struct page *, vdso_info->vdso_pages);
if (vdso_pagelist == NULL)
panic("vDSO kcalloc failed!\n");
return -EPERM;
mutex_lock(&aift->aift_lock);
- aift->kzdev = kzalloc_objs(struct kvm_zdev *, ZPCI_NR_DEVICES,
- GFP_KERNEL);
+ aift->kzdev = kzalloc_objs(struct kvm_zdev *, ZPCI_NR_DEVICES);
if (!aift->kzdev) {
rc = -ENOMEM;
goto unlock;
if (!zdev_fmb_cache)
goto error_fmb;
- zpci_iomap_start = kzalloc_objs(*zpci_iomap_start, ZPCI_IOMAP_ENTRIES,
- GFP_KERNEL);
+ zpci_iomap_start = kzalloc_objs(*zpci_iomap_start, ZPCI_IOMAP_ENTRIES);
if (!zpci_iomap_start)
goto error_iomap;
if (unlikely(nr_ports == 0))
return -ENODEV;
- sh7786_pcie_ports = kzalloc_objs(struct sh7786_pcie_port, nr_ports,
- GFP_KERNEL);
+ sh7786_pcie_ports = kzalloc_objs(struct sh7786_pcie_port, nr_ports);
if (unlikely(!sh7786_pcie_ports))
return -ENOMEM;
unsigned long hv_err;
int i;
- hdesc = kzalloc_flex(*hdesc, maps, num_kernel_image_mappings,
- GFP_KERNEL);
+ hdesc = kzalloc_flex(*hdesc, maps, num_kernel_image_mappings);
if (!hdesc) {
printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
"hvtramp_descr.\n");
}
if (!current_thread_info()->utraps) {
current_thread_info()->utraps =
- kzalloc_objs(long, UT_TRAP_INSTRUCTION_31 + 1,
- GFP_KERNEL);
+ kzalloc_objs(long, UT_TRAP_INSTRUCTION_31 + 1);
if (!current_thread_info()->utraps)
return -ENOMEM;
current_thread_info()->utraps[0] = 1;
unsigned long *p = current_thread_info()->utraps;
current_thread_info()->utraps =
- kmalloc_objs(long, UT_TRAP_INSTRUCTION_31 + 1,
- GFP_KERNEL);
+ kmalloc_objs(long, UT_TRAP_INSTRUCTION_31 + 1);
if (!current_thread_info()->utraps) {
current_thread_info()->utraps = p;
return -ENOMEM;
result->max_iov_frags = num_extra_frags;
for (i = 0; i < max_size; i++) {
if (vp->header_size > 0)
- iov = kmalloc_objs(struct iovec, 3 + num_extra_frags,
- GFP_KERNEL);
+ iov = kmalloc_objs(struct iovec, 3 + num_extra_frags);
else
- iov = kmalloc_objs(struct iovec, 2 + num_extra_frags,
- GFP_KERNEL);
+ iov = kmalloc_objs(struct iovec, 2 + num_extra_frags);
if (iov == NULL)
goto out_fail;
mmsg_vector->msg_hdr.msg_iov = iov;
goto err;
for (die = 0; die < uncore_max_dies(); die++) {
- topology[die] = kzalloc_objs(**topology, type->num_boxes,
- GFP_KERNEL);
+ topology[die] = kzalloc_objs(**topology, type->num_boxes);
if (!topology[die])
goto clear;
for (idx = 0; idx < type->num_boxes; idx++) {
if (hv_isolation_type_tdx())
hv_vp_assist_page = NULL;
else
- hv_vp_assist_page = kzalloc_objs(*hv_vp_assist_page, nr_cpu_ids,
- GFP_KERNEL);
+ hv_vp_assist_page = kzalloc_objs(*hv_vp_assist_page, nr_cpu_ids);
if (!hv_vp_assist_page) {
ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
amd_northbridges.num = amd_num_nodes();
- nb = kzalloc_objs(struct amd_northbridge, amd_northbridges.num,
- GFP_KERNEL);
+ nb = kzalloc_objs(struct amd_northbridge, amd_northbridges.num);
if (!nb)
return -ENOMEM;
int nid;
int i;
- sgx_numa_nodes = kmalloc_objs(*sgx_numa_nodes, num_possible_nodes(),
- GFP_KERNEL);
+ sgx_numa_nodes = kmalloc_objs(*sgx_numa_nodes, num_possible_nodes());
if (!sgx_numa_nodes)
return false;
if (sanity_check_entries(entries, cpuid->nent, type))
return -EINVAL;
- array.entries = kvzalloc_objs(struct kvm_cpuid_entry2, cpuid->nent,
- GFP_KERNEL);
+ array.entries = kvzalloc_objs(struct kvm_cpuid_entry2, cpuid->nent);
if (!array.entries)
return -ENOMEM;
if (nr_user_entries < td_conf->num_cpuid_config)
return -E2BIG;
- caps = kzalloc_flex(*caps, cpuid.entries, td_conf->num_cpuid_config,
- GFP_KERNEL);
+ caps = kzalloc_flex(*caps, cpuid.entries, td_conf->num_cpuid_config);
if (!caps)
return -ENOMEM;
goto out;
/* Dynamic allocation is needed because of lockdep_register_key(). */
- blk_crypto_fallback_profile = kzalloc_obj(*blk_crypto_fallback_profile,
- GFP_KERNEL);
+ blk_crypto_fallback_profile = kzalloc_obj(*blk_crypto_fallback_profile);
if (!blk_crypto_fallback_profile) {
err = -ENOMEM;
goto fail_free_bioset;
profile->log_slot_ht_size = ilog2(slot_hashtable_size);
profile->slot_hashtable =
- kvmalloc_objs(profile->slot_hashtable[0], slot_hashtable_size,
- GFP_KERNEL);
+ kvmalloc_objs(profile->slot_hashtable[0], slot_hashtable_size);
if (!profile->slot_hashtable)
goto err_destroy;
for (i = 0; i < slot_hashtable_size; i++)
return -ENOMEM;
nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
- mapp->range.hmm_pfns = kvzalloc_objs(*mapp->range.hmm_pfns, nr_pages,
- GFP_KERNEL);
+ mapp->range.hmm_pfns = kvzalloc_objs(*mapp->range.hmm_pfns, nr_pages);
if (!mapp->range.hmm_pfns) {
ret = -ENOMEM;
goto free_map;
if (!try_module_get(THIS_MODULE))
return -EINVAL;
- struct ethosu_file_priv __free(kfree) *priv = kzalloc_obj(*priv,
- GFP_KERNEL);
+ struct ethosu_file_priv __free(kfree) *priv = kzalloc_obj(*priv);
if (!priv) {
ret = -ENOMEM;
goto err_put_mod;
struct ethosu_gem_object *bo,
u32 size)
{
- struct ethosu_validated_cmdstream_info __free(kfree) *info = kzalloc_obj(*info,
- GFP_KERNEL);
+ struct ethosu_validated_cmdstream_info __free(kfree) *info = kzalloc_obj(*info);
struct ethosu_device *edev = to_ethosu_device(ddev);
u32 *bocmds = bo->base.vaddr;
struct cmd_state st;
*cs_chunk_array = kmalloc_objs(**cs_chunk_array, num_chunks, GFP_ATOMIC);
if (!*cs_chunk_array)
- *cs_chunk_array = kmalloc_objs(**cs_chunk_array, num_chunks,
- GFP_KERNEL);
+ *cs_chunk_array = kmalloc_objs(**cs_chunk_array, num_chunks);
if (!*cs_chunk_array) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
int count = ARRAY_SIZE(hl_debugfs_list);
dev_entry->hdev = hdev;
- dev_entry->entry_arr = kmalloc_objs(struct hl_debugfs_entry, count,
- GFP_KERNEL);
+ dev_entry->entry_arr = kmalloc_objs(struct hl_debugfs_entry, count);
if (!dev_entry->entry_arr)
return -ENOMEM;
q->kernel_address = p;
- q->shadow_queue = kmalloc_objs(struct hl_cs_job *, HL_QUEUE_LENGTH,
- GFP_KERNEL);
+ q->shadow_queue = kmalloc_objs(struct hl_cs_job *, HL_QUEUE_LENGTH);
if (!q->shadow_queue) {
dev_err(hdev->dev,
"Failed to allocate shadow queue for H/W queue %d\n",
return -ENOMEM;
}
- hr_priv->mmu_asid_hop0 = kvzalloc_objs(struct pgt_info, prop->max_asid,
- GFP_KERNEL);
+ hr_priv->mmu_asid_hop0 = kvzalloc_objs(struct pgt_info, prop->max_asid);
if (ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0)) {
dev_err(hdev->dev, "Failed to allocate hr-mmu hop0 table\n");
rc = -ENOMEM;
int i, j;
struct hl_block_glbl_sec *glbl_sec;
- glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size,
- GFP_KERNEL);
+ glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size);
if (!glbl_sec)
return -ENOMEM;
int i, j, rc = 0;
struct hl_block_glbl_sec *glbl_sec;
- glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size,
- GFP_KERNEL);
+ glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size);
if (!glbl_sec)
return -ENOMEM;
int i, rc = 0;
struct hl_block_glbl_sec *glbl_sec;
- glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size,
- GFP_KERNEL);
+ glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size);
if (!glbl_sec)
return -ENOMEM;
int i;
struct hl_block_glbl_sec *glbl_sec;
- glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size,
- GFP_KERNEL);
+ glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size);
if (!glbl_sec)
return -ENOMEM;
block_array_size = ARRAY_SIZE(gaudi2_pb_dcr0_tpc0);
- glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, block_array_size,
- GFP_KERNEL);
+ glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, block_array_size);
if (!glbl_sec)
return -ENOMEM;
}
/* Buffer used to send MEMORY READ request to device via MHI */
- dump_info->read_buf_req = kzalloc_obj(*dump_info->read_buf_req,
- GFP_KERNEL);
+ dump_info->read_buf_req = kzalloc_obj(*dump_info->read_buf_req);
if (!dump_info->read_buf_req) {
ret = -ENOMEM;
goto free_dump_info;
{
char name[32];
- syndrome_data = kzalloc_objs(syndrome_data[0], max_nr_components,
- GFP_KERNEL);
+ syndrome_data = kzalloc_objs(syndrome_data[0], max_nr_components);
if (!syndrome_data)
return false;
if (pcc_data[pcc_ss_id]) {
pcc_data[pcc_ss_id]->refcount++;
} else {
- pcc_data[pcc_ss_id] = kzalloc_obj(struct cppc_pcc_data,
- GFP_KERNEL);
+ pcc_data[pcc_ss_id] = kzalloc_obj(struct cppc_pcc_data);
if (!pcc_data[pcc_ss_id])
return -ENOMEM;
pcc_data[pcc_ss_id]->refcount++;
return AE_OK;
}
- conn = kmalloc_flex(*conn, remote_name, csi2_res_src_length + 1,
- GFP_KERNEL);
+ conn = kmalloc_flex(*conn, remote_name, csi2_res_src_length + 1);
if (!conn)
return AE_OK;
!ops->profile_set || !ops->probe))
return ERR_PTR(-EINVAL);
- struct platform_profile_handler *pprof __free(kfree) = kzalloc_obj(*pprof,
- GFP_KERNEL);
+ struct platform_profile_handler *pprof __free(kfree) = kzalloc_obj(*pprof);
if (!pprof)
return ERR_PTR(-ENOMEM);
pr->performance->state_count = pss->package.count;
pr->performance->states =
- kmalloc_objs(struct acpi_processor_px, pss->package.count,
- GFP_KERNEL);
+ kmalloc_objs(struct acpi_processor_px, pss->package.count);
if (!pr->performance->states) {
result = -ENOMEM;
goto end;
pr->throttling.state_count = tss->package.count;
pr->throttling.states_tss =
- kmalloc_objs(struct acpi_processor_tx_tss, tss->package.count,
- GFP_KERNEL);
+ kmalloc_objs(struct acpi_processor_tx_tss, tss->package.count);
if (!pr->throttling.states_tss) {
result = -ENOMEM;
goto end;
riscv_acpi_irq_get_dep(handle, i, &gsi_handle);
i++) {
dep_devices.count = 1;
- dep_devices.handles = kzalloc_objs(*dep_devices.handles, 1,
- GFP_KERNEL);
+ dep_devices.handles = kzalloc_objs(*dep_devices.handles, 1);
if (!dep_devices.handles) {
acpi_handle_err(handle, "failed to allocate memory\n");
continue;
if (result)
goto err_unlock;
} else {
- acpi_device_bus_id = kzalloc_obj(*acpi_device_bus_id,
- GFP_KERNEL);
+ acpi_device_bus_id = kzalloc_obj(*acpi_device_bus_id);
if (!acpi_device_bus_id) {
result = -ENOMEM;
goto err_unlock;
* We cannot use devm_ here, since ahci_platform_put_resources() uses
* target_pwrs after devm_ have freed memory
*/
- hpriv->target_pwrs = kzalloc_objs(*hpriv->target_pwrs, hpriv->nports,
- GFP_KERNEL);
+ hpriv->target_pwrs = kzalloc_objs(*hpriv->target_pwrs, hpriv->nports);
if (!hpriv->target_pwrs) {
rc = -ENOMEM;
goto err_out;
}
/* rbpl_virt 64-bit pointers */
- he_dev->rbpl_virt = kmalloc_objs(*he_dev->rbpl_virt, RBPL_TABLE_SIZE,
- GFP_KERNEL);
+ he_dev->rbpl_virt = kmalloc_objs(*he_dev->rbpl_virt, RBPL_TABLE_SIZE);
if (!he_dev->rbpl_virt) {
hprintk("unable to allocate rbpl virt table\n");
goto out_free_rbpl_table;
buf_desc_ptr++;
tx_pkt_start += iadev->tx_buf_sz;
}
- iadev->tx_buf = kmalloc_objs(*iadev->tx_buf, iadev->num_tx_desc,
- GFP_KERNEL);
+ iadev->tx_buf = kmalloc_objs(*iadev->tx_buf, iadev->num_tx_desc);
if (!iadev->tx_buf) {
printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
goto err_free_dle;
sizeof(*cpcs),
DMA_TO_DEVICE);
}
- iadev->desc_tbl = kmalloc_objs(*iadev->desc_tbl, iadev->num_tx_desc,
- GFP_KERNEL);
+ iadev->desc_tbl = kmalloc_objs(*iadev->desc_tbl, iadev->num_tx_desc);
if (!iadev->desc_tbl) {
printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
goto err_free_all_tx_bufs;
memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
- iadev->testTable = kmalloc_objs(*iadev->testTable, iadev->num_vc,
- GFP_KERNEL);
+ iadev->testTable = kmalloc_objs(*iadev->testTable, iadev->num_vc);
if (!iadev->testTable) {
printk("Get freepage failed\n");
goto err_free_desc_tbl;
{
memset((caddr_t)vc, 0, sizeof(*vc));
memset((caddr_t)evc, 0, sizeof(*evc));
- iadev->testTable[i] = kmalloc_obj(struct testTable_t,
- GFP_KERNEL);
+ iadev->testTable[i] = kmalloc_obj(struct testTable_t);
if (!iadev->testTable[i])
goto err_free_test_tables;
iadev->testTable[i]->lastTime = 0;
if (!acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld))
return false;
- dev->physical_location = kzalloc_obj(*dev->physical_location,
- GFP_KERNEL);
+ dev->physical_location = kzalloc_obj(*dev->physical_location);
if (!dev->physical_location) {
ACPI_FREE(pld);
return false;
if (my_usize != p_usize) {
struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
- new_disk_conf = kzalloc_obj(struct disk_conf,
- GFP_KERNEL);
+ new_disk_conf = kzalloc_obj(struct disk_conf);
if (!new_disk_conf) {
put_ldev(device);
return -ENOMEM;
priv->cache.page_count = CACHE_PAGE_COUNT;
priv->cache.page_size = CACHE_PAGE_SIZE;
- priv->cache.tags = kzalloc_objs(struct ps3vram_tag, CACHE_PAGE_COUNT,
- GFP_KERNEL);
+ priv->cache.tags = kzalloc_objs(struct ps3vram_tag, CACHE_PAGE_COUNT);
if (!priv->cache.tags)
return -ENOMEM;
* We are using persistent grants, the grant is
* not mapped but we might have room for it.
*/
- persistent_gnt = kmalloc_obj(struct persistent_gnt,
- GFP_KERNEL);
+ persistent_gnt = kmalloc_obj(struct persistent_gnt);
if (!persistent_gnt) {
/*
* If we don't have enough memory to
{
unsigned int r;
- blkif->rings = kzalloc_objs(struct xen_blkif_ring, blkif->nr_rings,
- GFP_KERNEL);
+ blkif->rings = kzalloc_objs(struct xen_blkif_ring, blkif->nr_rings);
if (!blkif->rings)
return -ENOMEM;
goto fail;
list_add_tail(&req->free_list, &ring->pending_free);
for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
- req->segments[j] = kzalloc_obj(*req->segments[0],
- GFP_KERNEL);
+ req->segments[j] = kzalloc_obj(*req->segments[0]);
if (!req->segments[j])
goto fail;
}
for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
- req->indirect_pages[j] = kzalloc_obj(*req->indirect_pages[0],
- GFP_KERNEL);
+ req->indirect_pages[j] = kzalloc_obj(*req->indirect_pages[0]);
if (!req->indirect_pages[j])
goto fail;
}
for (i = 0; i < BLK_RING_SIZE(info); i++) {
rinfo->shadow[i].grants_used =
- kvzalloc_objs(rinfo->shadow[i].grants_used[0], grants,
- GFP_KERNEL);
+ kvzalloc_objs(rinfo->shadow[i].grants_used[0], grants);
rinfo->shadow[i].sg = kvzalloc_objs(rinfo->shadow[i].sg[0],
psegs, GFP_KERNEL);
if (info->max_indirect_segments)
(unsigned long)z_remap_nocache_nonser(paddr, size);
#endif
z2ram_map =
- kmalloc_objs(z2ram_map[0], size / Z2RAM_CHUNKSIZE,
- GFP_KERNEL);
+ kmalloc_objs(z2ram_map[0], size / Z2RAM_CHUNKSIZE);
if (z2ram_map == NULL) {
printk(KERN_ERR DEVICE_NAME
": cannot get mem for z2ram_map\n");
if (ret)
return ret;
- mhi_cntrl->mhi_cmd = kzalloc_objs(*mhi_cntrl->mhi_cmd, NR_OF_CMD_RINGS,
- GFP_KERNEL);
+ mhi_cntrl->mhi_cmd = kzalloc_objs(*mhi_cntrl->mhi_cmd, NR_OF_CMD_RINGS);
if (!mhi_cntrl->mhi_cmd) {
ret = -ENOMEM;
goto err_free_ch;
return -ENOMEM;
/* Allocate memory for entries */
- img_info->mhi_buf = kzalloc_objs(*img_info->mhi_buf, segments,
- GFP_KERNEL);
+ img_info->mhi_buf = kzalloc_objs(*img_info->mhi_buf, segments);
if (!img_info->mhi_buf)
goto error_alloc_mhi_buf;
num = config->num_events;
mhi_cntrl->total_ev_rings = num;
- mhi_cntrl->mhi_event = kzalloc_objs(*mhi_cntrl->mhi_event, num,
- GFP_KERNEL);
+ mhi_cntrl->mhi_event = kzalloc_objs(*mhi_cntrl->mhi_event, num);
if (!mhi_cntrl->mhi_event)
return -ENOMEM;
if (ret)
return -EINVAL;
- mhi_cntrl->mhi_cmd = kzalloc_objs(*mhi_cntrl->mhi_cmd, NR_OF_CMD_RINGS,
- GFP_KERNEL);
+ mhi_cntrl->mhi_cmd = kzalloc_objs(*mhi_cntrl->mhi_cmd, NR_OF_CMD_RINGS);
if (!mhi_cntrl->mhi_cmd) {
ret = -ENOMEM;
goto err_free_event;
int retval = 0;
int i;
- tables = kzalloc_objs(struct serverworks_page_map *, nr_tables + 1,
- GFP_KERNEL);
+ tables = kzalloc_objs(struct serverworks_page_map *, nr_tables + 1);
if (tables == NULL)
return -ENOMEM;
if (table == NULL)
return -ENOMEM;
- uninorth_priv.pages_arr = kmalloc_objs(struct page *, 1 << page_order,
- GFP_KERNEL);
+ uninorth_priv.pages_arr = kmalloc_objs(struct page *, 1 << page_order);
if (uninorth_priv.pages_arr == NULL)
goto enomem;
vqs = kmalloc_objs(struct virtqueue *, nr_queues);
vqs_info = kzalloc_objs(*vqs_info, nr_queues);
portdev->in_vqs = kmalloc_objs(struct virtqueue *, nr_ports);
- portdev->out_vqs = kmalloc_objs(struct virtqueue *, nr_ports,
- GFP_KERNEL);
+ portdev->out_vqs = kmalloc_objs(struct virtqueue *, nr_ports);
if (!vqs || !vqs_info || !portdev->in_vqs || !portdev->out_vqs) {
err = -ENOMEM;
goto free;
if (!scu_base)
return;
- aspeed_clk_data = kzalloc_flex(*aspeed_clk_data, hws, ASPEED_NUM_CLKS,
- GFP_KERNEL);
+ aspeed_clk_data = kzalloc_flex(*aspeed_clk_data, hws, ASPEED_NUM_CLKS);
if (!aspeed_clk_data)
return;
aspeed_clk_data->num = ASPEED_NUM_CLKS;
if (WARN_ON(!asiu))
return;
- asiu->clk_data = kzalloc_flex(*asiu->clk_data, hws, num_clks,
- GFP_KERNEL);
+ asiu->clk_data = kzalloc_flex(*asiu->clk_data, hws, num_clks);
if (WARN_ON(!asiu->clk_data))
goto err_clks;
asiu->clk_data->num = num_clks;
int ret;
int i;
- gemini_clk_data = kzalloc_flex(*gemini_clk_data, hws, GEMINI_NUM_CLKS,
- GFP_KERNEL);
+ gemini_clk_data = kzalloc_flex(*gemini_clk_data, hws, GEMINI_NUM_CLKS);
if (!gemini_clk_data)
return;
gemini_clk_data->num = GEMINI_NUM_CLKS;
const char *parent_name;
struct clk_hw *hw;
- m10v_clk_data = kzalloc_flex(*m10v_clk_data, hws, M10V_NUM_CLKS,
- GFP_KERNEL);
+ m10v_clk_data = kzalloc_flex(*m10v_clk_data, hws, M10V_NUM_CLKS);
if (!m10v_clk_data)
return;
stm32fx_end_primary_clk = data->end_primary;
- clks = kmalloc_objs(*clks, data->gates_num + stm32fx_end_primary_clk,
- GFP_KERNEL);
+ clks = kmalloc_objs(*clks, data->gates_num + stm32fx_end_primary_clk);
if (!clks)
goto fail;
void __iomem *anatop_base, *base;
int ret;
- clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6QDL_CLK_END,
- GFP_KERNEL);
+ clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6QDL_CLK_END);
if (WARN_ON(!clk_hw_data))
return;
void __iomem *base;
int ret;
- clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SL_CLK_END,
- GFP_KERNEL);
+ clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SL_CLK_END);
if (WARN_ON(!clk_hw_data))
return;
struct device_node *np;
void __iomem *base;
- clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SLL_CLK_END,
- GFP_KERNEL);
+ clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SLL_CLK_END);
if (WARN_ON(!clk_hw_data))
return;
void __iomem *base;
bool lcdif1_assigned_clk;
- clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SX_CLK_CLK_END,
- GFP_KERNEL);
+ clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6SX_CLK_CLK_END);
if (WARN_ON(!clk_hw_data))
return;
struct device_node *np;
void __iomem *base;
- clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6UL_CLK_END,
- GFP_KERNEL);
+ clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX6UL_CLK_END);
if (WARN_ON(!clk_hw_data))
return;
struct clk_hw **hws;
void __iomem *base;
- clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_SCG1_END,
- GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_SCG1_END);
if (!clk_data)
return;
struct clk_hw **hws;
void __iomem *base;
- clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_PCC2_END,
- GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_PCC2_END);
if (!clk_data)
return;
struct clk_hw **hws;
void __iomem *base;
- clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_PCC3_END,
- GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_PCC3_END);
if (!clk_data)
return;
struct clk_hw **hws;
void __iomem *base;
- clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_SMC1_END,
- GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, IMX7ULP_CLK_SMC1_END);
if (!clk_data)
return;
void __iomem *base;
int ret;
- clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX8MM_CLK_END,
- GFP_KERNEL);
+ clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX8MM_CLK_END);
if (WARN_ON(!clk_hw_data))
return -ENOMEM;
unsigned i;
int err;
- cgu->clocks.clks = kzalloc_objs(struct clk *, cgu->clocks.clk_num,
- GFP_KERNEL);
+ cgu->clocks.clks = kzalloc_objs(struct clk *, cgu->clocks.clk_num);
if (!cgu->clocks.clks) {
err = -ENOMEM;
goto err_out;
if (desc->get_refclk_freq)
clk_data.clk_num += 1;
- clk_data.clks = kzalloc_objs(*clk_data.clks, clk_data.clk_num,
- GFP_KERNEL);
+ clk_data.clks = kzalloc_objs(*clk_data.clks, clk_data.clk_num);
if (WARN_ON(!clk_data.clks)) {
iounmap(base);
return;
} else
clk_data->clk_num = data->outputs_nb;
- clk_data->clks = kzalloc_objs(struct clk *, clk_data->clk_num,
- GFP_KERNEL);
+ clk_data->clks = kzalloc_objs(struct clk *, clk_data->clk_num);
if (!clk_data->clks)
goto err;
return;
clk_data->clk_num = num_odfs;
- clk_data->clks = kzalloc_objs(struct clk *, clk_data->clk_num,
- GFP_KERNEL);
+ clk_data->clks = kzalloc_objs(struct clk *, clk_data->clk_num);
if (!clk_data->clks)
goto err;
if (WARN_ON(banks > ARRAY_SIZE(periph_regs)))
return NULL;
- periph_clk_enb_refcnt = kzalloc_objs(*periph_clk_enb_refcnt, 32 * banks,
- GFP_KERNEL);
+ periph_clk_enb_refcnt = kzalloc_objs(*periph_clk_enb_refcnt, 32 * banks);
if (!periph_clk_enb_refcnt)
return NULL;
dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
void __iomem *base, int irq, unsigned long freq)
{
- struct dw_apb_clock_event_device *dw_ced = kzalloc_obj(*dw_ced,
- GFP_KERNEL);
+ struct dw_apb_clock_event_device *dw_ced = kzalloc_obj(*dw_ced);
int err;
if (!dw_ced)
/* Allocate and setup the channels. */
cmt->num_channels = hweight8(cmt->hw_channels);
- cmt->channels = kzalloc_objs(*cmt->channels, cmt->num_channels,
- GFP_KERNEL);
+ cmt->channels = kzalloc_objs(*cmt->channels, cmt->num_channels);
if (cmt->channels == NULL) {
ret = -ENOMEM;
goto err_unmap;
mtu->num_channels = min_t(unsigned int, ret,
ARRAY_SIZE(sh_mtu2_channel_offsets));
- mtu->channels = kzalloc_objs(*mtu->channels, mtu->num_channels,
- GFP_KERNEL);
+ mtu->channels = kzalloc_objs(*mtu->channels, mtu->num_channels);
if (mtu->channels == NULL) {
ret = -ENOMEM;
goto err_unmap;
}
/* Allocate and setup the channels. */
- tmu->channels = kzalloc_objs(*tmu->channels, tmu->num_channels,
- GFP_KERNEL);
+ tmu->channels = kzalloc_objs(*tmu->channels, tmu->num_channels);
if (tmu->channels == NULL) {
ret = -ENOMEM;
goto err_unmap;
if (s->n_chan == 32) {
const struct comedi_lrange **range_table_list;
- range_table_list = kmalloc_objs(*range_table_list, 32,
- GFP_KERNEL);
+ range_table_list = kmalloc_objs(*range_table_list, 32);
if (!range_table_list)
return -ENOMEM;
s->range_table_list = range_table_list;
if (nsp->sk) {
sk = nsp->sk;
if (sk->sk_user_data == NULL) {
- sk->sk_user_data = kzalloc_obj(struct proc_input,
- GFP_KERNEL);
+ sk->sk_user_data = kzalloc_obj(struct proc_input);
if (sk->sk_user_data == NULL) {
err = ENOMEM;
goto out;
goto err_unreg;
}
- freq_table = kzalloc_objs(*freq_table, perf->state_count + 1,
- GFP_KERNEL);
+ freq_table = kzalloc_objs(*freq_table, perf->state_count + 1);
if (!freq_table) {
result = -ENOMEM;
goto err_unreg;
return -EINVAL;
}
- armada37xx_cpufreq_state = kmalloc_obj(*armada37xx_cpufreq_state,
- GFP_KERNEL);
+ armada37xx_cpufreq_state = kmalloc_obj(*armada37xx_cpufreq_state);
if (!armada37xx_cpufreq_state) {
clk_put(clk);
return -ENOMEM;
return -EINVAL;
}
- longhaul_table = kzalloc_objs(*longhaul_table, numscales + 1,
- GFP_KERNEL);
+ longhaul_table = kzalloc_objs(*longhaul_table, numscales + 1);
if (!longhaul_table)
return -ENOMEM;
impl = ((ver >> 32) & 0xffff);
if (manuf == 0x17 && impl == 0x13) {
- us2e_freq_table = kzalloc_objs(*us2e_freq_table, NR_CPUS,
- GFP_KERNEL);
+ us2e_freq_table = kzalloc_objs(*us2e_freq_table, NR_CPUS);
if (!us2e_freq_table)
return -ENOMEM;
impl == CHEETAH_PLUS_IMPL ||
impl == JAGUAR_IMPL ||
impl == PANTHER_IMPL)) {
- us3_freq_table = kzalloc_objs(*us3_freq_table, NR_CPUS,
- GFP_KERNEL);
+ us3_freq_table = kzalloc_objs(*us3_freq_table, NR_CPUS);
if (!us3_freq_table)
return -ENOMEM;
if (!dev->pdr)
return -ENOMEM;
- dev->pdr_uinfo = kzalloc_objs(struct pd_uinfo, PPC4XX_NUM_PD,
- GFP_KERNEL);
+ dev->pdr_uinfo = kzalloc_objs(struct pd_uinfo, PPC4XX_NUM_PD);
if (!dev->pdr_uinfo) {
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
struct nitrox_vfdev *vfdev;
int i;
- ndev->iov.vfdev = kzalloc_objs(struct nitrox_vfdev, ndev->iov.num_vfs,
- GFP_KERNEL);
+ ndev->iov.vfdev = kzalloc_objs(struct nitrox_vfdev, ndev->iov.num_vfs);
if (!ndev->iov.vfdev)
return -ENOMEM;
if (!qm->qp_array)
return -ENOMEM;
- qm->poll_data = kzalloc_objs(struct hisi_qm_poll_data, qm->qp_num,
- GFP_KERNEL);
+ qm->poll_data = kzalloc_objs(struct hisi_qm_poll_data, qm->qp_num);
if (!qm->poll_data) {
kfree(qm->qp_array);
return -ENOMEM;
if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
- qm->factor = kzalloc_objs(struct qm_shaper_factor, total_func,
- GFP_KERNEL);
+ qm->factor = kzalloc_objs(struct qm_shaper_factor, total_func);
if (!qm->factor)
return -ENOMEM;
ctx->hlf_q_num = sec->ctx_q_num >> 1;
ctx->pbuf_supported = ctx->sec->iommu_used;
- ctx->qp_ctx = kzalloc_objs(struct sec_qp_ctx, sec->ctx_q_num,
- GFP_KERNEL);
+ ctx->qp_ctx = kzalloc_objs(struct sec_qp_ctx, sec->ctx_q_num);
if (!ctx->qp_ctx) {
ret = -ENOMEM;
goto err_destroy_qps;
}
spin_lock_init(&req_q->req_lock);
- req_q->q = kzalloc_objs(struct hisi_zip_req, req_q->size,
- GFP_KERNEL);
+ req_q->q = kzalloc_objs(struct hisi_zip_req, req_q->size);
if (!req_q->q) {
ret = -ENOMEM;
if (i == 0)
if (unlikely(!ae_count))
return ERR_PTR(-EINVAL);
- fw_counters = kmalloc_flex(*fw_counters, ae_counters, ae_count,
- GFP_KERNEL);
+ fw_counters = kmalloc_flex(*fw_counters, ae_counters, ae_count);
if (!fw_counters)
return ERR_PTR(-ENOMEM);
goto err_del_cfg;
/* Allocate memory for VF info structs */
- accel_dev->pf.vf_info = kzalloc_objs(struct adf_accel_vf_info, totalvfs,
- GFP_KERNEL);
+ accel_dev->pf.vf_info = kzalloc_objs(struct adf_accel_vf_info, totalvfs);
ret = -ENOMEM;
if (!accel_dev->pf.vf_info)
goto err_del_cfg;
if (!cptpf->flr_wq)
return -ENOMEM;
- cptpf->flr_work = kzalloc_objs(struct cptpf_flr_work, num_vfs,
- GFP_KERNEL);
+ cptpf->flr_work = kzalloc_objs(struct cptpf_flr_work, num_vfs);
if (!cptpf->flr_work)
goto destroy_wq;
return alloc_sg_len;
/* We allocate to much sg entry, but it is easier */
- *new_sg = kmalloc_objs(struct scatterlist, (size_t)alloc_sg_len,
- GFP_KERNEL);
+ *new_sg = kmalloc_objs(struct scatterlist, (size_t)alloc_sg_len);
if (!*new_sg)
return -ENOMEM;
static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
{
- vi->data_vq = kzalloc_objs(*vi->data_vq, vi->max_data_queues,
- GFP_KERNEL);
+ vi->data_vq = kzalloc_objs(*vi->data_vq, vi->max_data_queues);
if (!vi->data_vq)
return -ENOMEM;
static int userspace_init(struct devfreq *devfreq)
{
int err = 0;
- struct userspace_data *data = kzalloc_obj(struct userspace_data,
- GFP_KERNEL);
+ struct userspace_data *data = kzalloc_obj(struct userspace_data);
if (!data) {
err = -ENOMEM;
if (!ubuf->offsets)
return -ENOMEM;
- ubuf->pinned_folios = kvmalloc_objs(*ubuf->pinned_folios, pgcnt,
- GFP_KERNEL);
+ ubuf->pinned_folios = kvmalloc_objs(*ubuf->pinned_folios, pgcnt);
if (!ubuf->pinned_folios)
return -ENOMEM;
}
/* Initialize physical channels */
- pl08x->phy_chans = kzalloc_objs(*pl08x->phy_chans, vd->channels,
- GFP_KERNEL);
+ pl08x->phy_chans = kzalloc_objs(*pl08x->phy_chans, vd->channels);
if (!pl08x->phy_chans) {
ret = -ENOMEM;
goto out_no_phychans;
struct plx_dma_desc *desc;
int i;
- plxdev->desc_ring = kzalloc_objs(*plxdev->desc_ring, PLX_DMA_RING_COUNT,
- GFP_KERNEL);
+ plxdev->desc_ring = kzalloc_objs(*plxdev->desc_ring, PLX_DMA_RING_COUNT);
if (!plxdev->desc_ring)
return -ENOMEM;
if (ret < 0)
return ret;
- chan->sw_desc_pool = kzalloc_objs(*desc, ZYNQMP_DMA_NUM_DESCS,
- GFP_KERNEL);
+ chan->sw_desc_pool = kzalloc_objs(*desc, ZYNQMP_DMA_NUM_DESCS);
if (!chan->sw_desc_pool)
return -ENOMEM;
if (!dev_ctl)
return NULL;
- dev_inst = kzalloc_objs(struct edac_device_instance, nr_instances,
- GFP_KERNEL);
+ dev_inst = kzalloc_objs(struct edac_device_instance, nr_instances);
if (!dev_inst)
goto free;
if (!ctx)
return -ENOMEM;
- ras_attr_groups = kzalloc_objs(*ras_attr_groups, attr_gcnt + 1,
- GFP_KERNEL);
+ ras_attr_groups = kzalloc_objs(*ras_attr_groups, attr_gcnt + 1);
if (!ras_attr_groups)
goto ctx_free;
}
if (mem_repair_cnt) {
- ctx->mem_repair = kzalloc_objs(*ctx->mem_repair, mem_repair_cnt,
- GFP_KERNEL);
+ ctx->mem_repair = kzalloc_objs(*ctx->mem_repair, mem_repair_cnt);
if (!ctx->mem_repair)
goto data_mem_free;
}
csr->csrow_idx = row;
csr->mci = mci;
csr->nr_channels = tot_channels;
- csr->channels = kzalloc_objs(*csr->channels, tot_channels,
- GFP_KERNEL);
+ csr->channels = kzalloc_objs(*csr->channels, tot_channels);
if (!csr->channels)
return -ENOMEM;
if (!i7core_dev)
return NULL;
- i7core_dev->pdev = kzalloc_objs(*i7core_dev->pdev, table->n_devs,
- GFP_KERNEL);
+ i7core_dev->pdev = kzalloc_objs(*i7core_dev->pdev, table->n_devs);
if (!i7core_dev->pdev) {
kfree(i7core_dev);
return NULL;
goto err_put_addrmatch;
if (!pvt->is_registered) {
- pvt->chancounts_dev = kzalloc_obj(*pvt->chancounts_dev,
- GFP_KERNEL);
+ pvt->chancounts_dev = kzalloc_obj(*pvt->chancounts_dev);
if (!pvt->chancounts_dev) {
rc = -ENOMEM;
goto err_del_addrmatch;
if (!edev->max_supported)
return 0;
- edev->cables = kzalloc_objs(*edev->cables, edev->max_supported,
- GFP_KERNEL);
+ edev->cables = kzalloc_objs(*edev->cables, edev->max_supported);
if (!edev->cables)
return -ENOMEM;
for (index = 0; edev->mutually_exclusive[index]; index++)
;
- edev->attrs_muex = kzalloc_objs(*edev->attrs_muex, index + 1,
- GFP_KERNEL);
+ edev->attrs_muex = kzalloc_objs(*edev->attrs_muex, index + 1);
if (!edev->attrs_muex)
return -ENOMEM;
- edev->d_attrs_muex = kzalloc_objs(*edev->d_attrs_muex, index,
- GFP_KERNEL);
+ edev->d_attrs_muex = kzalloc_objs(*edev->d_attrs_muex, index);
if (!edev->d_attrs_muex) {
kfree(edev->attrs_muex);
return -ENOMEM;
spin_lock_init(&edev->lock);
if (edev->max_supported) {
- edev->nh = kzalloc_objs(*edev->nh, edev->max_supported,
- GFP_KERNEL);
+ edev->nh = kzalloc_objs(*edev->nh, edev->max_supported);
if (!edev->nh) {
ret = -ENOMEM;
goto err_alloc_nh;
goto skip_device;
}
- entry = kzalloc_objs(*entry, dev_header->prop_count + 1,
- GFP_KERNEL);
+ entry = kzalloc_objs(*entry, dev_header->prop_count + 1);
if (!entry) {
dev_err(dev, "cannot allocate properties\n");
goto skip_device;
if (qcaps.capsule_count == ULONG_MAX)
return -EINVAL;
- capsules = kzalloc_objs(efi_capsule_header_t, qcaps.capsule_count + 1,
- GFP_KERNEL);
+ capsules = kzalloc_objs(efi_capsule_header_t, qcaps.capsule_count + 1);
if (!capsules)
return -ENOMEM;
return ERR_PTR(-EINVAL);
}
- struct qcom_tzmem_pool *pool __free(kfree) = kzalloc_obj(*pool,
- GFP_KERNEL);
+ struct qcom_tzmem_pool *pool __free(kfree) = kzalloc_obj(*pool);
if (!pool)
return ERR_PTR(-ENOMEM);
/* then add irq resource */
if (feature->nr_irqs) {
- ddev->irqs = kzalloc_objs(*ddev->irqs, feature->nr_irqs,
- GFP_KERNEL);
+ ddev->irqs = kzalloc_objs(*ddev->irqs, feature->nr_irqs);
if (!ddev->irqs) {
ret = -ENOMEM;
goto put_dev;
if (binfo->len - ofst < size)
return -EINVAL;
- finfo = kzalloc_flex(*finfo, params, dfh_psize / sizeof(u64),
- GFP_KERNEL);
+ finfo = kzalloc_flex(*finfo, params, dfh_psize / sizeof(u64));
if (!finfo)
return -ENOMEM;
static int agilent_82350b_allocate_private(struct gpib_board *board)
{
- board->private_data = kzalloc_obj(struct agilent_82350b_priv,
- GFP_KERNEL);
+ board->private_data = kzalloc_obj(struct agilent_82350b_priv);
if (!board->private_data)
return -ENOMEM;
return 0;
{
struct agilent_82357a_priv *a_priv;
- board->private_data = kzalloc_obj(struct agilent_82357a_priv,
- GFP_KERNEL);
+ board->private_data = kzalloc_obj(struct agilent_82357a_priv);
if (!board->private_data)
return -ENOMEM;
a_priv = board->private_data;
mutex_unlock(&file_priv->descriptors_mutex);
return -ERANGE;
}
- file_priv->descriptors[i] = kmalloc_obj(struct gpib_descriptor,
- GFP_KERNEL);
+ file_priv->descriptors[i] = kmalloc_obj(struct gpib_descriptor);
if (!file_priv->descriptors[i]) {
mutex_unlock(&file_priv->descriptors_mutex);
return -ENOMEM;
{
int id;
- struct gpio_sim_device *dev __free(kfree) = kzalloc_obj(*dev,
- GFP_KERNEL);
+ struct gpio_sim_device *dev __free(kfree) = kzalloc_obj(*dev);
if (!dev)
return ERR_PTR(-ENOMEM);
gpio_virtuser_config_make_device_group(struct config_group *group,
const char *name)
{
- struct gpio_virtuser_device *dev __free(kfree) = kzalloc_obj(*dev,
- GFP_KERNEL);
+ struct gpio_virtuser_device *dev __free(kfree) = kzalloc_obj(*dev);
if (!dev)
return ERR_PTR(-ENOMEM);
{
char *con_id_cpy __free(kfree) = NULL;
- struct gpio_shared_ref *ref __free(kfree) = kzalloc_obj(*ref,
- GFP_KERNEL);
+ struct gpio_shared_ref *ref __free(kfree) = kzalloc_obj(*ref);
if (!ref)
return NULL;
switch (acp_machine_id) {
case ST_JADEITE:
{
- adev->acp.acp_cell = kzalloc_objs(struct mfd_cell, 2,
- GFP_KERNEL);
+ adev->acp.acp_cell = kzalloc_objs(struct mfd_cell, 2);
if (!adev->acp.acp_cell) {
r = -ENOMEM;
goto failure;
goto failure;
}
- i2s_pdata = kzalloc_objs(struct i2s_platform_data, 1,
- GFP_KERNEL);
+ i2s_pdata = kzalloc_objs(struct i2s_platform_data, 1);
if (!i2s_pdata) {
r = -ENOMEM;
goto failure;
break;
}
default:
- adev->acp.acp_cell = kzalloc_objs(struct mfd_cell, ACP_DEVS,
- GFP_KERNEL);
+ adev->acp.acp_cell = kzalloc_objs(struct mfd_cell, ACP_DEVS);
if (!adev->acp.acp_cell) {
r = -ENOMEM;
goto failure;
}
- i2s_pdata = kzalloc_objs(struct i2s_platform_data, 3,
- GFP_KERNEL);
+ i2s_pdata = kzalloc_objs(struct i2s_platform_data, 3);
if (!i2s_pdata) {
r = -ENOMEM;
goto failure;
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
- struct amdgpu_cgs_device *cgs_device = kmalloc_obj(*cgs_device,
- GFP_KERNEL);
+ struct amdgpu_cgs_device *cgs_device = kmalloc_obj(*cgs_device);
if (!cgs_device) {
drm_err(adev_to_drm(adev), "Couldn't allocate CGS device structure\n");
}
if (is_dp_bridge) {
- amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig,
- GFP_KERNEL);
+ amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
- amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig,
- GFP_KERNEL);
+ amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
- amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig,
- GFP_KERNEL);
+ amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
- amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig,
- GFP_KERNEL);
+ amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_eDP:
- amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig,
- GFP_KERNEL);
+ amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
- amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig,
- GFP_KERNEL);
+ amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
return PTR_ERR(chunk_array);
p->nchunks = cs->in.num_chunks;
- p->chunks = kvmalloc_objs(struct amdgpu_cs_chunk, p->nchunks,
- GFP_KERNEL);
+ p->chunks = kvmalloc_objs(struct amdgpu_cs_chunk, p->nchunks);
if (!p->chunks) {
ret = -ENOMEM;
goto free_chunk;
switch (le16_to_cpu(nps_info->v1.header.version_major)) {
case 1:
- mem_ranges = kvzalloc_objs(*mem_ranges, nps_info->v1.count,
- GFP_KERNEL);
+ mem_ranges = kvzalloc_objs(*mem_ranges, nps_info->v1.count);
if (!mem_ranges)
return -ENOMEM;
*nps_type = nps_info->v1.nps_type;
mux->real_ring = ring;
mux->num_ring_entries = 0;
- mux->ring_entry = kzalloc_objs(struct amdgpu_mux_entry, entry_size,
- GFP_KERNEL);
+ mux->ring_entry = kzalloc_objs(struct amdgpu_mux_entry, entry_size);
if (!mux->ring_entry)
return -ENOMEM;
}
/* Allocate for init_data_hdr */
- init_data_hdr = kzalloc_obj(struct amd_sriov_msg_init_data_header,
- GFP_KERNEL);
+ init_data_hdr = kzalloc_obj(struct amd_sriov_msg_init_data_header);
if (!init_data_hdr)
return -ENOMEM;
amdgpu_atombios_encoder_get_dig_info(struct amdgpu_encoder *amdgpu_encoder)
{
int encoder_enum = (amdgpu_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- struct amdgpu_encoder_atom_dig *dig = kzalloc_obj(struct amdgpu_encoder_atom_dig,
- GFP_KERNEL);
+ struct amdgpu_encoder_atom_dig *dig = kzalloc_obj(struct amdgpu_encoder_atom_dig);
if (!dig)
return NULL;
/* DCE10 has audio blocks tied to DIG encoders */
for (i = 0; i < adev->mode_info.num_dig; i++) {
- adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt,
- GFP_KERNEL);
+ adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt);
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
/* DCE6 has audio blocks tied to DIG encoders */
for (i = 0; i < adev->mode_info.num_dig; i++) {
- adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt,
- GFP_KERNEL);
+ adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt);
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
/* DCE8 has audio blocks tied to DIG encoders */
for (i = 0; i < adev->mode_info.num_dig; i++) {
- adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt,
- GFP_KERNEL);
+ adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt);
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
adev->vm_manager.vram_base_offset = 0;
}
- adev->gmc.vm_fault_info = kmalloc_obj(struct kfd_vm_fault_info,
- GFP_KERNEL);
+ adev->gmc.vm_fault_info = kmalloc_obj(struct kfd_vm_fault_info);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
adev->vm_manager.vram_base_offset = 0;
}
- adev->gmc.vm_fault_info = kmalloc_obj(struct kfd_vm_fault_info,
- GFP_KERNEL);
+ adev->gmc.vm_fault_info = kmalloc_obj(struct kfd_vm_fault_info);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size)
return -EINVAL;
- device_buckets = kmalloc_objs(*device_buckets, args->num_devices,
- GFP_KERNEL);
+ device_buckets = kmalloc_objs(*device_buckets, args->num_devices);
if (!device_buckets)
return -ENOMEM;
struct kfd_event_waiter *event_waiters;
uint32_t i;
- event_waiters = kzalloc_objs(struct kfd_event_waiter, num_events,
- GFP_KERNEL);
+ event_waiters = kzalloc_objs(struct kfd_event_waiter, num_events);
if (!event_waiters)
return NULL;
wave_cnt = 0;
max_waves_per_cu = 0;
- cu_occupancy = kzalloc_objs(*cu_occupancy, AMDGPU_MAX_QUEUES,
- GFP_KERNEL);
+ cu_occupancy = kzalloc_objs(*cu_occupancy, AMDGPU_MAX_QUEUES);
if (!cu_occupancy)
return -ENOMEM;
int i = 0;
struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
- hpd_rx_offload_wq = kzalloc_objs(*hpd_rx_offload_wq, max_caps,
- GFP_KERNEL);
+ hpd_rx_offload_wq = kzalloc_objs(*hpd_rx_offload_wq, max_caps);
if (!hpd_rx_offload_wq)
return NULL;
}
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
init_completion(&adev->dm.dmub_aux_transfer_done);
- adev->dm.dmub_notify = kzalloc_obj(struct dmub_notification,
- GFP_KERNEL);
+ adev->dm.dmub_notify = kzalloc_obj(struct dmub_notification);
if (!adev->dm.dmub_notify) {
drm_info(adev_to_drm(adev), "fail to allocate adev->dm.dmub_notify");
goto error;
static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
- struct hpd_rx_irq_offload_work *offload_work = kzalloc_obj(*offload_work,
- GFP_KERNEL);
+ struct hpd_rx_irq_offload_work *offload_work = kzalloc_obj(*offload_work);
if (!offload_work) {
drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n");
link = dc_get_link_at_index(dm->dc, i);
if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
- struct amdgpu_dm_wb_connector *wbcon = kzalloc_obj(*wbcon,
- GFP_KERNEL);
+ struct amdgpu_dm_wb_connector *wbcon = kzalloc_obj(*wbcon);
if (!wbcon) {
drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n");
if (!sclk)
goto free_yclk;
- tiling_mode = kzalloc_objs(*tiling_mode, maximum_number_of_surfaces,
- GFP_KERNEL);
+ tiling_mode = kzalloc_objs(*tiling_mode, maximum_number_of_surfaces);
if (!tiling_mode)
goto free_sclk;
- surface_type = kzalloc_objs(*surface_type, maximum_number_of_surfaces,
- GFP_KERNEL);
+ surface_type = kzalloc_objs(*surface_type, maximum_number_of_surfaces);
if (!surface_type)
goto free_tiling_mode;
int pipe_count,
struct dce_bw_output *calcs_output)
{
- struct bw_calcs_data *data = kzalloc_obj(struct bw_calcs_data,
- GFP_KERNEL);
+ struct bw_calcs_data *data = kzalloc_obj(struct bw_calcs_data);
if (!data)
return false;
switch (asic_id.chip_family) {
#if defined(CONFIG_DRM_AMD_DC_SI)
case FAMILY_SI: {
- struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
#endif
case FAMILY_CI:
case FAMILY_KV: {
- struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return &clk_mgr->base;
}
case FAMILY_CZ: {
- struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return &clk_mgr->base;
}
case FAMILY_VI: {
- struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return &clk_mgr->base;
}
case FAMILY_AI: {
- struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
}
#if defined(CONFIG_DRM_AMD_DC_FP)
case FAMILY_RV: {
- struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return &clk_mgr->base;
}
case FAMILY_NV: {
- struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
}
case FAMILY_VGH:
if (ASICREV_IS_VANGOGH(asic_id.hw_internal_rev)) {
- struct clk_mgr_vgh *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_vgh *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
break;
case FAMILY_YELLOW_CARP: {
- struct clk_mgr_dcn31 *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_dcn31 *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
}
break;
case AMDGPU_FAMILY_GC_10_3_6: {
- struct clk_mgr_dcn315 *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_dcn315 *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
}
break;
case AMDGPU_FAMILY_GC_10_3_7: {
- struct clk_mgr_dcn316 *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_dcn316 *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
}
break;
case AMDGPU_FAMILY_GC_11_0_0: {
- struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_internal *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
}
case AMDGPU_FAMILY_GC_11_0_1: {
- struct clk_mgr_dcn314 *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_dcn314 *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
break;
case AMDGPU_FAMILY_GC_11_5_0: {
- struct clk_mgr_dcn35 *clk_mgr = kzalloc_obj(*clk_mgr,
- GFP_KERNEL);
+ struct clk_mgr_dcn35 *clk_mgr = kzalloc_obj(*clk_mgr);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
dce_clock_read_ss_info(clk_mgr);
- clk_mgr->base.bw_params = kzalloc_obj(*clk_mgr->base.bw_params,
- GFP_KERNEL);
+ clk_mgr->base.bw_params = kzalloc_obj(*clk_mgr->base.bw_params);
if (!clk_mgr->base.bw_params) {
BREAK_TO_DEBUGGER();
return;
clk_mgr->smu_present = false;
- clk_mgr->base.bw_params = kzalloc_obj(*clk_mgr->base.bw_params,
- GFP_KERNEL);
+ clk_mgr->base.bw_params = kzalloc_obj(*clk_mgr->base.bw_params);
if (!clk_mgr->base.bw_params) {
BREAK_TO_DEBUGGER();
return;
struct dccg *dccg)
{
struct clk_log_info log_info = {0};
- struct dcn401_clk_mgr *clk_mgr401 = kzalloc_obj(struct dcn401_clk_mgr,
- GFP_KERNEL);
+ struct dcn401_clk_mgr *clk_mgr401 = kzalloc_obj(struct dcn401_clk_mgr);
struct clk_mgr_internal *clk_mgr;
if (!clk_mgr401)
clk_mgr->smu_present = false;
- clk_mgr->base.bw_params = kzalloc_obj(*clk_mgr->base.bw_params,
- GFP_KERNEL);
+ clk_mgr->base.bw_params = kzalloc_obj(*clk_mgr->base.bw_params);
if (!clk_mgr->base.bw_params) {
BREAK_TO_DEBUGGER();
kfree(clk_mgr401);
enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
struct gpio_pin_info pin_info;
struct gpio *generic;
- struct gpio_generic_mux_config *config = kzalloc_obj(struct gpio_generic_mux_config,
- GFP_KERNEL);
+ struct gpio_generic_mux_config *config = kzalloc_obj(struct gpio_generic_mux_config);
if (!config)
return false;
if (*ss_entries_num == 0)
return;
- ss_info = kzalloc_objs(struct spread_spectrum_info, *ss_entries_num,
- GFP_KERNEL);
+ ss_info = kzalloc_objs(struct spread_spectrum_info, *ss_entries_num);
ss_info_cur = ss_info;
if (ss_info == NULL)
return;
- ss_data = kzalloc_objs(struct spread_spectrum_data, *ss_entries_num,
- GFP_KERNEL);
+ ss_data = kzalloc_objs(struct spread_spectrum_data, *ss_entries_num);
if (ss_data == NULL)
goto out_free_info;
struct dc_context *ctx,
uint32_t inst)
{
- struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input,
- GFP_KERNEL);
+ struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input);
if (!dce_mi) {
BREAK_TO_DEBUGGER();
struct dc_context *ctx,
uint32_t inst)
{
- struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input,
- GFP_KERNEL);
+ struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input);
if (!dce_mi) {
BREAK_TO_DEBUGGER();
static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
{
- struct dce110_timing_generator *dce110_tgv = kzalloc_obj(*dce110_tgv,
- GFP_KERNEL);
- struct dce_transform *dce110_xfmv = kzalloc_obj(*dce110_xfmv,
- GFP_KERNEL);
+ struct dce110_timing_generator *dce110_tgv = kzalloc_obj(*dce110_tgv);
+ struct dce_transform *dce110_xfmv = kzalloc_obj(*dce110_xfmv);
struct dce_mem_input *dce110_miv = kzalloc_obj(*dce110_miv);
struct dce110_opp *dce110_oppv = kzalloc_obj(*dce110_oppv);
struct dc_context *ctx,
uint32_t inst)
{
- struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input,
- GFP_KERNEL);
+ struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input);
if (!dce_mi) {
BREAK_TO_DEBUGGER();
struct dc_context *ctx,
uint32_t inst)
{
- struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input,
- GFP_KERNEL);
+ struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input);
if (!dce_mi) {
BREAK_TO_DEBUGGER();
struct dc_context *ctx,
uint32_t inst)
{
- struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input,
- GFP_KERNEL);
+ struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input);
if (!dce_mi) {
BREAK_TO_DEBUGGER();
struct dc_context *ctx,
uint32_t inst)
{
- struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input,
- GFP_KERNEL);
+ struct dce_mem_input *dce_mi = kzalloc_obj(struct dce_mem_input);
if (!dce_mi) {
BREAK_TO_DEBUGGER();
static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx)
{
- struct dcn10_hubbub *dcn10_hubbub = kzalloc_obj(struct dcn10_hubbub,
- GFP_KERNEL);
+ struct dcn10_hubbub *dcn10_hubbub = kzalloc_obj(struct dcn10_hubbub);
if (!dcn10_hubbub)
return NULL;
struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
{
int i;
- struct dcn20_hubbub *hubbub = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub)
return NULL;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn20_dwbc *dwbc20 = kzalloc_obj(struct dcn20_dwbc,
- GFP_KERNEL);
+ struct dcn20_dwbc *dwbc20 = kzalloc_obj(struct dcn20_dwbc);
if (!dwbc20) {
dm_error("DC: failed to create dwbc20!\n");
ASSERT(pipe_count > 0);
for (i = 0; i < pipe_count; i++) {
- struct dcn20_mmhubbub *mcif_wb20 = kzalloc_obj(struct dcn20_mmhubbub,
- GFP_KERNEL);
+ struct dcn20_mmhubbub *mcif_wb20 = kzalloc_obj(struct dcn20_mmhubbub);
if (!mcif_wb20) {
dm_error("DC: failed to create mcif_wb20!\n");
static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx)
{
- struct dcn20_hubbub *hubbub = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub)
return NULL;
{
int i;
- struct dcn20_hubbub *hubbub = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub)
return NULL;
{
int i;
- struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub3)
return NULL;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
{
int i;
- struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub3)
return NULL;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
{
int i;
- struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub3)
return NULL;
static struct clock_source *dcn302_clock_source_create(struct dc_context *ctx, struct dc_bios *bios,
enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src)
{
- struct dce110_clk_src *clk_src = kzalloc_obj(struct dce110_clk_src,
- GFP_KERNEL);
+ struct dce110_clk_src *clk_src = kzalloc_obj(struct dce110_clk_src);
if (!clk_src)
return NULL;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
static struct dce_aux *dcn302_aux_engine_create(struct dc_context *ctx, uint32_t inst)
{
- struct aux_engine_dce110 *aux_engine = kzalloc_obj(struct aux_engine_dce110,
- GFP_KERNEL);
+ struct aux_engine_dce110 *aux_engine = kzalloc_obj(struct aux_engine_dce110);
if (!aux_engine)
return NULL;
static struct dce_i2c_hw *dcn302_i2c_hw_create(struct dc_context *ctx, uint32_t inst)
{
- struct dce_i2c_hw *dce_i2c_hw = kzalloc_obj(struct dce_i2c_hw,
- GFP_KERNEL);
+ struct dce_i2c_hw *dce_i2c_hw = kzalloc_obj(struct dce_i2c_hw);
if (!dce_i2c_hw)
return NULL;
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
- struct dcn20_link_encoder *enc20 = kzalloc_obj(struct dcn20_link_encoder,
- GFP_KERNEL);
+ struct dcn20_link_encoder *enc20 = kzalloc_obj(struct dcn20_link_encoder);
if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
static struct panel_cntl *dcn302_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
- struct dce_panel_cntl *panel_cntl = kzalloc_obj(struct dce_panel_cntl,
- GFP_KERNEL);
+ struct dce_panel_cntl *panel_cntl = kzalloc_obj(struct dce_panel_cntl);
if (!panel_cntl)
return NULL;
struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc)
{
- struct resource_pool *pool = kzalloc_obj(struct resource_pool,
- GFP_KERNEL);
+ struct resource_pool *pool = kzalloc_obj(struct resource_pool);
if (!pool)
return NULL;
{
int i;
- struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub3)
return NULL;
static struct clock_source *dcn303_clock_source_create(struct dc_context *ctx, struct dc_bios *bios,
enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src)
{
- struct dce110_clk_src *clk_src = kzalloc_obj(struct dce110_clk_src,
- GFP_KERNEL);
+ struct dce110_clk_src *clk_src = kzalloc_obj(struct dce110_clk_src);
if (!clk_src)
return NULL;
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
static struct dce_aux *dcn303_aux_engine_create(struct dc_context *ctx, uint32_t inst)
{
- struct aux_engine_dce110 *aux_engine = kzalloc_obj(struct aux_engine_dce110,
- GFP_KERNEL);
+ struct aux_engine_dce110 *aux_engine = kzalloc_obj(struct aux_engine_dce110);
if (!aux_engine)
return NULL;
static struct dce_i2c_hw *dcn303_i2c_hw_create(struct dc_context *ctx, uint32_t inst)
{
- struct dce_i2c_hw *dce_i2c_hw = kzalloc_obj(struct dce_i2c_hw,
- GFP_KERNEL);
+ struct dce_i2c_hw *dce_i2c_hw = kzalloc_obj(struct dce_i2c_hw);
if (!dce_i2c_hw)
return NULL;
struct dc_context *ctx,
const struct encoder_init_data *enc_init_data)
{
- struct dcn20_link_encoder *enc20 = kzalloc_obj(struct dcn20_link_encoder,
- GFP_KERNEL);
+ struct dcn20_link_encoder *enc20 = kzalloc_obj(struct dcn20_link_encoder);
if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
static struct panel_cntl *dcn303_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
- struct dce_panel_cntl *panel_cntl = kzalloc_obj(struct dce_panel_cntl,
- GFP_KERNEL);
+ struct dce_panel_cntl *panel_cntl = kzalloc_obj(struct dce_panel_cntl);
if (!panel_cntl)
return NULL;
struct resource_pool *dcn303_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc)
{
- struct resource_pool *pool = kzalloc_obj(struct resource_pool,
- GFP_KERNEL);
+ struct resource_pool *pool = kzalloc_obj(struct resource_pool);
if (!pool)
return NULL;
{
int i;
- struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub3)
return NULL;
apg_inst = hpo_dp_inst;
/* allocate HPO stream encoder and create VPG sub-block */
- hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder,
- GFP_KERNEL);
+ hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder);
vpg = dcn31_vpg_create(ctx, vpg_inst);
apg = dcn31_apg_create(ctx, apg_inst);
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
{
int i;
- struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub3)
return NULL;
apg_inst = hpo_dp_inst;
/* allocate HPO stream encoder and create VPG sub-block */
- hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder,
- GFP_KERNEL);
+ hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder);
vpg = dcn31_vpg_create(ctx, vpg_inst);
apg = dcn31_apg_create(ctx, apg_inst);
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
{
int i;
- struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub3)
return NULL;
apg_inst = hpo_dp_inst;
/* allocate HPO stream encoder and create VPG sub-block */
- hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder,
- GFP_KERNEL);
+ hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder);
vpg = dcn31_vpg_create(ctx, vpg_inst);
apg = dcn31_apg_create(ctx, apg_inst);
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
{
int i;
- struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub3)
return NULL;
apg_inst = hpo_dp_inst;
/* allocate HPO stream encoder and create VPG sub-block */
- hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder,
- GFP_KERNEL);
+ hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder);
vpg = dcn31_vpg_create(ctx, vpg_inst);
apg = dcn31_apg_create(ctx, apg_inst);
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
{
int i;
- struct dcn20_hubbub *hubbub2 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub2 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub2)
return NULL;
apg_inst = hpo_dp_inst;
/* allocate HPO stream encoder and create VPG sub-block */
- hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder,
- GFP_KERNEL);
+ hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder);
vpg = dcn32_vpg_create(ctx, vpg_inst);
apg = dcn31_apg_create(ctx, apg_inst);
uint32_t dwb_count = pool->res_cap->num_dwb;
for (i = 0; i < dwb_count; i++) {
- struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
uint32_t dwb_count = pool->res_cap->num_dwb;
for (i = 0; i < dwb_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
{
int i;
- struct dcn20_hubbub *hubbub2 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub2 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub2)
return NULL;
apg_inst = hpo_dp_inst;
/* allocate HPO stream encoder and create VPG sub-block */
- hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder,
- GFP_KERNEL);
+ hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder);
vpg = dcn321_vpg_create(ctx, vpg_inst);
apg = dcn321_apg_create(ctx, apg_inst);
uint32_t dwb_count = pool->res_cap->num_dwb;
for (i = 0; i < dwb_count; i++) {
- struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
uint32_t dwb_count = pool->res_cap->num_dwb;
for (i = 0; i < dwb_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
{
int i;
- struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub3)
return NULL;
apg_inst = hpo_dp_inst;
/* allocate HPO stream encoder and create VPG sub-block */
- hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder,
- GFP_KERNEL);
+ hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder);
vpg = dcn31_vpg_create(ctx, vpg_inst);
apg = dcn31_apg_create(ctx, apg_inst);
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
{
int i;
- struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub3)
return NULL;
apg_inst = hpo_dp_inst;
/* allocate HPO stream encoder and create VPG sub-block */
- hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder,
- GFP_KERNEL);
+ hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder);
vpg = dcn31_vpg_create(ctx, vpg_inst);
apg = dcn31_apg_create(ctx, apg_inst);
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
{
int i;
- struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub3 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub3)
return NULL;
apg_inst = hpo_dp_inst;
/* allocate HPO stream encoder and create VPG sub-block */
- hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder,
- GFP_KERNEL);
+ hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder);
vpg = dcn31_vpg_create(ctx, vpg_inst);
apg = dcn31_apg_create(ctx, apg_inst);
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc30 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc30) {
dm_error("DC: failed to create dwbc30!\n");
uint32_t pipe_count = pool->res_cap->num_dwb;
for (i = 0; i < pipe_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
{
int i;
- struct dcn20_hubbub *hubbub2 = kzalloc_obj(struct dcn20_hubbub,
- GFP_KERNEL);
+ struct dcn20_hubbub *hubbub2 = kzalloc_obj(struct dcn20_hubbub);
if (!hubbub2)
return NULL;
apg_inst = hpo_dp_inst;
/* allocate HPO stream encoder and create VPG sub-block */
- hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder,
- GFP_KERNEL);
+ hpo_dp_enc31 = kzalloc_obj(struct dcn31_hpo_dp_stream_encoder);
vpg = dcn401_vpg_create(ctx, vpg_inst);
apg = dcn401_apg_create(ctx, apg_inst);
uint32_t dwb_count = pool->res_cap->num_dwb;
for (i = 0; i < dwb_count; i++) {
- struct dcn30_dwbc *dwbc401 = kzalloc_obj(struct dcn30_dwbc,
- GFP_KERNEL);
+ struct dcn30_dwbc *dwbc401 = kzalloc_obj(struct dcn30_dwbc);
if (!dwbc401) {
dm_error("DC: failed to create dwbc401!\n");
uint32_t dwb_count = pool->res_cap->num_dwb;
for (i = 0; i < dwb_count; i++) {
- struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub,
- GFP_KERNEL);
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc_obj(struct dcn30_mmhubbub);
if (!mcif_wb30) {
dm_error("DC: failed to create mcif_wb30!\n");
if (!rgb_user)
goto rgb_user_alloc_fail;
- axis_x = kvzalloc_objs(*axis_x, ramp->num_entries + 3,
- GFP_KERNEL);
+ axis_x = kvzalloc_objs(*axis_x, ramp->num_entries + 3);
if (!axis_x)
goto axis_x_alloc_fail;
scale_gamma_dx(rgb_user, ramp, dividers);
}
- rgb_regamma = kvzalloc_objs(*rgb_regamma, MAX_HW_POINTS + _EXTRA_POINTS,
- GFP_KERNEL);
+ rgb_regamma = kvzalloc_objs(*rgb_regamma, MAX_HW_POINTS + _EXTRA_POINTS);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
ATOM_PowerTune_Table *pt;
adev->pm.dpm.dyn_state.cac_tdp_table =
- kzalloc_obj(struct amdgpu_cac_tdp_table,
- GFP_KERNEL);
+ kzalloc_obj(struct amdgpu_cac_tdp_table);
if (!adev->pm.dpm.dyn_state.cac_tdp_table)
return -ENOMEM;
if (rev > 0) {
return ret;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kzalloc_objs(struct amdgpu_clock_voltage_dependency_entry, 4,
- GFP_KERNEL);
+ kzalloc_objs(struct amdgpu_clock_voltage_dependency_entry, 4);
if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
return -ENOMEM;
PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count),
"Invalid PowerPlay Table!", return -1);
- table = kzalloc_flex(*table, values, clk_volt_pp_table->count,
- GFP_KERNEL);
+ table = kzalloc_flex(*table, values, clk_volt_pp_table->count);
if (!table)
return -ENOMEM;
unsigned long i;
struct phm_clock_voltage_dependency_table *dep_table;
- dep_table = kzalloc_flex(*dep_table, entries, table->ucNumEntries,
- GFP_KERNEL);
+ dep_table = kzalloc_flex(*dep_table, entries, table->ucNumEntries);
if (NULL == dep_table)
return -ENOMEM;
unsigned long i;
struct phm_clock_array *clock_table;
- clock_table = kzalloc_flex(*clock_table, values, table->count,
- GFP_KERNEL);
+ clock_table = kzalloc_flex(*clock_table, values, table->count);
if (!clock_table)
return -ENOMEM;
unsigned long i;
struct phm_uvd_clock_voltage_dependency_table *uvd_table;
- uvd_table = kzalloc_flex(*uvd_table, entries, table->numEntries,
- GFP_KERNEL);
+ uvd_table = kzalloc_flex(*uvd_table, entries, table->numEntries);
if (!uvd_table)
return -ENOMEM;
unsigned long i;
struct phm_vce_clock_voltage_dependency_table *vce_table;
- vce_table = kzalloc_flex(*vce_table, entries, table->numEntries,
- GFP_KERNEL);
+ vce_table = kzalloc_flex(*vce_table, entries, table->numEntries);
if (!vce_table)
return -ENOMEM;
unsigned long i;
struct phm_samu_clock_voltage_dependency_table *samu_table;
- samu_table = kzalloc_flex(*samu_table, entries, table->numEntries,
- GFP_KERNEL);
+ samu_table = kzalloc_flex(*samu_table, entries, table->numEntries);
if (!samu_table)
return -ENOMEM;
unsigned long i;
struct phm_acp_clock_voltage_dependency_table *acp_table;
- acp_table = kzalloc_flex(*acp_table, entries, table->numEntries,
- GFP_KERNEL);
+ acp_table = kzalloc_flex(*acp_table, entries, table->numEntries);
if (!acp_table)
return -ENOMEM;
static int get_platform_power_management_table(struct pp_hwmgr *hwmgr,
ATOM_PPLIB_PPM_Table *atom_ppm_table)
{
- struct phm_ppm_table *ptr = kzalloc_obj(struct phm_ppm_table,
- GFP_KERNEL);
+ struct phm_ppm_table *ptr = kzalloc_obj(struct phm_ppm_table);
if (NULL == ptr)
return -ENOMEM;
PP_ASSERT_WITH_CODE(clk_volt_pp_table->count,
"Invalid PowerPlay Table!", return -1);
- table = kzalloc_flex(*table, values, clk_volt_pp_table->count,
- GFP_KERNEL);
+ table = kzalloc_flex(*table, values, clk_volt_pp_table->count);
if (!table)
return -ENOMEM;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_dpm_policy *policy;
- smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context,
- GFP_KERNEL);
+ smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context);
if (!smu_dpm->dpm_context)
return -ENOMEM;
smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
- smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context,
- GFP_KERNEL);
+ smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context);
if (!smu_dpm->dpm_context)
return -ENOMEM;
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
- smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context,
- GFP_KERNEL);
+ smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context);
if (!smu_dpm->dpm_context)
return -ENOMEM;
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
- smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context,
- GFP_KERNEL);
+ smu_dpm->dpm_context = kzalloc_obj(struct smu_11_0_dpm_context);
if (!smu_dpm->dpm_context)
return -ENOMEM;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_dpm_policy *policy;
- smu_dpm->dpm_context = kzalloc_obj(struct smu_13_0_dpm_context,
- GFP_KERNEL);
+ smu_dpm->dpm_context = kzalloc_obj(struct smu_13_0_dpm_context);
if (!smu_dpm->dpm_context)
return -ENOMEM;
smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
if (smu_power->power_context || smu_power->power_context_size != 0)
return -EINVAL;
- smu_power->power_context = kzalloc_obj(struct smu_13_0_power_context,
- GFP_KERNEL);
+ smu_power->power_context = kzalloc_obj(struct smu_13_0_power_context);
if (!smu_power->power_context)
return -ENOMEM;
smu_power->power_context_size = sizeof(struct smu_13_0_power_context);
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
- smu_dpm->dpm_context = kzalloc_obj(struct smu_13_0_dpm_context,
- GFP_KERNEL);
+ smu_dpm->dpm_context = kzalloc_obj(struct smu_13_0_dpm_context);
if (!smu_dpm->dpm_context)
return -ENOMEM;
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
- smu_dpm->dpm_context = kzalloc_obj(struct smu_13_0_dpm_context,
- GFP_KERNEL);
+ smu_dpm->dpm_context = kzalloc_obj(struct smu_13_0_dpm_context);
if (!smu_dpm->dpm_context)
return -ENOMEM;
if (smu_power->power_context || smu_power->power_context_size != 0)
return -EINVAL;
- smu_power->power_context = kzalloc_obj(struct smu_14_0_dpm_context,
- GFP_KERNEL);
+ smu_power->power_context = kzalloc_obj(struct smu_14_0_dpm_context);
if (!smu_power->power_context)
return -ENOMEM;
smu_power->power_context_size = sizeof(struct smu_14_0_dpm_context);
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
- smu_dpm->dpm_context = kzalloc_obj(struct smu_14_0_dpm_context,
- GFP_KERNEL);
+ smu_dpm->dpm_context = kzalloc_obj(struct smu_14_0_dpm_context);
if (!smu_dpm->dpm_context)
return -ENOMEM;
if (smu_power->power_context || smu_power->power_context_size != 0)
return -EINVAL;
- smu_power->power_context = kzalloc_obj(struct smu_15_0_dpm_context,
- GFP_KERNEL);
+ smu_power->power_context = kzalloc_obj(struct smu_15_0_dpm_context);
if (!smu_power->power_context)
return -ENOMEM;
smu_power->power_context_size = sizeof(struct smu_15_0_dpm_context);
{
struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
- ras_mgr->virt_ras_cmd = kzalloc_obj(struct amdgpu_virt_ras_cmd,
- GFP_KERNEL);
+ ras_mgr->virt_ras_cmd = kzalloc_obj(struct amdgpu_virt_ras_cmd);
if (!ras_mgr->virt_ras_cmd)
return -ENOMEM;
static void malidp_mw_connector_reset(struct drm_connector *connector)
{
- struct malidp_mw_connector_state *mw_state = kzalloc_obj(*mw_state,
- GFP_KERNEL);
+ struct malidp_mw_connector_state *mw_state = kzalloc_obj(*mw_state);
if (connector->state)
__drm_atomic_helper_connector_destroy_state(connector->state);
static void ast_astdp_connector_reset(struct drm_connector *connector)
{
- struct ast_astdp_connector_state *astdp_state = kzalloc_obj(*astdp_state,
- GFP_KERNEL);
+ struct ast_astdp_connector_state *astdp_state = kzalloc_obj(*astdp_state);
if (connector->state)
connector->funcs->atomic_destroy_state(connector, connector->state);
*/
state->allow_modeset = true;
- state->crtcs = kzalloc_objs(*state->crtcs, dev->mode_config.num_crtc,
- GFP_KERNEL);
+ state->crtcs = kzalloc_objs(*state->crtcs, dev->mode_config.num_crtc);
if (!state->crtcs)
goto fail;
state->planes = kzalloc_objs(*state->planes,
}
if (!state->fake_commit) {
- state->fake_commit = kzalloc_obj(*state->fake_commit,
- GFP_KERNEL);
+ state->fake_commit = kzalloc_obj(*state->fake_commit);
if (!state->fake_commit)
return NULL;
*/
void drm_atomic_helper_connector_reset(struct drm_connector *connector)
{
- struct drm_connector_state *conn_state = kzalloc_obj(*conn_state,
- GFP_KERNEL);
+ struct drm_connector_state *conn_state = kzalloc_obj(*conn_state);
if (connector->state)
__drm_atomic_helper_connector_destroy_state(connector->state);
BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
- mm->free_trees = kmalloc_objs(*mm->free_trees, DRM_BUDDY_MAX_FREE_TREES,
- GFP_KERNEL);
+ mm->free_trees = kmalloc_objs(*mm->free_trees, DRM_BUDDY_MAX_FREE_TREES);
if (!mm->free_trees)
return -ENOMEM;
mm->n_roots = hweight64(size);
- mm->roots = kmalloc_objs(struct drm_buddy_block *, mm->n_roots,
- GFP_KERNEL);
+ mm->roots = kmalloc_objs(struct drm_buddy_block *, mm->n_roots);
if (!mm->roots)
goto out_free_tree;
int i = 0;
/* Add terminating zero entry to enable index less iteration */
- client->modesets = kzalloc_objs(*client->modesets, num_crtc + 1,
- GFP_KERNEL);
+ client->modesets = kzalloc_objs(*client->modesets, num_crtc + 1);
if (!client->modesets)
return -ENOMEM;
if (!plane_state)
return NULL;
- new_shadow_plane_state = kzalloc_obj(*new_shadow_plane_state,
- GFP_KERNEL);
+ new_shadow_plane_state = kzalloc_obj(*new_shadow_plane_state);
if (!new_shadow_plane_state)
return NULL;
__drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
int ret;
bool universal_planes = READ_ONCE(lessor_priv->universal_planes);
- objects = kzalloc_objs(struct drm_mode_object *, object_count,
- GFP_KERNEL);
+ objects = kzalloc_objs(struct drm_mode_object *, object_count);
if (!objects)
return -ENOMEM;
int len = 0;
int i;
- all_pipelines = kzalloc_objs(*all_pipelines, num_pipelines + 1,
- GFP_KERNEL);
+ all_pipelines = kzalloc_objs(*all_pipelines, num_pipelines + 1);
if (!all_pipelines) {
drm_err(plane->dev, "failed to allocate color pipeline\n");
/* Find current connectors for CRTC */
num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
BUG_ON(num_connectors == 0);
- connector_list = kzalloc_objs(*connector_list, num_connectors,
- GFP_KERNEL);
+ connector_list = kzalloc_objs(*connector_list, num_connectors);
if (!connector_list)
return -ENOMEM;
get_connectors_for_crtc(crtc, connector_list, num_connectors);
WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
if (!conn_state->writeback_job) {
- conn_state->writeback_job = kzalloc_obj(*conn_state->writeback_job,
- GFP_KERNEL);
+ conn_state->writeback_job = kzalloc_obj(*conn_state->writeback_job);
if (!conn_state->writeback_job)
return -ENOMEM;
* (input and output image, and shader), we keep this buffer
* for the whole life time the driver is bound
*/
- priv->flop_reset_data_ppu = kzalloc_obj(*priv->flop_reset_data_ppu,
- GFP_KERNEL);
+ priv->flop_reset_data_ppu = kzalloc_obj(*priv->flop_reset_data_ppu);
if (!priv->flop_reset_data_ppu)
return -ENOMEM;
if (!submit)
return NULL;
- submit->pmrs = kzalloc_objs(struct etnaviv_perfmon_request, nr_pmrs,
- GFP_KERNEL);
+ submit->pmrs = kzalloc_objs(struct etnaviv_perfmon_request, nr_pmrs);
if (!submit->pmrs) {
kfree(submit);
return NULL;
offset = userptr & ~PAGE_MASK;
end = PAGE_ALIGN(userptr + size);
npages = (end - start) >> PAGE_SHIFT;
- g2d_userptr->pages = kvmalloc_objs(*g2d_userptr->pages, npages,
- GFP_KERNEL);
+ g2d_userptr->pages = kvmalloc_objs(*g2d_userptr->pages, npages);
if (!g2d_userptr->pages) {
ret = -ENOMEM;
goto err_free;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret, i;
- dev_priv->gmbus = kzalloc_objs(struct intel_gmbus, GMBUS_NUM_PORTS,
- GFP_KERNEL);
+ dev_priv->gmbus = kzalloc_objs(struct intel_gmbus, GMBUS_NUM_PORTS);
if (dev_priv->gmbus == NULL)
return -ENOMEM;
struct gma_connector *intel_connector;
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
- psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector,
- GFP_KERNEL);
+ psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector);
if (!psb_intel_sdvo_connector)
return false;
struct gma_connector *intel_connector;
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
- psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector,
- GFP_KERNEL);
+ psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector);
if (!psb_intel_sdvo_connector)
return false;
struct gma_connector *intel_connector;
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
- psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector,
- GFP_KERNEL);
+ psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector);
if (!psb_intel_sdvo_connector)
return false;
struct gma_connector *intel_connector;
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
- psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector,
- GFP_KERNEL);
+ psb_intel_sdvo_connector = kzalloc_obj(struct psb_intel_sdvo_connector);
if (!psb_intel_sdvo_connector)
return false;
if (drm_edid && edid_ctx.edid_override)
goto out;
- reqmodes = kmalloc_objs(*reqmodes, GUD_CONNECTOR_MAX_NUM_MODES,
- GFP_KERNEL);
+ reqmodes = kmalloc_objs(*reqmodes, GUD_CONNECTOR_MAX_NUM_MODES);
if (!reqmodes)
goto out;
unsigned int i, num_properties;
int ret;
- properties = kzalloc_objs(*properties, GUD_CONNECTOR_PROPERTIES_MAX_NUM,
- GFP_KERNEL);
+ properties = kzalloc_objs(*properties, GUD_CONNECTOR_PROPERTIES_MAX_NUM);
if (!properties)
return -ENOMEM;
unsigned int i, num_properties;
int ret;
- properties = kzalloc_objs(*properties, GUD_PROPERTIES_MAX_NUM,
- GFP_KERNEL);
+ properties = kzalloc_objs(*properties, GUD_PROPERTIES_MAX_NUM);
if (!properties)
return -ENOMEM;
if (section_id == BDB_MIPI_SEQUENCE && *(const u8 *)block >= 3)
block_size += 5;
- entry = kzalloc_flex(*entry, data, max(min_size, block_size) + 3,
- GFP_KERNEL);
+ entry = kzalloc_flex(*entry, data, max(min_size, block_size) + 3);
if (!entry) {
kfree(temp_block);
return;
}
if (!state->inherited_dp_tunnels) {
- state->inherited_dp_tunnels = kzalloc_obj(*state->inherited_dp_tunnels,
- GFP_KERNEL);
+ state->inherited_dp_tunnels = kzalloc_obj(*state->inherited_dp_tunnels);
if (!state->inherited_dp_tunnels)
return -ENOMEM;
}
obj->mm.placements = &i915->mm.regions[mr->id];
obj->mm.n_placements = 1;
} else {
- arr = kmalloc_objs(struct intel_memory_region *, n_placements,
- GFP_KERNEL);
+ arr = kmalloc_objs(struct intel_memory_region *, n_placements);
if (!arr)
return -ENOMEM;
if (intel_vgpu_active(i915))
num_fences = intel_uncore_read(uncore,
vgtif_reg(avail_rs.fence_num));
- ggtt->fence_regs = kzalloc_objs(*ggtt->fence_regs, num_fences,
- GFP_KERNEL);
+ ggtt->fence_regs = kzalloc_objs(*ggtt->fence_regs, num_fences);
if (!ggtt->fence_regs)
num_fences = 0;
if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
struct i915_wa *list;
- list = kmalloc_objs(*list, ALIGN(wal->count + 1, grow),
- GFP_KERNEL);
+ list = kmalloc_objs(*list, ALIGN(wal->count + 1, grow));
if (!list) {
drm_err(&i915->drm, "No space for workaround init!\n");
return;
u32 *cs;
int n;
- smoke.contexts = kmalloc_objs(*smoke.contexts, smoke.ncontext,
- GFP_KERNEL);
+ smoke.contexts = kmalloc_objs(*smoke.contexts, smoke.ncontext);
if (!smoke.contexts)
return -ENOMEM;
* independently to each of their breadcrumb slots.
*/
- timelines = kvmalloc_objs(*timelines, NUM_TIMELINES * I915_NUM_ENGINES,
- GFP_KERNEL);
+ timelines = kvmalloc_objs(*timelines, NUM_TIMELINES * I915_NUM_ENGINES);
if (!timelines)
return -ENOMEM;
* engines.
*/
- timelines = kvmalloc_objs(*timelines, NUM_TIMELINES * I915_NUM_ENGINES,
- GFP_KERNEL);
+ timelines = kvmalloc_objs(*timelines, NUM_TIMELINES * I915_NUM_ENGINES);
if (!timelines)
return -ENOMEM;
return;
/* allocate an extra for an end marker */
- extlists = kzalloc_objs(struct __guc_mmio_reg_descr_group, 2,
- GFP_KERNEL);
+ extlists = kzalloc_objs(struct __guc_mmio_reg_descr_group, 2);
if (!extlists)
return;
for (j = 0; j < table->count; j++) {
const struct drm_i915_cmd_descriptor *desc =
&table->table[j];
- struct cmd_node *desc_node = kmalloc_obj(*desc_node,
- GFP_KERNEL);
+ struct cmd_node *desc_node = kmalloc_obj(*desc_node);
if (!desc_node)
return -ENOMEM;
smoke[0].request_alloc = __live_request_alloc;
smoke[0].ncontexts = 64;
- smoke[0].contexts = kzalloc_objs(*smoke[0].contexts, smoke[0].ncontexts,
- GFP_KERNEL);
+ smoke[0].contexts = kzalloc_objs(*smoke[0].contexts, smoke[0].ncontexts);
if (!smoke[0].contexts) {
ret = -ENOMEM;
goto out_threads;
if (!crtc_state)
return NULL;
- new_mgag200_crtc_state = kzalloc_obj(*new_mgag200_crtc_state,
- GFP_KERNEL);
+ new_mgag200_crtc_state = kzalloc_obj(*new_mgag200_crtc_state);
if (!new_mgag200_crtc_state)
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, &new_mgag200_crtc_state->base);
static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
- struct mdp5_crtc_state *mdp5_cstate = kzalloc_obj(*mdp5_cstate,
- GFP_KERNEL);
+ struct mdp5_crtc_state *mdp5_cstate = kzalloc_obj(*mdp5_cstate);
if (crtc->state)
mdp5_crtc_destroy_state(crtc, crtc->state);
nv10_overlay_init(struct drm_device *device)
{
struct nouveau_drm *drm = nouveau_drm(device);
- struct nouveau_plane *plane = kzalloc_obj(struct nouveau_plane,
- GFP_KERNEL);
+ struct nouveau_plane *plane = kzalloc_obj(struct nouveau_plane);
unsigned int num_formats = ARRAY_SIZE(formats);
int ret;
nv04_overlay_init(struct drm_device *device)
{
struct nouveau_drm *drm = nouveau_drm(device);
- struct nouveau_plane *plane = kzalloc_obj(struct nouveau_plane,
- GFP_KERNEL);
+ struct nouveau_plane *plane = kzalloc_obj(struct nouveau_plane);
int ret;
if (!plane)
if (ret)
return ret;
- buffer->fault = kvzalloc_objs(*buffer->fault, buffer->entries,
- GFP_KERNEL);
+ buffer->fault = kvzalloc_objs(*buffer->fault, buffer->entries);
if (!buffer->fault)
return -ENOMEM;
goto done;
device->runlists = fls64(a->v.runlists.data);
- device->runlist = kzalloc_objs(*device->runlist, device->runlists,
- GFP_KERNEL);
+ device->runlist = kzalloc_objs(*device->runlist, device->runlists);
if (!device->runlist) {
ret = -ENOMEM;
goto done;
list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
}
- omap_dmm->tcm = kzalloc_objs(*omap_dmm->tcm, omap_dmm->num_lut,
- GFP_KERNEL);
+ omap_dmm->tcm = kzalloc_objs(*omap_dmm->tcm, omap_dmm->num_lut);
if (!omap_dmm->tcm) {
ret = -ENOMEM;
goto fail;
qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
memset(qdev->monitors_config, 0, monitors_config_size);
- qdev->dumb_heads = kzalloc_objs(qdev->dumb_heads[0], qxl_num_crtc,
- GFP_KERNEL);
+ qdev->dumb_heads = kzalloc_objs(qdev->dumb_heads[0], qxl_num_crtc);
if (!qdev->dumb_heads) {
qxl_destroy_monitors_object(qdev);
return -ENOMEM;
cmd->command_size))
return -EFAULT;
- reloc_info = kmalloc_objs(struct qxl_reloc_info, cmd->relocs_num,
- GFP_KERNEL);
+ reloc_info = kmalloc_objs(struct qxl_reloc_info, cmd->relocs_num);
if (!reloc_info)
return -ENOMEM;
{
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder_atom_dac *dac = kzalloc_obj(struct radeon_encoder_atom_dac,
- GFP_KERNEL);
+ struct radeon_encoder_atom_dac *dac = kzalloc_obj(struct radeon_encoder_atom_dac);
if (!dac)
return NULL;
radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
{
int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- struct radeon_encoder_atom_dig *dig = kzalloc_obj(struct radeon_encoder_atom_dig,
- GFP_KERNEL);
+ struct radeon_encoder_atom_dig *dig = kzalloc_obj(struct radeon_encoder_atom_dig);
if (!dig)
return NULL;
return ret;
rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4,
- GFP_KERNEL);
+ kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4);
if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
r600_free_extended_power_table(rdev);
return -ENOMEM;
ci_set_private_data_variables_based_on_pptable(rdev);
rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4,
- GFP_KERNEL);
+ kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4);
if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
ci_dpm_fini(rdev);
return -ENOMEM;
return ret;
rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4,
- GFP_KERNEL);
+ kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4);
if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
r600_free_extended_power_table(rdev);
return -ENOMEM;
le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
ATOM_PowerTune_Table *pt;
rdev->pm.dpm.dyn_state.cac_tdp_table =
- kzalloc_obj(struct radeon_cac_tdp_table,
- GFP_KERNEL);
+ kzalloc_obj(struct radeon_cac_tdp_table);
if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
r600_free_extended_power_table(rdev);
return -ENOMEM;
dac_info = (struct _COMPASSIONATE_DATA *)
(mode_info->atom_context->bios + data_offset);
- p_dac = kzalloc_obj(struct radeon_encoder_primary_dac,
- GFP_KERNEL);
+ p_dac = kzalloc_obj(struct radeon_encoder_primary_dac);
if (!p_dac)
return NULL;
/* avoid memory leaks from invalid modes or unknown frev. */
if (!rdev->pm.power_state[state_index].clock_info) {
rdev->pm.power_state[state_index].clock_info =
- kzalloc_obj(struct radeon_pm_clock_info,
- GFP_KERNEL);
+ kzalloc_obj(struct radeon_pm_clock_info);
}
if (!rdev->pm.power_state[state_index].clock_info)
goto out;
}
if (state_index == 0) {
- rdev->pm.power_state = kzalloc_obj(struct radeon_power_state,
- GFP_KERNEL);
+ rdev->pm.power_state = kzalloc_obj(struct radeon_power_state);
if (rdev->pm.power_state) {
rdev->pm.power_state[0].clock_info =
- kzalloc_objs(struct radeon_pm_clock_info, 1,
- GFP_KERNEL);
+ kzalloc_objs(struct radeon_pm_clock_info, 1);
if (rdev->pm.power_state[0].clock_info) {
/* add the default mode */
rdev->pm.power_state[state_index].type =
rdev->pm.default_power_state_index = -1;
/* allocate 2 power states */
- rdev->pm.power_state = kzalloc_objs(struct radeon_power_state, 2,
- GFP_KERNEL);
+ rdev->pm.power_state = kzalloc_objs(struct radeon_power_state, 2);
if (rdev->pm.power_state) {
/* allocate 1 clock mode per state */
rdev->pm.power_state[0].clock_info =
}
if (is_dp_bridge) {
- radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig,
- GFP_KERNEL);
+ radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
- radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig,
- GFP_KERNEL);
+ radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
- radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig,
- GFP_KERNEL);
+ radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
- radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig,
- GFP_KERNEL);
+ radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_eDP:
- radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig,
- GFP_KERNEL);
+ radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
- radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig,
- GFP_KERNEL);
+ radeon_dig_connector = kzalloc_obj(struct radeon_connector_atom_dig);
if (!radeon_dig_connector)
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
}
p->cs_flags = 0;
p->nchunks = cs->num_chunks;
- p->chunks = kvzalloc_objs(struct radeon_cs_chunk, p->nchunks,
- GFP_KERNEL);
+ p->chunks = kvzalloc_objs(struct radeon_cs_chunk, p->nchunks);
if (p->chunks == NULL) {
return -ENOMEM;
}
BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
for (i = 0; i < num_afmt; i++) {
- rdev->mode_info.afmt[i] = kzalloc_obj(struct radeon_afmt,
- GFP_KERNEL);
+ rdev->mode_info.afmt[i] = kzalloc_obj(struct radeon_afmt);
if (rdev->mode_info.afmt[i]) {
rdev->mode_info.afmt[i]->offset = eg_offsets[i];
rdev->mode_info.afmt[i]->id = i;
}
} else if (ASIC_IS_DCE3(rdev)) {
/* DCE3.x has 2 audio blocks tied to DIG encoders */
- rdev->mode_info.afmt[0] = kzalloc_obj(struct radeon_afmt,
- GFP_KERNEL);
+ rdev->mode_info.afmt[0] = kzalloc_obj(struct radeon_afmt);
if (rdev->mode_info.afmt[0]) {
rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
rdev->mode_info.afmt[0]->id = 0;
}
- rdev->mode_info.afmt[1] = kzalloc_obj(struct radeon_afmt,
- GFP_KERNEL);
+ rdev->mode_info.afmt[1] = kzalloc_obj(struct radeon_afmt);
if (rdev->mode_info.afmt[1]) {
rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
rdev->mode_info.afmt[1]->id = 1;
}
} else if (ASIC_IS_DCE2(rdev)) {
/* DCE2 has at least 1 routable audio block */
- rdev->mode_info.afmt[0] = kzalloc_obj(struct radeon_afmt,
- GFP_KERNEL);
+ rdev->mode_info.afmt[0] = kzalloc_obj(struct radeon_afmt);
if (rdev->mode_info.afmt[0]) {
rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
rdev->mode_info.afmt[0]->id = 0;
}
/* r6xx has 2 routable audio blocks */
if (rdev->family >= CHIP_R600) {
- rdev->mode_info.afmt[1] = kzalloc_obj(struct radeon_afmt,
- GFP_KERNEL);
+ rdev->mode_info.afmt[1] = kzalloc_obj(struct radeon_afmt);
if (rdev->mode_info.afmt[1]) {
rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
rdev->mode_info.afmt[1]->id = 1;
struct radeon_bo_list *list;
unsigned i, idx;
- list = kvmalloc_objs(struct radeon_bo_list, vm->max_pde_used + 2,
- GFP_KERNEL);
+ list = kvmalloc_objs(struct radeon_bo_list, vm->max_pde_used + 2);
if (!list)
return NULL;
return ret;
rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4,
- GFP_KERNEL);
+ kzalloc_objs(struct radeon_clock_voltage_dependency_entry, 4);
if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
r600_free_extended_power_table(rdev);
return -ENOMEM;
static void vop_crtc_reset(struct drm_crtc *crtc)
{
- struct rockchip_crtc_state *crtc_state = kzalloc_obj(*crtc_state,
- GFP_KERNEL);
+ struct rockchip_crtc_state *crtc_state = kzalloc_obj(*crtc_state);
if (crtc->state)
vop_crtc_destroy_state(crtc, crtc->state);
goto Out_check_own;
sched->num_rqs = args->num_rqs;
for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
- sched->sched_rq[i] = kzalloc_obj(*sched->sched_rq[i],
- GFP_KERNEL);
+ sched->sched_rq[i] = kzalloc_obj(*sched->sched_rq[i]);
if (!sched->sched_rq[i])
goto Out_unroll;
drm_sched_rq_init(sched, sched->sched_rq[i]);
if (!appletbdrm_state->request)
return -ENOMEM;
- appletbdrm_state->response = kzalloc_obj(*appletbdrm_state->response,
- GFP_KERNEL);
+ appletbdrm_state->response = kzalloc_obj(*appletbdrm_state->response);
if (!appletbdrm_state->response)
return -ENOMEM;
static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->dma_address = kvzalloc_objs(*ttm->dma_address, ttm->num_pages,
- GFP_KERNEL);
+ ttm->dma_address = kvzalloc_objs(*ttm->dma_address, ttm->num_pages);
if (!ttm->dma_address)
return -ENOMEM;
return 0;
se->out_syncs = (struct v3d_submit_outsync *)
- kvmalloc_objs(struct v3d_submit_outsync, count,
- GFP_KERNEL);
+ kvmalloc_objs(struct v3d_submit_outsync, count);
if (!se->out_syncs)
return -ENOMEM;
job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
query_info->queries =
- kvmalloc_objs(struct v3d_performance_query, reset.count,
- GFP_KERNEL);
+ kvmalloc_objs(struct v3d_performance_query, reset.count);
if (!query_info->queries)
return -ENOMEM;
job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
query_info->queries =
- kvmalloc_objs(struct v3d_performance_query, copy.count,
- GFP_KERNEL);
+ kvmalloc_objs(struct v3d_performance_query, copy.count);
if (!query_info->queries)
return -ENOMEM;
goto fail;
}
- job->base.bo = kzalloc_objs(*job->base.bo, ARRAY_SIZE(args->bo_handles),
- GFP_KERNEL);
+ job->base.bo = kzalloc_objs(*job->base.bo, ARRAY_SIZE(args->bo_handles));
if (!job->base.bo) {
ret = -ENOMEM;
goto fail;
* use. This lets us avoid a bunch of string reallocation in
* the kernel's draw and BO allocation paths.
*/
- vc4->bo_labels = kzalloc_objs(*vc4->bo_labels, VC4_BO_TYPE_COUNT,
- GFP_KERNEL);
+ vc4->bo_labels = kzalloc_objs(*vc4->bo_labels, VC4_BO_TYPE_COUNT);
if (!vc4->bo_labels)
return -ENOMEM;
vc4->num_labels = VC4_BO_TYPE_COUNT;
{
uint64_t fence_context = base_fence_ctx + ring_idx;
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
- struct virtio_gpu_fence *fence = kzalloc_obj(struct virtio_gpu_fence,
- GFP_KERNEL);
+ struct virtio_gpu_fence *fence = kzalloc_obj(struct virtio_gpu_fence);
if (!fence)
return fence;
if (IS_ERR(sgt))
return PTR_ERR(sgt);
- *ents = kvmalloc_objs(struct virtio_gpu_mem_entry, sgt->nents,
- GFP_KERNEL);
+ *ents = kvmalloc_objs(struct virtio_gpu_mem_entry, sgt->nents);
if (!(*ents)) {
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
return -ENOMEM;
static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
{
- struct vkms_crtc_state *vkms_state = kzalloc_obj(*vkms_state,
- GFP_KERNEL);
+ struct vkms_crtc_state *vkms_state = kzalloc_obj(*vkms_state);
if (crtc->state)
vkms_atomic_crtc_destroy_state(crtc, crtc->state);
i++;
}
- vkms_state->active_planes = kzalloc_objs(*vkms_state->active_planes, i,
- GFP_KERNEL);
+ vkms_state->active_planes = kzalloc_objs(*vkms_state->active_planes, i);
if (!vkms_state->active_planes)
return -ENOMEM;
vkms_state->num_active_planes = i;
w, h, diff);
if (!src->ttm->pages && src->ttm->sg) {
- src_pages = kvmalloc_objs(struct page *, src->ttm->num_pages,
- GFP_KERNEL);
+ src_pages = kvmalloc_objs(struct page *, src->ttm->num_pages);
if (!src_pages)
return -ENOMEM;
ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
goto out;
}
if (!dst->ttm->pages && dst->ttm->sg) {
- dst_pages = kvmalloc_objs(struct page *, dst->ttm->num_pages,
- GFP_KERNEL);
+ dst_pages = kvmalloc_objs(struct page *, dst->ttm->num_pages);
if (!dst_pages) {
ret = -ENOMEM;
goto out;
uint32_t i;
int ret = 0;
- rects = kzalloc_objs(struct drm_rect, dev->mode_config.num_crtc,
- GFP_KERNEL);
+ rects = kzalloc_objs(struct drm_rect, dev->mode_config.num_crtc);
if (!rects)
return -ENOMEM;
ret = PTR_ERR(metadata->sizes);
goto out_no_sizes;
}
- srf->offsets = kmalloc_objs(*srf->offsets, metadata->num_sizes,
- GFP_KERNEL);
+ srf->offsets = kmalloc_objs(*srf->offsets, metadata->num_sizes);
if (unlikely(!srf->offsets)) {
ret = -ENOMEM;
goto out_no_offsets;
struct xe_bo *bo;
u32 size;
- stream->xecore_buf = kzalloc_objs(*stream->xecore_buf, last_xecore,
- GFP_KERNEL);
+ stream->xecore_buf = kzalloc_objs(*stream->xecore_buf, last_xecore);
if (!stream->xecore_buf)
return -ENOMEM;
}
if (param->num_syncs) {
- param->syncs = kzalloc_objs(*param->syncs, param->num_syncs,
- GFP_KERNEL);
+ param->syncs = kzalloc_objs(*param->syncs, param->num_syncs);
if (!param->syncs) {
ret = -ENOMEM;
goto exit;
entry->pt_bo->update_index = -1;
if (alloc_entries) {
- entry->pt_entries = kmalloc_objs(*entry->pt_entries, XE_PDES,
- GFP_KERNEL);
+ entry->pt_entries = kmalloc_objs(*entry->pt_entries, XE_PDES);
if (!entry->pt_entries)
return -ENOMEM;
}
lockdep_assert_held(&vm->lock);
madvise_range->num_vmas = 0;
- madvise_range->vmas = kmalloc_objs(*madvise_range->vmas, max_vmas,
- GFP_KERNEL);
+ madvise_range->vmas = kmalloc_objs(*madvise_range->vmas, max_vmas);
if (!madvise_range->vmas)
return -ENOMEM;
size_t buf_size)
{
xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
- xen_obj->pages = kvmalloc_objs(struct page *, xen_obj->num_pages,
- GFP_KERNEL);
+ xen_obj->pages = kvmalloc_objs(struct page *, xen_obj->num_pages);
return !xen_obj->pages ? -ENOMEM : 0;
}
int host1x_channel_list_init(struct host1x_channel_list *chlist,
unsigned int num_channels)
{
- chlist->channels = kzalloc_objs(struct host1x_channel, num_channels,
- GFP_KERNEL);
+ chlist->channels = kzalloc_objs(struct host1x_channel, num_channels);
if (!chlist->channels)
return -ENOMEM;
if (!count)
return 0;
- bundle->cport_desc = kzalloc_objs(*bundle->cport_desc, count,
- GFP_KERNEL);
+ bundle->cport_desc = kzalloc_objs(*bundle->cport_desc, count);
if (!bundle->cport_desc)
goto exit;
if (!rail_names)
goto err_pwrmon_debugfs;
- svc->pwrmon_rails = kzalloc_objs(*svc->pwrmon_rails, rail_count,
- GFP_KERNEL);
+ svc->pwrmon_rails = kzalloc_objs(*svc->pwrmon_rails, rail_count);
if (!svc->pwrmon_rails)
goto err_pwrmon_debugfs_free;
struct cros_ec_command *msg;
int ret;
- msg = kzalloc_flex(*msg, data, max(sizeof(u32), sizeof(*params)),
- GFP_KERNEL);
+ msg = kzalloc_flex(*msg, data, max(sizeof(u32), sizeof(*params)));
if (!msg)
return -ENOMEM;
ret = -ENOMEM;
goto duration_map;
}
- haptic->effect = kzalloc_objs(struct hid_haptic_effect, FF_MAX_EFFECTS,
- GFP_KERNEL);
+ haptic->effect = kzalloc_objs(struct hid_haptic_effect, FF_MAX_EFFECTS);
if (!haptic->effect) {
ret = -ENOMEM;
goto output_queue;
return;
/* allocate storage for fw clients representation */
- clients = kzalloc_objs(struct ishtp_fw_client, dev->fw_clients_num,
- GFP_KERNEL);
+ clients = kzalloc_objs(struct ishtp_fw_client, dev->fw_clients_num);
if (!clients) {
dev->dev_state = ISHTP_DEV_RESETTING;
ish_hw_reset(dev);
memset(hv_cpu, 0, sizeof(*hv_cpu));
}
- hv_context.hv_numa_map = kzalloc_objs(struct cpumask, nr_node_ids,
- GFP_KERNEL);
+ hv_context.hv_numa_map = kzalloc_objs(struct cpumask, nr_node_ids);
if (!hv_context.hv_numa_map) {
pr_err("Unable to allocate NUMA map\n");
goto err;
* First page holds struct hv_ring_buffer, do wraparound mapping for
* the rest.
*/
- pages_wraparound = kzalloc_objs(struct page *, page_cnt * 2 - 1,
- GFP_KERNEL);
+ pages_wraparound = kzalloc_objs(struct page *, page_cnt * 2 - 1);
if (!pages_wraparound)
return -ENOMEM;
return -ENODEV;
max_zones = topology_max_packages() * topology_max_dies_per_package();
- zone_devices = kzalloc_objs(struct platform_device *, max_zones,
- GFP_KERNEL);
+ zone_devices = kzalloc_objs(struct platform_device *, max_zones);
if (!zone_devices)
return -ENOMEM;
return -ENOENT;
data->num_sensors = err;
- data->sensors = kzalloc_objs(*data->sensors, data->num_sensors,
- GFP_KERNEL);
+ data->sensors = kzalloc_objs(*data->sensors, data->num_sensors);
if (!data->sensors)
return -ENOMEM;
struct device *real_dev = dev->parent;
nr_pages = tmc_pages->nr_pages;
- tmc_pages->daddrs = kzalloc_objs(*tmc_pages->daddrs, nr_pages,
- GFP_KERNEL);
+ tmc_pages->daddrs = kzalloc_objs(*tmc_pages->daddrs, nr_pages);
if (!tmc_pages->daddrs)
return -ENOMEM;
tmc_pages->pages = kzalloc_objs(*tmc_pages->pages, nr_pages);
if (gi2c->is_tx_multi_desc_xfer) {
tx_multi_xfer->dma_buf = kcalloc(num, sizeof(void *), GFP_KERNEL);
- tx_multi_xfer->dma_addr = kzalloc_objs(dma_addr_t, num,
- GFP_KERNEL);
+ tx_multi_xfer->dma_addr = kzalloc_objs(dma_addr_t, num);
if (!tx_multi_xfer->dma_buf || !tx_multi_xfer->dma_addr) {
ret = -ENOMEM;
goto err;
if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
return -EINVAL;
- rdwr_pa = kmalloc_objs(struct i2c_msg, rdwr_arg.nmsgs,
- GFP_KERNEL);
+ rdwr_pa = kmalloc_objs(struct i2c_msg, rdwr_arg.nmsgs);
if (!rdwr_pa)
return -ENOMEM;
void *bounce __free(kfree) = NULL;
void *dma_buf = buf;
- struct i3c_dma *dma_xfer __free(kfree) = kzalloc_obj(*dma_xfer,
- GFP_KERNEL);
+ struct i3c_dma *dma_xfer __free(kfree) = kzalloc_obj(*dma_xfer);
if (!dma_xfer)
return NULL;
rh->resp = dma_alloc_coherent(rings->sysdev, resps_sz,
&rh->resp_dma, GFP_KERNEL);
rh->src_xfers =
- kmalloc_objs(*rh->src_xfers, rh->xfer_entries,
- GFP_KERNEL);
+ kmalloc_objs(*rh->src_xfers, rh->xfer_entries);
ret = -ENOMEM;
if (!rh->xfer || !rh->resp || !rh->src_xfers)
goto err_out;
ndev = rcu_dereference_protected(attr->ndev, 1);
if (ndev) {
- entry->ndev_storage = kzalloc_obj(*entry->ndev_storage,
- GFP_KERNEL);
+ entry->ndev_storage = kzalloc_obj(*entry->ndev_storage);
if (!entry->ndev_storage) {
kfree(entry);
return NULL;
int count = 0;
u32 i;
- cm_dev = kzalloc_flex(*cm_dev, port, ib_device->phys_port_cnt,
- GFP_KERNEL);
+ cm_dev = kzalloc_flex(*cm_dev, port, ib_device->phys_port_cnt);
if (!cm_dev)
return -ENOMEM;
rt = &id->route;
rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
- rt->path_rec = kmalloc_objs(*rt->path_rec, rt->num_pri_alt_paths,
- GFP_KERNEL);
+ rt->path_rec = kmalloc_objs(*rt->path_rec, rt->num_pri_alt_paths);
if (!rt->path_rec)
goto err;
struct rdma_route *route = &work->id->id.route;
if (!route->path_rec_inbound) {
- route->path_rec_inbound = kzalloc_obj(*route->path_rec_inbound,
- GFP_KERNEL);
+ route->path_rec_inbound = kzalloc_obj(*route->path_rec_inbound);
if (!route->path_rec_inbound)
return -ENOMEM;
}
struct rdma_route *route = &work->id->id.route;
if (!route->path_rec_outbound) {
- route->path_rec_outbound = kzalloc_obj(*route->path_rec_outbound,
- GFP_KERNEL);
+ route->path_rec_outbound = kzalloc_obj(*route->path_rec_outbound);
if (!route->path_rec_outbound)
return -ENOMEM;
}
struct net_device *ndev)
{
unsigned int i;
- struct netdev_event_work *ndev_work = kmalloc_obj(*ndev_work,
- GFP_KERNEL);
+ struct netdev_event_work *ndev_work = kmalloc_obj(*ndev_work);
if (!ndev_work)
return NOTIFY_DONE;
int i, ret, count = 0;
u32 nents = 0;
- ctx->reg = kzalloc_objs(*ctx->reg, DIV_ROUND_UP(nr_bvec, pages_per_mr),
- GFP_KERNEL);
+ ctx->reg = kzalloc_objs(*ctx->reg, DIV_ROUND_UP(nr_bvec, pages_per_mr));
if (!ctx->reg)
return -ENOMEM;
* Build scatterlist from bvecs using the iterator. This follows
* the pattern from __blk_rq_map_sg.
*/
- ctx->reg[0].sgt.sgl = kmalloc_objs(*ctx->reg[0].sgt.sgl, nr_bvec,
- GFP_KERNEL);
+ ctx->reg[0].sgt.sgl = kmalloc_objs(*ctx->reg[0].sgt.sgl, nr_bvec);
if (!ctx->reg[0].sgt.sgl) {
ret = -ENOMEM;
goto out_free_reg;
s = rdma_start_port(device);
e = rdma_end_port(device);
- sa_dev = kzalloc_flex(*sa_dev, port, size_add(size_sub(e, s), 1),
- GFP_KERNEL);
+ sa_dev = kzalloc_flex(*sa_dev, port, size_add(size_sub(e, s), 1));
if (!sa_dev)
return -ENOMEM;
* Two extra attribue elements here, one for the lifespan entry and
* one to NULL terminate the list for the sysfs core code
*/
- data = kzalloc_flex(*data, attrs, size_add(stats->num_counters, 1),
- GFP_KERNEL);
+ data = kzalloc_flex(*data, attrs, size_add(stats->num_counters, 1));
if (!data)
goto err_free_stats;
data->group.attrs = kzalloc_objs(*data->group.attrs,
* Two extra attribue elements here, one for the lifespan entry and
* one to NULL terminate the list for the sysfs core code
*/
- data = kzalloc_flex(*data, attrs, size_add(stats->num_counters, 1),
- GFP_KERNEL);
+ data = kzalloc_flex(*data, attrs, size_add(stats->num_counters, 1));
if (!data)
goto err_free_stats;
- group->attrs = kzalloc_objs(*group->attrs, stats->num_counters + 2,
- GFP_KERNEL);
+ group->attrs = kzalloc_objs(*group->attrs, stats->num_counters + 2);
if (!group->attrs)
goto err_free_data;
s = rdma_start_port(device);
e = rdma_end_port(device);
- umad_dev = kzalloc_flex(*umad_dev, ports, size_add(size_sub(e, s), 1),
- GFP_KERNEL);
+ umad_dev = kzalloc_flex(*umad_dev, ports, size_add(size_sub(e, s), 1));
if (!umad_dev)
return -ENOMEM;
goto err_put;
}
- flow_attr = kzalloc_flex(*flow_attr, flows, cmd.flow_attr.num_of_specs,
- GFP_KERNEL);
+ flow_attr = kzalloc_flex(*flow_attr, flows, cmd.flow_attr.num_of_specs);
if (!flow_attr) {
err = -ENOMEM;
goto err_put;
goto fail;
}
- rcfw->crsqe_tbl = kzalloc_objs(*rcfw->crsqe_tbl, cmdq->hwq.max_elements,
- GFP_KERNEL);
+ rcfw->crsqe_tbl = kzalloc_objs(*rcfw->crsqe_tbl, cmdq->hwq.max_elements);
if (!rcfw->crsqe_tbl)
goto fail;
rdev = qp->rdev;
/* Create a shadow QP to handle the QP1 traffic */
- sqp_tbl = kzalloc_objs(*sqp_tbl, BNXT_RE_MAX_GSI_SQP_ENTRIES,
- GFP_KERNEL);
+ sqp_tbl = kzalloc_objs(*sqp_tbl, BNXT_RE_MAX_GSI_SQP_ENTRIES);
if (!sqp_tbl)
return -ENOMEM;
rdev->gsi_ctx.sqp_tbl = sqp_tbl;
cq->qplib_cq.dpi = &uctx->dpi;
} else {
cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
- cq->cql = kzalloc_objs(struct bnxt_qplib_cqe, cq->max_cql,
- GFP_KERNEL);
+ cq->cql = kzalloc_objs(struct bnxt_qplib_cqe, cq->max_cql);
if (!cq->cql) {
rc = -ENOMEM;
goto fail;
srq->start_idx = 0;
srq->last_idx = srq->hwq.max_elements - 1;
if (!srq->hwq.is_user) {
- srq->swq = kzalloc_objs(*srq->swq, srq->hwq.max_elements,
- GFP_KERNEL);
+ srq->swq = kzalloc_objs(*srq->swq, srq->hwq.max_elements);
if (!srq->swq) {
rc = -ENOMEM;
goto fail;
goto fail;
}
- rcfw->crsqe_tbl = kzalloc_objs(*rcfw->crsqe_tbl, cmdq->hwq.max_elements,
- GFP_KERNEL);
+ rcfw->crsqe_tbl = kzalloc_objs(*rcfw->crsqe_tbl, cmdq->hwq.max_elements);
if (!rcfw->crsqe_tbl)
goto fail;
}
if (!user) {
- wq->sq.sw_sq = kzalloc_objs(*wq->sq.sw_sq, wq->sq.size,
- GFP_KERNEL);
+ wq->sq.sw_sq = kzalloc_objs(*wq->sq.sw_sq, wq->sq.size);
if (!wq->sq.sw_sq) {
ret = -ENOMEM;
goto free_rq_qid;//FIXME
}
if (need_rq) {
- wq->rq.sw_rq = kzalloc_objs(*wq->rq.sw_rq, wq->rq.size,
- GFP_KERNEL);
+ wq->rq.sw_rq = kzalloc_objs(*wq->rq.sw_rq, wq->rq.size);
if (!wq->rq.sw_rq) {
ret = -ENOMEM;
goto free_sw_sq;
}
memset(&uresp, 0, sizeof(uresp));
if (t4_sq_onchip(&qhp->wq.sq)) {
- ma_sync_key_mm = kmalloc_obj(*ma_sync_key_mm,
- GFP_KERNEL);
+ ma_sync_key_mm = kmalloc_obj(*ma_sync_key_mm);
if (!ma_sync_key_mm) {
ret = -ENOMEM;
goto err_free_rq_db_key;
chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);
chunk_list->size = chunk_list_size;
- chunk_list->chunks = kzalloc_objs(*chunk_list->chunks, chunk_list_size,
- GFP_KERNEL);
+ chunk_list->chunks = kzalloc_objs(*chunk_list->chunks, chunk_list_size);
if (!chunk_list->chunks)
return -ENOMEM;
vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
}
/* build new map */
- newmap = kzalloc_flex(*newmap, map, roundup_pow_of_two(num_vls),
- GFP_KERNEL);
+ newmap = kzalloc_flex(*newmap, map, roundup_pow_of_two(num_vls));
if (!newmap)
goto bail;
newmap->actual_vls = num_vls;
int sz = roundup_pow_of_two(vl_scontexts[i]);
/* only allocate once */
- newmap->map[i] = kzalloc_flex(*newmap->map[i], ksc, sz,
- GFP_KERNEL);
+ newmap->map[i] = kzalloc_flex(*newmap->map[i], ksc, sz);
if (!newmap->map[i])
goto bail;
newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
{
int ret = 0;
- fd->entry_to_rb = kzalloc_objs(*fd->entry_to_rb, uctxt->expected_count,
- GFP_KERNEL);
+ fd->entry_to_rb = kzalloc_objs(*fd->entry_to_rb, uctxt->expected_count);
if (!fd->entry_to_rb)
return -ENOMEM;
static int hns_roce_alloc_dfx_cnt(struct hns_roce_dev *hr_dev)
{
- hr_dev->dfx_cnt = kvzalloc_objs(atomic64_t, HNS_ROCE_DFX_CNT_TOTAL,
- GFP_KERNEL);
+ hr_dev->dfx_cnt = kvzalloc_objs(atomic64_t, HNS_ROCE_DFX_CNT_TOTAL);
if (!hr_dev->dfx_cnt)
return -ENOMEM;
goto out;
}
- dev->eq_vec = kmalloc_objs(*dev->eq_vec, dev->lif_cfg.eq_count,
- GFP_KERNEL);
+ dev->eq_vec = kmalloc_objs(*dev->eq_vec, dev->lif_cfg.eq_count);
if (!dev->eq_vec) {
rc = -ENOMEM;
goto out;
dev->lif_cfg.eq_count = eq_i;
- dev->aq_vec = kmalloc_objs(*dev->aq_vec, dev->lif_cfg.aq_count,
- GFP_KERNEL);
+ dev->aq_vec = kmalloc_objs(*dev->aq_vec, dev->lif_cfg.aq_count);
if (!dev->aq_vec) {
rc = -ENOMEM;
goto out;
ionic_queue_dbell_init(&qp->sq, qp->qpid);
- qp->sq_meta = kmalloc_objs(*qp->sq_meta, (u32)qp->sq.mask + 1,
- GFP_KERNEL);
+ qp->sq_meta = kmalloc_objs(*qp->sq_meta, (u32)qp->sq.mask + 1);
if (!qp->sq_meta) {
rc = -ENOMEM;
goto err_sq_meta;
ionic_queue_dbell_init(&qp->rq, qp->qpid);
- qp->rq_meta = kmalloc_objs(*qp->rq_meta, (u32)qp->rq.mask + 1,
- GFP_KERNEL);
+ qp->rq_meta = kmalloc_objs(*qp->rq_meta, (u32)qp->rq.mask + 1);
if (!qp->rq_meta) {
rc = -ENOMEM;
goto err_rq_meta;
dev->hw_stats_count = hw_stats_count;
/* alloc and init array of names, for alloc_hw_stats */
- dev->hw_stats_hdrs = kzalloc_objs(*dev->hw_stats_hdrs, hw_stats_count,
- GFP_KERNEL);
+ dev->hw_stats_hdrs = kzalloc_objs(*dev->hw_stats_hdrs, hw_stats_count);
if (!dev->hw_stats_hdrs) {
rc = -ENOMEM;
goto err_dma;
cs->queue_stats_count = hw_stats_count;
/* alloc and init array of names */
- cs->stats_hdrs = kzalloc_objs(*cs->stats_hdrs, hw_stats_count,
- GFP_KERNEL);
+ cs->stats_hdrs = kzalloc_objs(*cs->stats_hdrs, hw_stats_count);
if (!cs->stats_hdrs) {
rc = -ENOMEM;
goto err_dma;
}
if (stats_type & IONIC_LIF_RDMA_STAT_QP) {
- dev->counter_stats = kzalloc_obj(*dev->counter_stats,
- GFP_KERNEL);
+ dev->counter_stats = kzalloc_obj(*dev->counter_stats);
if (!dev->counter_stats)
return;
ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI6, vlan_id=%d, MAC=%pM\n",
&ifp->addr, rdma_vlan_dev_vlan_id(ip_dev),
ip_dev->dev_addr);
- child_listen_node = kzalloc_obj(*child_listen_node,
- GFP_KERNEL);
+ child_listen_node = kzalloc_obj(*child_listen_node);
ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n",
child_listen_node);
if (!child_listen_node) {
"CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
&ifa->ifa_address, rdma_vlan_dev_vlan_id(ip_dev),
ip_dev->dev_addr);
- child_listen_node = kzalloc_obj(*child_listen_node,
- GFP_KERNEL);
+ child_listen_node = kzalloc_obj(*child_listen_node);
cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n",
child_listen_node);
int i;
rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX;
- rf->msix_entries = kzalloc_objs(*rf->msix_entries, rf->msix_count,
- GFP_KERNEL);
+ rf->msix_entries = kzalloc_objs(*rf->msix_entries, rf->msix_count);
if (!rf->msix_entries)
return -ENOMEM;
return -ENOMEM;
hw->num_io_regions = le16_to_cpu(idc_priv->num_memory_regions);
- hw->io_regs = kzalloc_objs(struct irdma_mmio_region, hw->num_io_regions,
- GFP_KERNEL);
+ hw->io_regs = kzalloc_objs(struct irdma_mmio_region, hw->num_io_regions);
if (!hw->io_regs) {
iounmap(hw->rdma_reg.addr);
return status;
iwqp->kqp.sq_wrid_mem =
- kzalloc_objs(*iwqp->kqp.sq_wrid_mem, ukinfo->sq_depth,
- GFP_KERNEL);
+ kzalloc_objs(*iwqp->kqp.sq_wrid_mem, ukinfo->sq_depth);
if (!iwqp->kqp.sq_wrid_mem)
return -ENOMEM;
iwqp->kqp.rq_wrid_mem =
- kzalloc_objs(*iwqp->kqp.rq_wrid_mem, ukinfo->rq_depth,
- GFP_KERNEL);
+ kzalloc_objs(*iwqp->kqp.rq_wrid_mem, ukinfo->rq_depth);
if (!iwqp->kqp.rq_wrid_mem) {
kfree(iwqp->kqp.sq_wrid_mem);
if (!mlx4_is_master(dev->dev))
return 0;
- dev->sriov.alias_guid.sa_client = kzalloc_obj(*dev->sriov.alias_guid.sa_client,
- GFP_KERNEL);
+ dev->sriov.alias_guid.sa_client = kzalloc_obj(*dev->sriov.alias_guid.sa_client);
if (!dev->sriov.alias_guid.sa_client)
return -ENOMEM;
if (!tun_qp->ring)
return -ENOMEM;
- tun_qp->tx_ring = kzalloc_objs(struct mlx4_ib_tun_tx_buf, nmbr_bufs,
- GFP_KERNEL);
+ tun_qp->tx_ring = kzalloc_objs(struct mlx4_ib_tun_tx_buf, nmbr_bufs);
if (!tun_qp->tx_ring) {
kfree(tun_qp->ring);
tun_qp->ring = NULL;
if (mlx4_is_bonded(dev))
for (i = 1; i < ibdev->num_ports ; ++i) {
new_counter_index =
- kmalloc_obj(struct counter_index,
- GFP_KERNEL);
+ kmalloc_obj(struct counter_index);
if (!new_counter_index) {
err = -ENOMEM;
goto err_counter;
* gids (operational)
* mcg_table
*/
- port->dentr_ar = kzalloc_obj(struct mlx4_ib_iov_sysfs_attr_ar,
- GFP_KERNEL);
+ port->dentr_ar = kzalloc_obj(struct mlx4_ib_iov_sysfs_attr_ar);
if (!port->dentr_ar) {
ret = -ENOMEM;
goto err;
skip_non_qcounters:
cnts->num_op_counters = num_op_counters;
num_counters += num_op_counters;
- cnts->descs = kzalloc_objs(struct rdma_stat_desc, num_counters,
- GFP_KERNEL);
+ cnts->descs = kzalloc_objs(struct rdma_stat_desc, num_counters);
if (!cnts->descs)
return -ENOMEM;
if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
return -EINVAL;
- desc_data = kzalloc_objs(*desc_data, cntrs_data->ncounters,
- GFP_KERNEL);
+ desc_data = kzalloc_objs(*desc_data, cntrs_data->ncounters);
if (!desc_data)
return -ENOMEM;
for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++) {
dev->flow_db->rdma_transport_rx[i] =
- kzalloc_objs(struct mlx5_ib_flow_prio, dev->num_ports,
- GFP_KERNEL);
+ kzalloc_objs(struct mlx5_ib_flow_prio, dev->num_ports);
if (!dev->flow_db->rdma_transport_rx[i])
goto free_rdma_transport_rx;
}
for (j = 0; j < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; j++) {
dev->flow_db->rdma_transport_tx[j] =
- kzalloc_objs(struct mlx5_ib_flow_prio, dev->num_ports,
- GFP_KERNEL);
+ kzalloc_objs(struct mlx5_ib_flow_prio, dev->num_ports);
if (!dev->flow_db->rdma_transport_tx[j])
goto free_rdma_transport_tx;
}
return -ENOMEM;
gsi->outstanding_wrs =
- kzalloc_objs(*gsi->outstanding_wrs, attr->cap.max_send_wr,
- GFP_KERNEL);
+ kzalloc_objs(*gsi->outstanding_wrs, attr->cap.max_send_wr);
if (!gsi->outstanding_wrs) {
ret = -ENOMEM;
goto err_free_tx;
int i;
for (i = 0; i < num; i++) {
- async_create = kzalloc_obj(struct mlx5r_async_create_mkey,
- GFP_KERNEL);
+ async_create = kzalloc_obj(struct mlx5r_async_create_mkey);
if (!async_create)
return -ENOMEM;
mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in,
sizeof(*qp->sq.wr_data), GFP_KERNEL);
qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
sizeof(*qp->rq.wrid), GFP_KERNEL);
- qp->sq.w_list = kvmalloc_objs(*qp->sq.w_list, qp->sq.wqe_cnt,
- GFP_KERNEL);
+ qp->sq.w_list = kvmalloc_objs(*qp->sq.w_list, qp->sq.wqe_cnt);
qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
sizeof(*qp->sq.wqe_head), GFP_KERNEL);
if (!dma_list)
return -ENOMEM;
- buf->page_list = kmalloc_objs(*buf->page_list, npages,
- GFP_KERNEL);
+ buf->page_list = kmalloc_objs(*buf->page_list, npages);
if (!buf->page_list)
goto err_out;
buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
GFP_KERNEL);
- buddy->num_free = kzalloc_objs(*buddy->num_free, (buddy->max_order + 1),
- GFP_KERNEL);
+ buddy->num_free = kzalloc_objs(*buddy->num_free, (buddy->max_order + 1));
if (!buddy->bits || !buddy->num_free)
goto err_out;
static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
{
mutex_init(&dev->dev_lock);
- dev->cq_tbl = kzalloc_objs(struct ocrdma_cq *, OCRDMA_MAX_CQ,
- GFP_KERNEL);
+ dev->cq_tbl = kzalloc_objs(struct ocrdma_cq *, OCRDMA_MAX_CQ);
if (!dev->cq_tbl)
goto alloc_err;
if (dev->attr.max_qp) {
- dev->qp_tbl = kzalloc_objs(struct ocrdma_qp *, OCRDMA_MAX_QP,
- GFP_KERNEL);
+ dev->qp_tbl = kzalloc_objs(struct ocrdma_qp *, OCRDMA_MAX_QP);
if (!dev->qp_tbl)
goto alloc_err;
}
void *va;
dma_addr_t pa;
- mr->pbl_table = kzalloc_objs(struct ocrdma_pbl, mr->num_pbls,
- GFP_KERNEL);
+ mr->pbl_table = kzalloc_objs(struct ocrdma_pbl, mr->num_pbls);
if (!mr->pbl_table)
return -ENOMEM;
mutex_init(&dev->port_mutex);
spin_lock_init(&dev->desc_lock);
- dev->cq_tbl = kzalloc_objs(struct pvrdma_cq *, dev->dsr->caps.max_cq,
- GFP_KERNEL);
+ dev->cq_tbl = kzalloc_objs(struct pvrdma_cq *, dev->dsr->caps.max_cq);
if (!dev->cq_tbl)
return ret;
spin_lock_init(&dev->cq_tbl_lock);
- dev->qp_tbl = kzalloc_objs(struct pvrdma_qp *, dev->dsr->caps.max_qp,
- GFP_KERNEL);
+ dev->qp_tbl = kzalloc_objs(struct pvrdma_qp *, dev->dsr->caps.max_qp);
if (!dev->qp_tbl)
goto err_cq_free;
spin_lock_init(&dev->qp_tbl_lock);
}
/* Allocate GID table */
- dev->sgid_tbl = kzalloc_objs(union ib_gid, dev->dsr->caps.gid_tbl_len,
- GFP_KERNEL);
+ dev->sgid_tbl = kzalloc_objs(union ib_gid, dev->dsr->caps.gid_tbl_len);
if (!dev->sgid_tbl) {
ret = -ENOMEM;
goto err_free_uar_table;
ipoib_napi_add(dev);
/* Allocate RX/TX "rings" to hold queued skbs */
- priv->rx_ring = kzalloc_objs(*priv->rx_ring, ipoib_recvq_size,
- GFP_KERNEL);
+ priv->rx_ring = kzalloc_objs(*priv->rx_ring, ipoib_recvq_size);
if (!priv->rx_ring)
goto out;
{
int ret;
- isert_conn->login_desc = kzalloc_obj(*isert_conn->login_desc,
- GFP_KERNEL);
+ isert_conn->login_desc = kzalloc_obj(*isert_conn->login_desc);
if (!isert_conn->login_desc)
return -ENOMEM;
enum ib_mr_type mr_type;
int i, err = -ENOMEM;
- clt_path->reqs = kzalloc_objs(*clt_path->reqs, clt_path->queue_depth,
- GFP_KERNEL);
+ clt_path->reqs = kzalloc_objs(*clt_path->reqs, clt_path->queue_depth);
if (!clt_path->reqs)
return -ENOMEM;
struct rtrs_srv_op *id;
int i, ret;
- srv_path->ops_ids = kzalloc_objs(*srv_path->ops_ids, srv->queue_depth,
- GFP_KERNEL);
+ srv_path->ops_ids = kzalloc_objs(*srv_path->ops_ids, srv->queue_depth);
if (!srv_path->ops_ids)
goto err;
srv_path->stats->srv_path = srv_path;
- srv_path->dma_addr = kzalloc_objs(*srv_path->dma_addr, srv->queue_depth,
- GFP_KERNEL);
+ srv_path->dma_addr = kzalloc_objs(*srv_path->dma_addr, srv->queue_depth);
if (!srv_path->dma_addr)
goto err_free_percpu;
if (nbufs == 1) {
ioctx->rw_ctxs = &ioctx->s_rw_ctx;
} else {
- ioctx->rw_ctxs = kmalloc_objs(*ioctx->rw_ctxs, nbufs,
- GFP_KERNEL);
+ ioctx->rw_ctxs = kmalloc_objs(*ioctx->rw_ctxs, nbufs);
if (!ioctx->rw_ctxs)
return -ENOMEM;
}
int ret = 0;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
- master->streams = kzalloc_objs(*master->streams, fwspec->num_ids,
- GFP_KERNEL);
+ master->streams = kzalloc_objs(*master->streams, fwspec->num_ids);
if (!master->streams)
return -ENOMEM;
master->num_streams = fwspec->num_ids;
{
int res;
- tpci200->slots = kzalloc_objs(struct tpci200_slot, TPCI200_NB_SLOT,
- GFP_KERNEL);
+ tpci200->slots = kzalloc_objs(struct tpci200_slot, TPCI200_NB_SLOT);
if (tpci200->slots == NULL)
return -ENOMEM;
static int alpine_msix_init(struct device_node *node, struct device_node *parent)
{
- struct alpine_msix_data *priv __free(kfree) = kzalloc_obj(*priv,
- GFP_KERNEL);
+ struct alpine_msix_data *priv __free(kfree) = kzalloc_obj(*priv);
struct resource res;
int ret;
else if (intc->n_words != n_words)
return -EINVAL;
- cpu = intc->cpus[idx] = kzalloc_flex(*cpu, enable_cache, n_words,
- GFP_KERNEL);
+ cpu = intc->cpus[idx] = kzalloc_flex(*cpu, enable_cache, n_words);
if (!cpu)
return -ENOMEM;
return -EINVAL;
}
- cpu = intc->cpus[idx] = kzalloc_flex(*cpu, mask_cache, n_words,
- GFP_KERNEL);
+ cpu = intc->cpus[idx] = kzalloc_flex(*cpu, mask_cache, n_words);
if (!cpu)
return -ENOMEM;
goto out_unmap;
}
- data->l1_data = kzalloc_objs(*data->l1_data, data->num_parent_irqs,
- GFP_KERNEL);
+ data->l1_data = kzalloc_objs(*data->l1_data, data->num_parent_irqs);
if (!data->l1_data) {
ret = -ENOMEM;
goto out_free_l1_data;
{
int i;
- its->collections = kzalloc_objs(*its->collections, nr_cpu_ids,
- GFP_KERNEL);
+ its->collections = kzalloc_objs(*its->collections, nr_cpu_ids);
if (!its->collections)
return -ENOMEM;
unsigned int n;
int ret;
- struct gicv5_iwb_chip_data *iwb_node __free(kfree) = kzalloc_obj(*iwb_node,
- GFP_KERNEL);
+ struct gicv5_iwb_chip_data *iwb_node __free(kfree) = kzalloc_obj(*iwb_node);
if (!iwb_node)
return ERR_PTR(-ENOMEM);
if (count <= 0)
return -EINVAL;
- rintc_acpi_data = kzalloc_objs(*rintc_acpi_data, count,
- GFP_KERNEL);
+ rintc_acpi_data = kzalloc_objs(*rintc_acpi_data, count);
if (!rintc_acpi_data)
return -ENOMEM;
}
if (capi_ttyminors <= 0)
capi_ttyminors = CAPINC_NR_PORTS;
- capiminors = kzalloc_objs(struct capiminor *, capi_ttyminors,
- GFP_KERNEL);
+ capiminors = kzalloc_objs(struct capiminor *, capi_ttyminors);
if (!capiminors)
return -ENOMEM;
control = controls[param->control_id];
/* Alloc & initialize state */
- pm121_sys_state[loop_id] = kmalloc_obj(struct pm121_sys_state,
- GFP_KERNEL);
+ pm121_sys_state[loop_id] = kmalloc_obj(struct pm121_sys_state);
if (pm121_sys_state[loop_id] == NULL) {
printk(KERN_WARNING "pm121: Memory allocation error\n");
goto fail;
};
/* Alloc & initialize state */
- wf_smu_drive_fans = kmalloc_obj(struct wf_smu_drive_fans_state,
- GFP_KERNEL);
+ wf_smu_drive_fans = kmalloc_obj(struct wf_smu_drive_fans_state);
if (wf_smu_drive_fans == NULL) {
printk(KERN_WARNING "windfarm: Memory allocation error"
" max fan speed\n");
};
/* Alloc & initialize state */
- wf_smu_slots_fans = kmalloc_obj(struct wf_smu_slots_fans_state,
- GFP_KERNEL);
+ wf_smu_slots_fans = kmalloc_obj(struct wf_smu_slots_fans_state);
if (wf_smu_slots_fans == NULL) {
printk(KERN_WARNING "windfarm: Memory allocation error"
" max fan speed\n");
*/
ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
sizeof(struct disk_exception);
- ps->callbacks = kvzalloc_objs(*ps->callbacks, ps->exceptions_per_area,
- GFP_KERNEL);
+ ps->callbacks = kvzalloc_objs(*ps->callbacks, ps->exceptions_per_area);
if (!ps->callbacks)
return -ENOMEM;
for (i = 0; i < ORIGIN_HASH_SIZE; i++)
INIT_LIST_HEAD(_origins + i);
- _dm_origins = kmalloc_objs(struct list_head, ORIGIN_HASH_SIZE,
- GFP_KERNEL);
+ _dm_origins = kmalloc_objs(struct list_head, ORIGIN_HASH_SIZE);
if (!_dm_origins) {
DMERR("unable to allocate memory for _dm_origins");
kfree(_origins);
unsigned int bzone_id;
/* Metadata block array for the chunk mapping table */
- zmd->map_mblk = kzalloc_objs(struct dmz_mblock *, zmd->nr_map_blocks,
- GFP_KERNEL);
+ zmd->map_mblk = kzalloc_objs(struct dmz_mblock *, zmd->nr_map_blocks);
if (!zmd->map_mblk)
return -ENOMEM;
}
err = -ENOMEM;
- conf->strip_zone = kzalloc_objs(struct strip_zone, conf->nr_strip_zones,
- GFP_KERNEL);
+ conf->strip_zone = kzalloc_objs(struct strip_zone, conf->nr_strip_zones);
if (!conf->strip_zone)
goto abort;
conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
if (!conf)
goto abort;
- conf->nr_pending = kzalloc_objs(atomic_t, BARRIER_BUCKETS_NR,
- GFP_KERNEL);
+ conf->nr_pending = kzalloc_objs(atomic_t, BARRIER_BUCKETS_NR);
if (!conf->nr_pending)
goto abort;
- conf->nr_waiting = kzalloc_objs(atomic_t, BARRIER_BUCKETS_NR,
- GFP_KERNEL);
+ conf->nr_waiting = kzalloc_objs(atomic_t, BARRIER_BUCKETS_NR);
if (!conf->nr_waiting)
goto abort;
goto err;
ppl_conf->count = conf->raid_disks;
- ppl_conf->child_logs = kzalloc_objs(struct ppl_log, ppl_conf->count,
- GFP_KERNEL);
+ ppl_conf->child_logs = kzalloc_objs(struct ppl_log, ppl_conf->count);
if (!ppl_conf->child_logs) {
ret = -ENOMEM;
goto err;
#endif
INIT_LIST_HEAD(&conf->free_list);
INIT_LIST_HEAD(&conf->pending_list);
- conf->pending_data = kzalloc_objs(struct r5pending_data, PENDING_IO_MAX,
- GFP_KERNEL);
+ conf->pending_data = kzalloc_objs(struct r5pending_data, PENDING_IO_MAX);
if (!conf->pending_data)
goto abort;
for (i = 0; i < PENDING_IO_MAX; i++)
struct flexcop_device *flexcop_device_kmalloc(size_t bus_specific_len)
{
void *bus;
- struct flexcop_device *fc = kzalloc_obj(struct flexcop_device,
- GFP_KERNEL);
+ struct flexcop_device *fc = kzalloc_obj(struct flexcop_device);
if (!fc) {
err("no memory");
return NULL;
{
int i;
- dvbdev->tsout_pads = kzalloc_objs(*dvbdev->tsout_pads, npads,
- GFP_KERNEL);
+ dvbdev->tsout_pads = kzalloc_objs(*dvbdev->tsout_pads, npads);
if (!dvbdev->tsout_pads)
return -ENOMEM;
- dvbdev->tsout_entity = kzalloc_objs(*dvbdev->tsout_entity, npads,
- GFP_KERNEL);
+ dvbdev->tsout_entity = kzalloc_objs(*dvbdev->tsout_entity, npads);
if (!dvbdev->tsout_entity)
return -ENOMEM;
struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg)
{
- struct dib0070_state *state = kzalloc_obj(struct dib0070_state,
- GFP_KERNEL);
+ struct dib0070_state *state = kzalloc_obj(struct dib0070_state);
if (state == NULL)
return NULL;
struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config)
{
- struct dib0090_fw_state *st = kzalloc_obj(struct dib0090_fw_state,
- GFP_KERNEL);
+ struct dib0090_fw_state *st = kzalloc_obj(struct dib0090_fw_state);
if (st == NULL)
return NULL;
struct i2c_adapter *i2c)
{
/* allocate memory for the internal state */
- struct s5h1420_state *state = kzalloc_obj(struct s5h1420_state,
- GFP_KERNEL);
+ struct s5h1420_state *state = kzalloc_obj(struct s5h1420_state);
u8 i;
if (state == NULL)
while (new_node->next_inode != NULL)
new_node = new_node->next_inode;
- new_node->next_inode = kmalloc_obj(struct stv0900_inode,
- GFP_KERNEL);
+ new_node->next_inode = kmalloc_obj(struct stv0900_inode);
if (new_node->next_inode != NULL)
new_node = new_node->next_inode;
else
dprintk("%s: Find Internal Structure!\n", __func__);
return STV0900_NO_ERROR;
} else {
- state->internal = kmalloc_obj(struct stv0900_internal,
- GFP_KERNEL);
+ state->internal = kmalloc_obj(struct stv0900_internal);
if (state->internal == NULL)
return STV0900_INVALID_HANDLE;
temp_int = append_internal(state->internal);
isi->pdata = of_device_get_match_data(dev);
- isi->pipes = kzalloc_objs(isi->pipes[0], isi->pdata->num_channels,
- GFP_KERNEL);
+ isi->pipes = kzalloc_objs(isi->pipes[0], isi->pdata->num_channels);
if (!isi->pipes)
return -ENOMEM;
}
pipe->partitions = DIV_ROUND_UP(format->width, div_size);
- pipe->part_table = kzalloc_objs(*pipe->part_table, pipe->partitions,
- GFP_KERNEL);
+ pipe->part_table = kzalloc_objs(*pipe->part_table, pipe->partitions);
if (!pipe->part_table)
return -ENOMEM;
program = program->next;
}
- pmt_secs = kzalloc_objs(struct vidtv_psi_table_pmt *, num_pmt,
- GFP_KERNEL);
+ pmt_secs = kzalloc_objs(struct vidtv_psi_table_pmt *, num_pmt);
if (!pmt_secs)
return NULL;
return ret;
}
/* allocate ent_devs */
- vimc->ent_devs = kzalloc_objs(*vimc->ent_devs, vimc->pipe_cfg->num_ents,
- GFP_KERNEL);
+ vimc->ent_devs = kzalloc_objs(*vimc->ent_devs, vimc->pipe_cfg->num_ents);
if (!vimc->ent_devs) {
ret = -ENOMEM;
goto err_v4l2_unregister;
struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
{
- struct cinergyt2_fe_state *s = kzalloc_obj(struct cinergyt2_fe_state,
- GFP_KERNEL);
+ struct cinergyt2_fe_state *s = kzalloc_obj(struct cinergyt2_fe_state);
if (s == NULL)
return NULL;
struct dvb_frontend * vp702x_fe_attach(struct dvb_usb_device *d)
{
- struct vp702x_fe_state *s = kzalloc_obj(struct vp702x_fe_state,
- GFP_KERNEL);
+ struct vp702x_fe_state *s = kzalloc_obj(struct vp702x_fe_state);
if (s == NULL)
goto error;
struct dvb_frontend * vp7045_fe_attach(struct dvb_usb_device *d)
{
- struct vp7045_fe_state *s = kzalloc_obj(struct vp7045_fe_state,
- GFP_KERNEL);
+ struct vp7045_fe_state *s = kzalloc_obj(struct vp7045_fe_state);
if (s == NULL)
goto error;
hdw->control_cnt = CTRLDEF_COUNT;
hdw->control_cnt += MPEGDEF_COUNT;
- hdw->controls = kzalloc_objs(struct pvr2_ctrl, hdw->control_cnt,
- GFP_KERNEL);
+ hdw->controls = kzalloc_objs(struct pvr2_ctrl, hdw->control_cnt);
if (!hdw->controls) goto fail;
hdw->hdw_desc = hdw_desc;
hdw->ir_scheme_active = hdw->hdw_desc->ir_scheme;
INIT_LIST_HEAD(&hdl->ctrls);
INIT_LIST_HEAD(&hdl->ctrl_refs);
hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
- hdl->buckets = kvzalloc_objs(hdl->buckets[0], hdl->nr_of_buckets,
- GFP_KERNEL);
+ hdl->buckets = kvzalloc_objs(hdl->buckets[0], hdl->nr_of_buckets);
hdl->error = hdl->buckets ? 0 : -ENOMEM;
v4l2_ctrl_handler_init_request(hdl);
return hdl->error;
return -ENOMEM;
/* allocate memory dynamically so as not to exceed stack frame size */
- ctrl_init_data = kzalloc_objs(*ctrl_init_data, NUM_FLASH_CTRLS,
- GFP_KERNEL);
+ ctrl_init_data = kzalloc_objs(*ctrl_init_data, NUM_FLASH_CTRLS);
if (!ctrl_init_data)
return -ENOMEM;
/* Drivers that support streams do not need the legacy pad config */
if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
- state->pads = kvzalloc_objs(*state->pads, sd->entity.num_pads,
- GFP_KERNEL);
+ state->pads = kvzalloc_objs(*state->pads, sd->entity.num_pads);
if (!state->pads) {
ret = -ENOMEM;
goto err;
goto out;
priv->mpt_txfidx_tail = -1;
- priv->SendCtl = kzalloc_objs(struct BufferControl, priv->tx_max_out,
- GFP_KERNEL);
+ priv->SendCtl = kzalloc_objs(struct BufferControl, priv->tx_max_out);
if (priv->SendCtl == NULL)
goto out_mpt_txfidx;
for (i = 0; i < priv->tx_max_out; i++)
goto out_SendCtl;
priv->mpt_rxfidx_tail = -1;
- priv->RcvCtl = kzalloc_objs(struct BufferControl, priv->max_buckets_out,
- GFP_KERNEL);
+ priv->RcvCtl = kzalloc_objs(struct BufferControl, priv->max_buckets_out);
if (priv->RcvCtl == NULL)
goto out_mpt_rxfidx;
for (i = 0; i < priv->max_buckets_out; i++)
* Forming a port
*/
if (!port_details) {
- port_details = kzalloc_obj(struct mptsas_portinfo_details,
- GFP_KERNEL);
+ port_details = kzalloc_obj(struct mptsas_portinfo_details);
if (!port_details)
goto out;
port_details->num_phys = 1;
goto err_config;
}
- msix_entries = kzalloc_objs(*msix_entries, TIMBERDALE_NR_IRQS,
- GFP_KERNEL);
+ msix_entries = kzalloc_objs(*msix_entries, TIMBERDALE_NR_IRQS);
if (!msix_entries)
goto err_config;
/* Allocate a writable buffer for this array */
count = var_size[variable_id];
long_tmp = vars[variable_id];
- longptr_tmp = kzalloc_objs(long, count,
- GFP_KERNEL);
+ longptr_tmp = kzalloc_objs(long, count);
vars[variable_id] = (long)longptr_tmp;
if (vars[variable_id] == 0) {
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
- ctx->olaps = kzalloc_objs(*ctx->olaps, ctx->nscalars,
- GFP_KERNEL);
+ ctx->olaps = kzalloc_objs(*ctx->olaps, ctx->nscalars);
if (!ctx->olaps) {
kfree(ctx->maps);
kfree(ctx);
} inbuf;
u32 sc;
- args = kzalloc_objs(*args, FASTRPC_CREATE_STATIC_PROCESS_NARGS,
- GFP_KERNEL);
+ args = kzalloc_objs(*args, FASTRPC_CREATE_STATIC_PROCESS_NARGS);
if (!args)
return -ENOMEM;
"[%s] **err: could not allocate DDCB **\n", __func__);
return -ENOMEM;
}
- queue->ddcb_req = kzalloc_objs(struct ddcb_requ *, queue->ddcb_max,
- GFP_KERNEL);
+ queue->ddcb_req = kzalloc_objs(struct ddcb_requ *, queue->ddcb_max);
if (!queue->ddcb_req) {
rc = -ENOMEM;
goto free_ddcbs;
}
- queue->ddcb_waitqs = kzalloc_objs(wait_queue_head_t, queue->ddcb_max,
- GFP_KERNEL);
+ queue->ddcb_waitqs = kzalloc_objs(wait_queue_head_t, queue->ddcb_max);
if (!queue->ddcb_waitqs) {
rc = -ENOMEM;
goto free_requs;
if (!aux_bus)
return -ENOMEM;
- aux_bus->aux_device_wrapper[0] = kzalloc_obj(*aux_bus->aux_device_wrapper[0],
- GFP_KERNEL);
+ aux_bus->aux_device_wrapper[0] = kzalloc_obj(*aux_bus->aux_device_wrapper[0]);
if (!aux_bus->aux_device_wrapper[0])
return -ENOMEM;
if (retval)
goto err_aux_dev_add_0;
- aux_bus->aux_device_wrapper[1] = kzalloc_obj(*aux_bus->aux_device_wrapper[1],
- GFP_KERNEL);
+ aux_bus->aux_device_wrapper[1] = kzalloc_obj(*aux_bus->aux_device_wrapper[1]);
if (!aux_bus->aux_device_wrapper[1]) {
retval = -ENOMEM;
goto err_aux_dev_add_0;
break;
case MEI_EXT_HDR_GSC:
gsc_f2h = (struct mei_ext_hdr_gsc_f2h *)ext;
- cb->ext_hdr = (struct mei_ext_hdr *) kzalloc_obj(*gsc_f2h,
- GFP_KERNEL);
+ cb->ext_hdr = (struct mei_ext_hdr *) kzalloc_obj(*gsc_f2h);
if (!cb->ext_hdr) {
cb->status = -ENOMEM;
goto discard;
* memory.
*/
DBUG_ON(part->channels != NULL);
- part->channels = kzalloc_objs(struct xpc_channel, XPC_MAX_NCHANNELS,
- GFP_KERNEL);
+ part->channels = kzalloc_objs(struct xpc_channel, XPC_MAX_NCHANNELS);
if (part->channels == NULL) {
dev_err(xpc_chan, "can't get memory for channels\n");
return xpNoMemory;
short partid;
struct xpc_partition *part;
- xpc_partitions = kzalloc_objs(struct xpc_partition, xp_max_npartitions,
- GFP_KERNEL);
+ xpc_partitions = kzalloc_objs(struct xpc_partition, xp_max_npartitions);
if (xpc_partitions == NULL) {
dev_err(xpc_part, "can't get memory for partition structure\n");
return -ENOMEM;
DBUG_ON(ch->flags & XPC_C_SETUP);
- ch_uv->cached_notify_gru_mq_desc = kmalloc_obj(struct gru_message_queue_desc,
- GFP_KERNEL);
+ ch_uv->cached_notify_gru_mq_desc = kmalloc_obj(struct gru_message_queue_desc);
if (ch_uv->cached_notify_gru_mq_desc == NULL)
return xpNoMemory;
newcfi = kmalloc_flex(*newcfi, chips, numvirtchips);
if (!newcfi)
return -ENOMEM;
- shared = kmalloc_objs(struct flchip_shared, cfi->numchips,
- GFP_KERNEL);
+ shared = kmalloc_objs(struct flchip_shared, cfi->numchips);
if (!shared) {
kfree(newcfi);
return -ENOMEM;
if (!num_erase_regions)
return 0;
- cfi->cfiq = kmalloc_flex(*cfi->cfiq, EraseRegionInfo, num_erase_regions,
- GFP_KERNEL);
+ cfi->cfiq = kmalloc_flex(*cfi->cfiq, EraseRegionInfo, num_erase_regions);
if (!cfi->cfiq)
return 0;
num_erase_regions = jedec_table[index].nr_regions;
- cfi->cfiq = kmalloc_flex(*cfi->cfiq, EraseRegionInfo, num_erase_regions,
- GFP_KERNEL);
+ cfi->cfiq = kmalloc_flex(*cfi->cfiq, EraseRegionInfo, num_erase_regions);
if (!cfi->cfiq) {
//xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
return 0;
for (i = 0; i < part->DataUnits; i++)
part->EUNInfo[i].Offset = 0xffffffff;
part->XferInfo =
- kmalloc_objs(struct xfer_info_t, part->header.NumTransferUnits,
- GFP_KERNEL);
+ kmalloc_objs(struct xfer_info_t, part->header.NumTransferUnits);
if (!part->XferInfo)
goto out_EUNInfo;
concat->mtd.erasesize = max_erasesize;
concat->mtd.numeraseregions = num_erase_region;
concat->mtd.eraseregions = erase_region_p =
- kmalloc_objs(struct mtd_erase_region_info, num_erase_region,
- GFP_KERNEL);
+ kmalloc_objs(struct mtd_erase_region_info, num_erase_region);
if (!erase_region_p) {
kfree(concat);
printk
blocksize = 0x1000;
/* Alloc */
- parts = kzalloc_objs(struct mtd_partition, BCM47XXPART_MAX_PARTS,
- GFP_KERNEL);
+ parts = kzalloc_objs(struct mtd_partition, BCM47XXPART_MAX_PARTS);
if (!parts)
return -ENOMEM;
if (err != 0 && err != -EINVAL)
pr_err("failed to parse \"brcm,trx-magic\" DT attribute, using default: %d\n", err);
- parts = kzalloc_objs(struct mtd_partition, TRX_PARSER_MAX_PARTS,
- GFP_KERNEL);
+ parts = kzalloc_objs(struct mtd_partition, TRX_PARSER_MAX_PARTS);
if (!parts)
return -ENOMEM;
goto free;
}
- parts = kzalloc_objs(*parts, of_get_child_count(ofpart_node),
- GFP_KERNEL);
+ parts = kzalloc_objs(*parts, of_get_child_count(ofpart_node));
if (!parts) {
res = -ENOMEM;
goto free;
if (!part->header_cache)
goto err;
- part->blocks = kzalloc_objs(struct block, part->total_blocks,
- GFP_KERNEL);
+ part->blocks = kzalloc_objs(struct block, part->total_blocks);
if (!part->blocks)
goto err;
/* Create array of pointers to the attributes */
- attributes = kzalloc_objs(struct attribute *, NUM_ATTRIBUTES + 1,
- GFP_KERNEL);
+ attributes = kzalloc_objs(struct attribute *, NUM_ATTRIBUTES + 1);
if (!attributes)
goto error3;
attributes[0] = &vendor_attribute->dev_attr.attr;
if (!vol)
continue;
- scan_eba[i] = kmalloc_objs(**scan_eba, vol->reserved_pebs,
- GFP_KERNEL);
+ scan_eba[i] = kmalloc_objs(**scan_eba, vol->reserved_pebs);
if (!scan_eba[i]) {
ret = -ENOMEM;
goto out_free;
}
- fm_eba[i] = kmalloc_objs(**fm_eba, vol->reserved_pebs,
- GFP_KERNEL);
+ fm_eba[i] = kmalloc_objs(**fm_eba, vol->reserved_pebs);
if (!fm_eba[i]) {
ret = -ENOMEM;
kfree(scan_eba[i]);
return NULL;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- SLAVE_AD_INFO(slave) = kzalloc_obj(struct ad_slave_info,
- GFP_KERNEL);
+ SLAVE_AD_INFO(slave) = kzalloc_obj(struct ad_slave_info);
if (!SLAVE_AD_INFO(slave)) {
kobject_put(&slave->kobj);
return NULL;
might_sleep();
- usable_slaves = kzalloc_flex(*usable_slaves, arr, bond->slave_cnt,
- GFP_KERNEL);
+ usable_slaves = kzalloc_flex(*usable_slaves, arr, bond->slave_cnt);
all_slaves = kzalloc_flex(*all_slaves, arr, bond->slave_cnt);
if (!usable_slaves || !all_slaves) {
ret = -ENOMEM;
ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
ocelot->devlink = felix->ds->devlink;
- port_phy_modes = kzalloc_objs(phy_interface_t, num_phys_ports,
- GFP_KERNEL);
+ port_phy_modes = kzalloc_objs(phy_interface_t, num_phys_ports);
if (!port_phy_modes)
return -ENOMEM;
struct devlink_region *region;
u64 size;
- priv->regions = kzalloc_objs(struct devlink_region *, num_regions,
- GFP_KERNEL);
+ priv->regions = kzalloc_objs(struct devlink_region *, num_regions);
if (!priv->regions)
return -ENOMEM;
if (!lp->rx_dma_addr)
return -ENOMEM;
- lp->tx_skbuff = kzalloc_objs(struct sk_buff *, lp->tx_ring_size,
- GFP_KERNEL);
+ lp->tx_skbuff = kzalloc_objs(struct sk_buff *, lp->tx_ring_size);
if (!lp->tx_skbuff)
return -ENOMEM;
- lp->rx_skbuff = kzalloc_objs(struct sk_buff *, lp->rx_ring_size,
- GFP_KERNEL);
+ lp->rx_skbuff = kzalloc_objs(struct sk_buff *, lp->rx_ring_size);
if (!lp->rx_skbuff)
return -ENOMEM;
if (!ring->desc_addr)
goto err;
- ring->pkt_info = kzalloc_objs(*ring->pkt_info, XGENE_ENET_NUM_DESC,
- GFP_KERNEL);
+ ring->pkt_info = kzalloc_objs(*ring->pkt_info, XGENE_ENET_NUM_DESC);
if (!ring->pkt_info)
goto err;
if (!info->n_pins)
return;
- info->pin_config = kzalloc_objs(struct ptp_pin_desc, info->n_pins,
- GFP_KERNEL);
+ info->pin_config = kzalloc_objs(struct ptp_pin_desc, info->n_pins);
if (!info->pin_config)
return;
if (!intf->tx_spb_cpu)
goto free_rx_edpkt_dma;
- intf->tx_cbs = kzalloc_objs(struct bcmasp_tx_cb, DESC_RING_COUNT,
- GFP_KERNEL);
+ intf->tx_cbs = kzalloc_objs(struct bcmasp_tx_cb, DESC_RING_COUNT);
if (!intf->tx_cbs)
goto free_tx_spb_dma;
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
- priv->tx_skb = kzalloc_objs(struct sk_buff *, priv->tx_ring_size,
- GFP_KERNEL);
+ priv->tx_skb = kzalloc_objs(struct sk_buff *, priv->tx_ring_size);
if (!priv->tx_skb) {
ret = -ENOMEM;
goto out_free_tx_ring;
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
- priv->tx_skb = kzalloc_objs(struct sk_buff *, priv->tx_ring_size,
- GFP_KERNEL);
+ priv->tx_skb = kzalloc_objs(struct sk_buff *, priv->tx_ring_size);
if (!priv->tx_skb) {
dev_err(kdev, "cannot allocate tx skb queue\n");
ret = -ENOMEM;
priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
priv->rx_c_index = 0;
priv->rx_read_ptr = 0;
- priv->rx_cbs = kzalloc_objs(struct bcm_sysport_cb, priv->num_rx_bds,
- GFP_KERNEL);
+ priv->rx_cbs = kzalloc_objs(struct bcm_sysport_cb, priv->num_rx_bds);
if (!priv->rx_cbs) {
netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
return -ENOMEM;
for (i = 0; i < bd->rx_nr_rings; i++) {
struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
- rxr->rx_tpa = kzalloc_objs(struct bnge_tpa_info, bn->max_tpa,
- GFP_KERNEL);
+ rxr->rx_tpa = kzalloc_objs(struct bnge_tpa_info, bn->max_tpa);
if (!rxr->rx_tpa)
goto err_free_tpa_info;
goto err_free_tpa_info;
rxr->rx_tpa[j].agg_arr = agg;
}
- rxr->rx_tpa_idx_map = kzalloc_obj(*rxr->rx_tpa_idx_map,
- GFP_KERNEL);
+ rxr->rx_tpa_idx_map = kzalloc_obj(*rxr->rx_tpa_idx_map);
if (!rxr->rx_tpa_idx_map)
goto err_free_tpa_info;
}
*/
num_vnics = 1;
- bn->vnic_info = kzalloc_objs(struct bnge_vnic_info, num_vnics,
- GFP_KERNEL);
+ bn->vnic_info = kzalloc_objs(struct bnge_vnic_info, num_vnics);
if (!bn->vnic_info)
return -ENOMEM;
struct bnge_dev *bd = bn->bd;
int i;
- bn->grp_info = kzalloc_objs(struct bnge_ring_grp_info, bd->nq_nr_rings,
- GFP_KERNEL);
+ bn->grp_info = kzalloc_objs(struct bnge_ring_grp_info, bd->nq_nr_rings);
if (!bn->grp_info)
return -ENOMEM;
for (i = 0; i < bd->nq_nr_rings; i++) {
nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
}
- bn->rx_ring = kzalloc_objs(struct bnge_rx_ring_info, bd->rx_nr_rings,
- GFP_KERNEL);
+ bn->rx_ring = kzalloc_objs(struct bnge_rx_ring_info, bd->rx_nr_rings);
if (!bn->rx_ring)
goto err_free_core;
bn->bnapi[i]->rx_ring = &bn->rx_ring[i];
}
- bn->tx_ring = kzalloc_objs(struct bnge_tx_ring_info, bd->tx_nr_rings,
- GFP_KERNEL);
+ bn->tx_ring = kzalloc_objs(struct bnge_tx_ring_info, bd->tx_nr_rings);
if (!bn->tx_ring)
goto err_free_core;
int nr_tbls, i;
rmem->depth = 2;
- ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES,
- GFP_KERNEL);
+ ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES);
if (!ctx_pg->ctx_pg_tbl)
return -ENOMEM;
nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
bp->fp = fp;
/* allocate sp objs */
- bp->sp_objs = kzalloc_objs(struct bnx2x_sp_objs, bp->fp_array_size,
- GFP_KERNEL);
+ bp->sp_objs = kzalloc_objs(struct bnx2x_sp_objs, bp->fp_array_size);
if (!bp->sp_objs)
goto alloc_err;
/* allocate fp_stats */
- bp->fp_stats = kzalloc_objs(struct bnx2x_fp_stats, bp->fp_array_size,
- GFP_KERNEL);
+ bp->fp_stats = kzalloc_objs(struct bnx2x_fp_stats, bp->fp_array_size);
if (!bp->fp_stats)
goto alloc_err;
BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
- bp->bnx2x_txq = kzalloc_objs(struct bnx2x_fp_txdata, txq_array_size,
- GFP_KERNEL);
+ bp->bnx2x_txq = kzalloc_objs(struct bnx2x_fp_txdata, txq_array_size);
if (!bp->bnx2x_txq)
goto alloc_err;
goto alloc_mem_err;
allocated += bp->context[i].size;
}
- bp->ilt->lines = kzalloc_objs(struct ilt_line, ILT_MAX_LINES,
- GFP_KERNEL);
+ bp->ilt->lines = kzalloc_objs(struct ilt_line, ILT_MAX_LINES);
if (!bp->ilt->lines)
goto alloc_mem_err;
else
set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
if (mc_num) {
- mc = kzalloc_objs(struct bnx2x_mcast_list_elem, mc_num,
- GFP_KERNEL);
+ mc = kzalloc_objs(struct bnx2x_mcast_list_elem, mc_num);
if (!mc) {
BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n");
return -ENOMEM;
num_vfs_param, iov->nr_virtfn);
/* allocate the vf array */
- bp->vfdb->vfs = kzalloc_objs(struct bnx2x_virtf, BNX2X_NR_VIRTFN(bp),
- GFP_KERNEL);
+ bp->vfdb->vfs = kzalloc_objs(struct bnx2x_virtf, BNX2X_NR_VIRTFN(bp));
if (!bp->vfdb->vfs) {
BNX2X_ERR("failed to allocate vf array\n");
err = -ENOMEM;
struct rx_agg_cmp *agg;
int i;
- rxr->rx_tpa = kzalloc_objs(struct bnxt_tpa_info, bp->max_tpa,
- GFP_KERNEL);
+ rxr->rx_tpa = kzalloc_objs(struct bnxt_tpa_info, bp->max_tpa);
if (!rxr->rx_tpa)
return -ENOMEM;
cpr->cp_desc_ring = kzalloc_objs(*cpr->cp_desc_ring, n);
if (!cpr->cp_desc_ring)
return -ENOMEM;
- cpr->cp_desc_mapping = kzalloc_objs(*cpr->cp_desc_mapping, n,
- GFP_KERNEL);
+ cpr->cp_desc_mapping = kzalloc_objs(*cpr->cp_desc_mapping, n);
if (!cpr->cp_desc_mapping)
return -ENOMEM;
return 0;
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
num_vnics++;
- bp->vnic_info = kzalloc_objs(struct bnxt_vnic_info, num_vnics,
- GFP_KERNEL);
+ bp->vnic_info = kzalloc_objs(struct bnxt_vnic_info, num_vnics);
if (!bp->vnic_info)
return -ENOMEM;
int nr_tbls, i;
rmem->depth = 2;
- ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES,
- GFP_KERNEL);
+ ctx_pg->ctx_pg_tbl = kzalloc_objs(ctx_pg, MAX_CTX_PAGES);
if (!ctx_pg->ctx_pg_tbl)
return -ENOMEM;
nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
return;
if (!err) {
- ent = kzalloc_objs(*ent, ulp->msix_requested,
- GFP_KERNEL);
+ ent = kzalloc_objs(*ent, ulp->msix_requested);
if (!ent)
return;
bnxt_fill_msix_vecs(bp, ent);
cp->fcoe_init_cid = 0x10;
}
- cp->iscsi_tbl = kzalloc_objs(struct cnic_iscsi, MAX_ISCSI_TBL_SZ,
- GFP_KERNEL);
+ cp->iscsi_tbl = kzalloc_objs(struct cnic_iscsi, MAX_ISCSI_TBL_SZ);
if (!cp->iscsi_tbl)
goto error;
- cp->ctx_tbl = kzalloc_objs(struct cnic_context, cp->max_cid_space,
- GFP_KERNEL);
+ cp->ctx_tbl = kzalloc_objs(struct cnic_context, cp->max_cid_space);
if (!cp->ctx_tbl)
goto error;
u32 port_id;
int i;
- cp->csk_tbl = kvzalloc_objs(struct cnic_sock, MAX_CM_SK_TBL_SZ,
- GFP_KERNEL);
+ cp->csk_tbl = kvzalloc_objs(struct cnic_sock, MAX_CM_SK_TBL_SZ);
if (!cp->csk_tbl)
return -ENOMEM;
/* Initialize common Rx ring structures */
priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
priv->num_rx_bds = TOTAL_DESC;
- priv->rx_cbs = kzalloc_objs(struct enet_cb, priv->num_rx_bds,
- GFP_KERNEL);
+ priv->rx_cbs = kzalloc_objs(struct enet_cb, priv->num_rx_bds);
if (!priv->rx_cbs)
return -ENOMEM;
/* Initialize common TX ring structures */
priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
priv->num_tx_bds = TOTAL_DESC;
- priv->tx_cbs = kzalloc_objs(struct enet_cb, priv->num_tx_bds,
- GFP_KERNEL);
+ priv->tx_cbs = kzalloc_objs(struct enet_cb, priv->num_tx_bds);
if (!priv->tx_cbs) {
kfree(priv->rx_cbs);
return -ENOMEM;
* And context table
*/
- d->sbdma_ctxtable = kzalloc_objs(*d->sbdma_ctxtable, d->sbdma_maxdescr,
- GFP_KERNEL);
+ d->sbdma_ctxtable = kzalloc_objs(*d->sbdma_ctxtable, d->sbdma_maxdescr);
#ifdef CONFIG_SBMAC_COALESCE
/*
return 0;
}
- mem_info->mdl = kzalloc_objs(struct bna_mem_descr, mem_info->num,
- GFP_KERNEL);
+ mem_info->mdl = kzalloc_objs(struct bna_mem_descr, mem_info->num);
if (mem_info->mdl == NULL)
return -ENOMEM;
netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
- priv->rx_skbuff = kzalloc_objs(struct sk_buff *, DMA_RX_RING_SZ,
- GFP_KERNEL);
+ priv->rx_skbuff = kzalloc_objs(struct sk_buff *, DMA_RX_RING_SZ);
if (!priv->rx_skbuff)
return -ENOMEM;
if (!priv->dma_rx)
goto err_dma_rx;
- priv->tx_skbuff = kzalloc_objs(struct sk_buff *, DMA_TX_RING_SZ,
- GFP_KERNEL);
+ priv->tx_skbuff = kzalloc_objs(struct sk_buff *, DMA_TX_RING_SZ);
if (!priv->tx_skbuff)
goto err_tx_skb;
/* allocate memory to store virtual and dma base address of
* per glist consistent memory
*/
- lio->glists_virt_base = kzalloc_objs(*lio->glists_virt_base, num_iqs,
- GFP_KERNEL);
- lio->glists_dma_base = kzalloc_objs(*lio->glists_dma_base, num_iqs,
- GFP_KERNEL);
+ lio->glists_virt_base = kzalloc_objs(*lio->glists_virt_base, num_iqs);
+ lio->glists_dma_base = kzalloc_objs(*lio->glists_dma_base, num_iqs);
if (!lio->glists_virt_base || !lio->glists_dma_base) {
lio_delete_glists(lio);
if (!eth_filter)
return -ENOMEM;
- eth_filter_info = kzalloc_objs(*eth_filter_info, adap->params.nports,
- GFP_KERNEL);
+ eth_filter_info = kzalloc_objs(*eth_filter_info, adap->params.nports);
if (!eth_filter_info) {
ret = -ENOMEM;
goto free_eth_filter;
adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
- adap->sge.egr_map = kzalloc_objs(*adap->sge.egr_map, adap->sge.egr_sz,
- GFP_KERNEL);
+ adap->sge.egr_map = kzalloc_objs(*adap->sge.egr_map, adap->sge.egr_sz);
if (!adap->sge.egr_map) {
ret = -ENOMEM;
goto bye;
if (!tc_matchall)
return -ENOMEM;
- tc_port_matchall = kzalloc_objs(*tc_port_matchall, adap->params.nports,
- GFP_KERNEL);
+ tc_port_matchall = kzalloc_objs(*tc_port_matchall, adap->params.nports);
if (!tc_port_matchall) {
ret = -ENOMEM;
goto out_free_matchall;
if (!tc_mqprio)
return -ENOMEM;
- tc_port_mqprio = kzalloc_objs(*tc_port_mqprio, adap->params.nports,
- GFP_KERNEL);
+ tc_port_mqprio = kzalloc_objs(*tc_port_mqprio, adap->params.nports);
if (!tc_port_mqprio) {
ret = -ENOMEM;
goto out_free_mqprio;
tc_mqprio->port_mqprio = tc_port_mqprio;
for (i = 0; i < adap->params.nports; i++) {
port_mqprio = &tc_mqprio->port_mqprio[i];
- eosw_txq = kzalloc_objs(*eosw_txq, adap->tids.neotids,
- GFP_KERNEL);
+ eosw_txq = kzalloc_objs(*eosw_txq, adap->tids.neotids);
if (!eosw_txq) {
ret = -ENOMEM;
goto out_free_ports;
i = min_t(int, uld_info->ntxq, num_online_cpus());
txq_info->ntxq = roundup(i, adap->params.nports);
}
- txq_info->uldtxq = kzalloc_objs(struct sge_uld_txq, txq_info->ntxq,
- GFP_KERNEL);
+ txq_info->uldtxq = kzalloc_objs(struct sge_uld_txq, txq_info->ntxq);
if (!txq_info->uldtxq) {
kfree(txq_info);
return -ENOMEM;
if (!adap->uld)
return -ENOMEM;
- s->uld_rxq_info = kzalloc_objs(struct sge_uld_rxq_info *, CXGB4_ULD_MAX,
- GFP_KERNEL);
+ s->uld_rxq_info = kzalloc_objs(struct sge_uld_rxq_info *, CXGB4_ULD_MAX);
if (!s->uld_rxq_info)
goto err_uld;
- s->uld_txq_info = kzalloc_objs(struct sge_uld_txq_info *, CXGB4_TX_MAX,
- GFP_KERNEL);
+ s->uld_txq_info = kzalloc_objs(struct sge_uld_txq_info *, CXGB4_TX_MAX);
if (!s->uld_txq_info)
goto err_uld_rx;
return 0;
if (!enic->napi)
goto free_queues;
- enic->msix_entry = kzalloc_objs(struct msix_entry, enic->intr_avail,
- GFP_KERNEL);
+ enic->msix_entry = kzalloc_objs(struct msix_entry, enic->intr_avail);
if (!enic->msix_entry)
goto free_queues;
- enic->msix = kzalloc_objs(struct enic_msix_entry, enic->intr_avail,
- GFP_KERNEL);
+ enic->msix = kzalloc_objs(struct enic_msix_entry, enic->intr_avail);
if (!enic->msix)
goto free_queues;
- enic->intr = kzalloc_objs(struct vnic_intr, enic->intr_avail,
- GFP_KERNEL);
+ enic->intr = kzalloc_objs(struct vnic_intr, enic->intr_avail);
if (!enic->intr)
goto free_queues;
if (!adapter->pmac_id)
return -ENOMEM;
- adapter->mc_list = kzalloc_objs(*adapter->mc_list, be_max_mc(adapter),
- GFP_KERNEL);
+ adapter->mc_list = kzalloc_objs(*adapter->mc_list, be_max_mc(adapter));
if (!adapter->mc_list)
return -ENOMEM;
- adapter->uc_list = kzalloc_objs(*adapter->uc_list, be_max_uc(adapter),
- GFP_KERNEL);
+ adapter->uc_list = kzalloc_objs(*adapter->uc_list, be_max_uc(adapter));
if (!adapter->uc_list)
return -ENOMEM;
if (err)
goto err_free_cmdport;
- ethsw->ports = kzalloc_objs(*ethsw->ports, ethsw->sw_attr.num_ifs,
- GFP_KERNEL);
+ ethsw->ports = kzalloc_objs(*ethsw->ports, ethsw->sw_attr.num_ifs);
if (!(ethsw->ports)) {
err = -ENOMEM;
goto err_teardown;
}
- ethsw->fdbs = kzalloc_objs(*ethsw->fdbs, ethsw->sw_attr.num_ifs,
- GFP_KERNEL);
+ ethsw->fdbs = kzalloc_objs(*ethsw->fdbs, ethsw->sw_attr.num_ifs);
if (!ethsw->fdbs) {
err = -ENOMEM;
goto err_free_ports;
{
struct enetc_si *si = priv->si;
- priv->cls_rules = kzalloc_objs(*priv->cls_rules, si->num_fs_entries,
- GFP_KERNEL);
+ priv->cls_rules = kzalloc_objs(*priv->cls_rules, si->num_fs_entries);
if (!priv->cls_rules)
return -ENOMEM;
int i;
for (i = 0; i < priv->num_tx_queues; i++) {
- priv->tx_queue[i] = kzalloc_obj(struct gfar_priv_tx_q,
- GFP_KERNEL);
+ priv->tx_queue[i] = kzalloc_obj(struct gfar_priv_tx_q);
if (!priv->tx_queue[i])
return -ENOMEM;
int i;
for (i = 0; i < priv->num_rx_queues; i++) {
- priv->rx_queue[i] = kzalloc_obj(struct gfar_priv_rx_q,
- GFP_KERNEL);
+ priv->rx_queue[i] = kzalloc_obj(struct gfar_priv_rx_q);
if (!priv->rx_queue[i])
return -ENOMEM;
priv = netdev_priv(netdev);
num_tx_queues = gve_num_tx_queues(priv);
report_stats = priv->stats_report->stats;
- rx_qid_to_stats_idx = kmalloc_objs(int, priv->rx_cfg.num_queues,
- GFP_KERNEL);
+ rx_qid_to_stats_idx = kmalloc_objs(int, priv->rx_cfg.num_queues);
if (!rx_qid_to_stats_idx)
return;
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
}
if (!gve_is_gqi(priv)) {
- priv->ptype_lut_dqo = kvzalloc_obj(*priv->ptype_lut_dqo,
- GFP_KERNEL);
+ priv->ptype_lut_dqo = kvzalloc_obj(*priv->ptype_lut_dqo);
if (!priv->ptype_lut_dqo) {
err = -ENOMEM;
goto abort_with_stats_report;
int err = 0;
int i, j;
- rx = kvzalloc_objs(struct gve_rx_ring, cfg->qcfg_rx->max_queues,
- GFP_KERNEL);
+ rx = kvzalloc_objs(struct gve_rx_ring, cfg->qcfg_rx->max_queues);
if (!rx)
return -ENOMEM;
int err;
int i;
- rx = kvzalloc_objs(struct gve_rx_ring, cfg->qcfg_rx->max_queues,
- GFP_KERNEL);
+ rx = kvzalloc_objs(struct gve_rx_ring, cfg->qcfg_rx->max_queues);
if (!rx)
return -ENOMEM;
return -EINVAL;
}
- tx = kvzalloc_objs(struct gve_tx_ring, cfg->qcfg->max_queues,
- GFP_KERNEL);
+ tx = kvzalloc_objs(struct gve_tx_ring, cfg->qcfg->max_queues);
if (!tx)
return -ENOMEM;
return -EINVAL;
}
- tx = kvzalloc_objs(struct gve_tx_ring, cfg->qcfg->max_queues,
- GFP_KERNEL);
+ tx = kvzalloc_objs(struct gve_tx_ring, cfg->qcfg->max_queues);
if (!tx)
return -ENOMEM;
assert(ring->next_to_use == 0);
assert(ring->next_to_clean == 0);
- ring->desc_cb = kzalloc_objs(ring->desc_cb[0], ring->desc_num,
- GFP_KERNEL);
+ ring->desc_cb = kzalloc_objs(ring->desc_cb[0], ring->desc_num);
if (!ring->desc_cb) {
ret = -ENOMEM;
goto out;
spin_lock_init(&cmdq->cmdq_lock);
- cmdq->cmd_infos = kzalloc_objs(*cmdq->cmd_infos, cmdq->wq.q_depth,
- GFP_KERNEL);
+ cmdq->cmd_infos = kzalloc_objs(*cmdq->cmd_infos, cmdq->wq.q_depth);
if (!cmdq->cmd_infos) {
err = -ENOMEM;
return err;
goto err_free_txqs_res_arr;
}
- q_params->irq_cfg = kzalloc_objs(*q_params->irq_cfg, q_params->num_qps,
- GFP_KERNEL);
+ q_params->irq_cfg = kzalloc_objs(*q_params->irq_cfg, q_params->num_qps);
if (!q_params->irq_cfg) {
err = -ENOMEM;
goto err_free_rxqs_res_arr;
u32 pg_idx;
int err;
- qpages->pages = kzalloc_objs(qpages->pages[0], qpages->num_pages,
- GFP_KERNEL);
+ qpages->pages = kzalloc_objs(qpages->pages[0], qpages->num_pages);
if (!qpages->pages)
return -ENOMEM;
for (idx = 0; idx < num_rq; idx++) {
rqres = &rxqs_res[idx];
- rqres->rx_info = kzalloc_objs(*rqres->rx_info, rq_depth,
- GFP_KERNEL);
+ rqres->rx_info = kzalloc_objs(*rqres->rx_info, rq_depth);
if (!rqres->rx_info)
goto err_free_rqres;
for (idx = 0; idx < num_sq; idx++) {
tqres = &txqs_res[idx];
- tqres->tx_info = kzalloc_objs(*tqres->tx_info, sq_depth,
- GFP_KERNEL);
+ tqres->tx_info = kzalloc_objs(*tqres->tx_info, sq_depth);
if (!tqres->tx_info)
goto err_free_tqres;
/* Allocate/populate the pools. */
release_rx_pools(adapter);
- adapter->rx_pool = kzalloc_objs(struct ibmvnic_rx_pool, num_pools,
- GFP_KERNEL);
+ adapter->rx_pool = kzalloc_objs(struct ibmvnic_rx_pool, num_pools);
if (!adapter->rx_pool) {
dev_err(dev, "Failed to allocate rx pools\n");
return -ENOMEM;
{
int i;
- tx_pool->tx_buff = kzalloc_objs(struct ibmvnic_tx_buff, pool_size,
- GFP_KERNEL);
+ tx_pool->tx_buff = kzalloc_objs(struct ibmvnic_tx_buff, pool_size);
if (!tx_pool->tx_buff)
return -ENOMEM;
pool_size = adapter->req_tx_entries_per_subcrq;
num_pools = adapter->num_active_tx_scrqs;
- adapter->tx_pool = kzalloc_objs(struct ibmvnic_tx_pool, num_pools,
- GFP_KERNEL);
+ adapter->tx_pool = kzalloc_objs(struct ibmvnic_tx_pool, num_pools);
if (!adapter->tx_pool)
return -ENOMEM;
- adapter->tso_pool = kzalloc_objs(struct ibmvnic_tx_pool, num_pools,
- GFP_KERNEL);
+ adapter->tso_pool = kzalloc_objs(struct ibmvnic_tx_pool, num_pools);
/* To simplify release_tx_pools() ensure that ->tx_pool and
* ->tso_pool are either both NULL or both non-NULL.
*/
{
int i;
- adapter->napi = kzalloc_objs(struct napi_struct, adapter->req_rx_queues,
- GFP_KERNEL);
+ adapter->napi = kzalloc_objs(struct napi_struct, adapter->req_rx_queues);
if (!adapter->napi)
return -ENOMEM;
rx_old = adapter->rx_ring;
err = -ENOMEM;
- txdr = kzalloc_objs(struct e1000_tx_ring, adapter->num_tx_queues,
- GFP_KERNEL);
+ txdr = kzalloc_objs(struct e1000_tx_ring, adapter->num_tx_queues);
if (!txdr)
goto err_alloc_tx;
- rxdr = kzalloc_objs(struct e1000_rx_ring, adapter->num_rx_queues,
- GFP_KERNEL);
+ rxdr = kzalloc_objs(struct e1000_rx_ring, adapter->num_rx_queues);
if (!rxdr)
goto err_alloc_rx;
if (!txdr->count)
txdr->count = E1000_DEFAULT_TXD;
- txdr->buffer_info = kzalloc_objs(struct e1000_tx_buffer, txdr->count,
- GFP_KERNEL);
+ txdr->buffer_info = kzalloc_objs(struct e1000_tx_buffer, txdr->count);
if (!txdr->buffer_info) {
ret_val = 1;
goto err_nomem;
if (!rxdr->count)
rxdr->count = E1000_DEFAULT_RXD;
- rxdr->buffer_info = kzalloc_objs(struct e1000_rx_buffer, rxdr->count,
- GFP_KERNEL);
+ rxdr->buffer_info = kzalloc_objs(struct e1000_rx_buffer, rxdr->count);
if (!rxdr->buffer_info) {
ret_val = 5;
goto err_nomem;
if (!tx_ring->count)
tx_ring->count = E1000_DEFAULT_TXD;
- tx_ring->buffer_info = kzalloc_objs(struct e1000_buffer, tx_ring->count,
- GFP_KERNEL);
+ tx_ring->buffer_info = kzalloc_objs(struct e1000_buffer, tx_ring->count);
if (!tx_ring->buffer_info) {
ret_val = 1;
goto err_nomem;
if (!rx_ring->count)
rx_ring->count = E1000_DEFAULT_RXD;
- rx_ring->buffer_info = kzalloc_objs(struct e1000_buffer, rx_ring->count,
- GFP_KERNEL);
+ rx_ring->buffer_info = kzalloc_objs(struct e1000_buffer, rx_ring->count);
if (!rx_ring->buffer_info) {
ret_val = 5;
goto err_nomem;
v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
/* A failure in MSI-X entry allocation is fatal. */
- interface->msix_entries = kzalloc_objs(struct msix_entry, v_budget,
- GFP_KERNEL);
+ interface->msix_entries = kzalloc_objs(struct msix_entry, v_budget);
if (!interface->msix_entries)
return -ENOMEM;
int i, ret;
u16 switch_id;
- bw_data = kzalloc_obj(struct i40e_aqc_query_port_ets_config_resp,
- GFP_KERNEL);
+ bw_data = kzalloc_obj(struct i40e_aqc_query_port_ets_config_resp);
if (!bw_data) {
ret = -ENOMEM;
goto command_write_done;
netdev_info(netdev,
"Changing Tx descriptor count from %d to %d.\n",
vsi->tx_rings[0]->count, new_tx_count);
- tx_rings = kzalloc_objs(struct i40e_ring, tx_alloc_queue_pairs,
- GFP_KERNEL);
+ tx_rings = kzalloc_objs(struct i40e_ring, tx_alloc_queue_pairs);
if (!tx_rings) {
err = -ENOMEM;
goto done;
(int)(num_online_cpus()));
- adapter->tx_rings = kzalloc_objs(struct iavf_ring, num_active_queues,
- GFP_KERNEL);
+ adapter->tx_rings = kzalloc_objs(struct iavf_ring, num_active_queues);
if (!adapter->tx_rings)
goto err_out;
- adapter->rx_rings = kzalloc_objs(struct iavf_ring, num_active_queues,
- GFP_KERNEL);
+ adapter->rx_rings = kzalloc_objs(struct iavf_ring, num_active_queues);
if (!adapter->rx_rings)
goto err_out;
v_budget = min_t(int, pairs + NONQ_VECS,
(int)adapter->vf_res->max_vectors);
- adapter->msix_entries = kzalloc_objs(struct msix_entry, v_budget,
- GFP_KERNEL);
+ adapter->msix_entries = kzalloc_objs(struct msix_entry, v_budget);
if (!adapter->msix_entries) {
err = -ENOMEM;
goto out;
if (!vsi->arfs_fltr_cntrs)
return -ENOMEM;
- vsi->arfs_last_fltr_id = kzalloc_obj(*vsi->arfs_last_fltr_id,
- GFP_KERNEL);
+ vsi->arfs_last_fltr_id = kzalloc_obj(*vsi->arfs_last_fltr_id);
if (!vsi->arfs_last_fltr_id) {
kfree(vsi->arfs_fltr_cntrs);
vsi->arfs_fltr_cntrs = NULL;
if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
return;
- arfs_fltr_list = kzalloc_objs(*arfs_fltr_list, ICE_MAX_ARFS_LIST,
- GFP_KERNEL);
+ arfs_fltr_list = kzalloc_objs(*arfs_fltr_list, ICE_MAX_ARFS_LIST);
if (!arfs_fltr_list)
return;
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
- mac_buf = kzalloc_objs(struct ice_aqc_manage_mac_read_resp, 2,
- GFP_KERNEL);
+ mac_buf = kzalloc_objs(struct ice_aqc_manage_mac_read_resp, 2);
if (!mac_buf) {
status = -ENOMEM;
goto err_unroll_fltr_mgmt_struct;
return -ENOMEM;
vsi_stat->tx_ring_stats =
- kzalloc_objs(*vsi_stat->tx_ring_stats, vsi->alloc_txq,
- GFP_KERNEL);
+ kzalloc_objs(*vsi_stat->tx_ring_stats, vsi->alloc_txq);
if (!vsi_stat->tx_ring_stats)
goto err_alloc_tx;
vsi_stat->rx_ring_stats =
- kzalloc_objs(*vsi_stat->rx_ring_stats, vsi->alloc_rxq,
- GFP_KERNEL);
+ kzalloc_objs(*vsi_stat->rx_ring_stats, vsi->alloc_rxq);
if (!vsi_stat->rx_ring_stats)
goto err_alloc_rx;
if (ret)
goto unlock;
- coalesce = kzalloc_objs(struct ice_coalesce_stored, vsi->num_q_vectors,
- GFP_KERNEL);
+ coalesce = kzalloc_objs(struct ice_coalesce_stored, vsi->num_q_vectors);
if (!coalesce) {
ret = -ENOMEM;
goto decfg;
idpf_ctlq_init_rxq_bufs(cq);
} else {
/* Allocate the array of msg pointers for TX queues */
- cq->bi.tx_msg = kzalloc_objs(struct idpf_ctlq_msg *, qinfo->len,
- GFP_KERNEL);
+ cq->bi.tx_msg = kzalloc_objs(struct idpf_ctlq_msg *, qinfo->len);
if (!cq->bi.tx_msg) {
err = -ENOMEM;
goto init_dealloc_q_mem;
/* We'll be allocating the buffer info memory first, then we can
* allocate the mapped buffers for the event processing
*/
- cq->bi.rx_buff = kzalloc_objs(struct idpf_dma_mem *, cq->ring_size,
- GFP_KERNEL);
+ cq->bi.rx_buff = kzalloc_objs(struct idpf_dma_mem *, cq->ring_size);
if (!cq->bi.rx_buff)
return -ENOMEM;
struct idpf_dma_mem *bi;
int num = 1; /* number of idpf_dma_mem to be allocated */
- cq->bi.rx_buff[i] = kzalloc_objs(struct idpf_dma_mem, num,
- GFP_KERNEL);
+ cq->bi.rx_buff[i] = kzalloc_objs(struct idpf_dma_mem, num);
if (!cq->bi.rx_buff[i])
goto unwind_alloc_cq_bufs;
}
num_lan_vecs = actual_vecs - num_rdma_vecs;
- adapter->msix_entries = kzalloc_objs(struct msix_entry, num_lan_vecs,
- GFP_KERNEL);
+ adapter->msix_entries = kzalloc_objs(struct msix_entry, num_lan_vecs);
if (!adapter->msix_entries) {
err = -ENOMEM;
goto free_rdma_msix;
tx_q->buf_pool_size = U16_MAX;
else
tx_q->buf_pool_size = tx_q->desc_count;
- tx_q->tx_buf = kzalloc_objs(*tx_q->tx_buf, tx_q->buf_pool_size,
- GFP_KERNEL);
+ tx_q->tx_buf = kzalloc_objs(*tx_q->tx_buf, tx_q->buf_pool_size);
if (!tx_q->tx_buf)
return -ENOMEM;
{
bool split, flow_sch_en;
- rsrc->txq_grps = kzalloc_objs(*rsrc->txq_grps, rsrc->num_txq_grp,
- GFP_KERNEL);
+ rsrc->txq_grps = kzalloc_objs(*rsrc->txq_grps, rsrc->num_txq_grp);
if (!rsrc->txq_grps)
return -ENOMEM;
tx_qgrp->num_txq = num_txq;
for (unsigned int j = 0; j < tx_qgrp->num_txq; j++) {
- tx_qgrp->txqs[j] = kzalloc_obj(*tx_qgrp->txqs[j],
- GFP_KERNEL);
+ tx_qgrp->txqs[j] = kzalloc_obj(*tx_qgrp->txqs[j]);
if (!tx_qgrp->txqs[j])
goto err_alloc;
}
bool hs, rsc;
int err = 0;
- rsrc->rxq_grps = kzalloc_objs(struct idpf_rxq_group, rsrc->num_rxq_grp,
- GFP_KERNEL);
+ rsrc->rxq_grps = kzalloc_objs(struct idpf_rxq_group, rsrc->num_rxq_grp);
if (!rsrc->rxq_grps)
return -ENOMEM;
if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
rx_qgrp->singleq.num_rxq = num_rxq;
for (unsigned int j = 0; j < num_rxq; j++) {
- rx_qgrp->singleq.rxqs[j] = kzalloc_obj(*rx_qgrp->singleq.rxqs[j],
- GFP_KERNEL);
+ rx_qgrp->singleq.rxqs[j] = kzalloc_obj(*rx_qgrp->singleq.rxqs[j]);
if (!rx_qgrp->singleq.rxqs[j]) {
err = -ENOMEM;
goto err_alloc;
q_vector->rx_intr_mode = q_coal->rx_intr_mode;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
- q_vector->tx = kzalloc_objs(*q_vector->tx, txqs_per_vector,
- GFP_KERNEL);
+ q_vector->tx = kzalloc_objs(*q_vector->tx, txqs_per_vector);
if (!q_vector->tx)
goto error;
- q_vector->rx = kzalloc_objs(*q_vector->rx, rxqs_per_vector,
- GFP_KERNEL);
+ q_vector->rx = kzalloc_objs(*q_vector->rx, rxqs_per_vector);
if (!q_vector->rx)
goto error;
if (!idpf_is_queue_model_split(rsrc->rxq_model))
continue;
- q_vector->bufq = kzalloc_objs(*q_vector->bufq, bufqs_per_vector,
- GFP_KERNEL);
+ q_vector->bufq = kzalloc_objs(*q_vector->bufq, bufqs_per_vector);
if (!q_vector->bufq)
goto error;
kfree(q_info->queue_chunks);
- q_info->queue_chunks = kzalloc_objs(*q_info->queue_chunks, num_chunks,
- GFP_KERNEL);
+ q_info->queue_chunks = kzalloc_objs(*q_info->queue_chunks, num_chunks);
if (!q_info->queue_chunks) {
q_info->num_chunks = 0;
return -ENOMEM;
u16 next_ptype_id = 0;
ssize_t reply_sz;
- singleq_pt_lkup = kzalloc_objs(*singleq_pt_lkup, IDPF_RX_MAX_BASE_PTYPE,
- GFP_KERNEL);
+ singleq_pt_lkup = kzalloc_objs(*singleq_pt_lkup, IDPF_RX_MAX_BASE_PTYPE);
if (!singleq_pt_lkup)
return -ENOMEM;
int err = 0;
if (!adapter->vcxn_mngr) {
- adapter->vcxn_mngr = kzalloc_obj(*adapter->vcxn_mngr,
- GFP_KERNEL);
+ adapter->vcxn_mngr = kzalloc_obj(*adapter->vcxn_mngr);
if (!adapter->vcxn_mngr) {
err = -ENOMEM;
goto init_failed;
pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
num_max_vports = idpf_get_max_vports(adapter);
adapter->max_vports = num_max_vports;
- adapter->vports = kzalloc_objs(*adapter->vports, num_max_vports,
- GFP_KERNEL);
+ adapter->vports = kzalloc_objs(*adapter->vports, num_max_vports);
if (!adapter->vports)
return -ENOMEM;
u32 temp_offset;
int reply_sz;
- recv_ptp_caps_msg = kzalloc_obj(struct virtchnl2_ptp_get_caps,
- GFP_KERNEL);
+ recv_ptp_caps_msg = kzalloc_obj(struct virtchnl2_ptp_get_caps);
if (!recv_ptp_caps_msg)
return -ENOMEM;
/* add 1 vector for link status interrupts */
numvecs++;
- adapter->msix_entries = kzalloc_objs(struct msix_entry, numvecs,
- GFP_KERNEL);
+ adapter->msix_entries = kzalloc_objs(struct msix_entry, numvecs);
if (!adapter->msix_entries)
return;
return -EINVAL;
if (!adapter->ixgbe_ieee_ets) {
- adapter->ixgbe_ieee_ets = kmalloc_obj(struct ieee_ets,
- GFP_KERNEL);
+ adapter->ixgbe_ieee_ets = kmalloc_obj(struct ieee_ets);
if (!adapter->ixgbe_ieee_ets)
return -ENOMEM;
return -EINVAL;
if (!adapter->ixgbe_ieee_pfc) {
- adapter->ixgbe_ieee_pfc = kmalloc_obj(struct ieee_pfc,
- GFP_KERNEL);
+ adapter->ixgbe_ieee_pfc = kmalloc_obj(struct ieee_pfc);
if (!adapter->ixgbe_ieee_pfc)
return -ENOMEM;
}
*/
vector_threshold = MIN_MSIX_COUNT;
- adapter->msix_entries = kzalloc_objs(struct msix_entry, vectors,
- GFP_KERNEL);
+ adapter->msix_entries = kzalloc_objs(struct msix_entry, vectors);
if (!adapter->msix_entries)
return -ENOMEM;
#endif /* IXGBE_FCOE */
/* initialize static ixgbe jump table entries */
- adapter->jump_tables[0] = kzalloc_obj(*adapter->jump_tables[0],
- GFP_KERNEL);
+ adapter->jump_tables[0] = kzalloc_obj(*adapter->jump_tables[0]);
if (!adapter->jump_tables[0])
return -ENOMEM;
adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
IXGBE_FLAG_VMDQ_ENABLED;
/* Allocate memory for per VF control structures */
- adapter->vfinfo = kzalloc_objs(struct vf_data_storage, num_vfs,
- GFP_KERNEL);
+ adapter->vfinfo = kzalloc_objs(struct vf_data_storage, num_vfs);
if (!adapter->vfinfo)
return -ENOMEM;
v_budget = min_t(int, v_budget, num_online_cpus());
v_budget += NON_Q_VECTORS;
- adapter->msix_entries = kzalloc_objs(struct msix_entry, v_budget,
- GFP_KERNEL);
+ adapter->msix_entries = kzalloc_objs(struct msix_entry, v_budget);
if (!adapter->msix_entries)
return -ENOMEM;
/* allocate space for this first because if it fails then we don't
* need to unwind
*/
- fw_modules = kzalloc_objs(*fw_modules, LIBIE_NR_FW_LOG_MODULES,
- GFP_KERNEL);
+ fw_modules = kzalloc_objs(*fw_modules, LIBIE_NR_FW_LOG_MODULES);
if (!fw_modules)
return;
for (thread = 0; thread < port->priv->nthreads; thread++) {
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
txq_pcpu->size = txq->size;
- txq_pcpu->buffs = kmalloc_objs(*txq_pcpu->buffs, txq_pcpu->size,
- GFP_KERNEL);
+ txq_pcpu->buffs = kmalloc_objs(*txq_pcpu->buffs, txq_pcpu->size);
if (!txq_pcpu->buffs)
return -ENOMEM;
/* Generic interrupts apart from input/output queues */
num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf);
- oct->msix_entries = kzalloc_objs(struct msix_entry, num_msix,
- GFP_KERNEL);
+ oct->msix_entries = kzalloc_objs(struct msix_entry, num_msix);
if (!oct->msix_entries)
goto msix_alloc_err;
/* Generic interrupts apart from input/output queues */
//num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf);
num_msix = oct->num_oqs;
- oct->msix_entries = kzalloc_objs(struct msix_entry, num_msix,
- GFP_KERNEL);
+ oct->msix_entries = kzalloc_objs(struct msix_entry, num_msix);
if (!oct->msix_entries)
goto msix_alloc_err;
/* CQ size of SQ */
qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
- qset->cq = kzalloc_objs(struct otx2_cq_queue, pf->qset.cq_cnt,
- GFP_KERNEL);
+ qset->cq = kzalloc_objs(struct otx2_cq_queue, pf->qset.cq_cnt);
if (!qset->cq)
goto err_free_mem;
if (!qset->sq)
goto err_free_mem;
- qset->rq = kzalloc_objs(struct otx2_rcv_queue, pf->hw.rx_queues,
- GFP_KERNEL);
+ qset->rq = kzalloc_objs(struct otx2_rcv_queue, pf->hw.rx_queues);
if (!qset->rq)
goto err_free_mem;
if (err)
goto err_block;
- block->stats = kzalloc_objs(*block->stats, block->num_counters,
- GFP_KERNEL);
+ block->stats = kzalloc_objs(*block->stats, block->num_counters);
if (!block->stats) {
err = -ENOMEM;
goto err_stats;
if (!sky2->tx_le)
goto nomem;
- sky2->tx_ring = kzalloc_objs(struct tx_ring_info, sky2->tx_ring_size,
- GFP_KERNEL);
+ sky2->tx_ring = kzalloc_objs(struct tx_ring_info, sky2->tx_ring_size);
if (!sky2->tx_ring)
goto nomem;
if (!sky2->rx_le)
goto nomem;
- sky2->rx_ring = kzalloc_objs(struct rx_ring_info, sky2->rx_pending,
- GFP_KERNEL);
+ sky2->rx_ring = kzalloc_objs(struct rx_ring_info, sky2->rx_pending);
if (!sky2->rx_ring)
goto nomem;
buf->nbufs = DIV_ROUND_UP(size, PAGE_SIZE);
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
- buf->page_list = kzalloc_objs(*buf->page_list, buf->nbufs,
- GFP_KERNEL);
+ buf->page_list = kzalloc_objs(*buf->page_list, buf->nbufs);
if (!buf->page_list)
return -ENOMEM;
struct mlx4_vf_admin_state *vf_admin;
priv->mfunc.master.slave_state =
- kzalloc_objs(struct mlx4_slave_state, dev->num_slaves,
- GFP_KERNEL);
+ kzalloc_objs(struct mlx4_slave_state, dev->num_slaves);
if (!priv->mfunc.master.slave_state)
goto err_comm;
goto err_comm_admin;
priv->mfunc.master.vf_oper =
- kzalloc_objs(struct mlx4_vf_oper_state, dev->num_slaves,
- GFP_KERNEL);
+ kzalloc_objs(struct mlx4_vf_oper_state, dev->num_slaves);
if (!priv->mfunc.master.vf_oper)
goto err_comm_oper;
struct mlx4_vport_state *oper_vport;
s_state->vlan_filter[port] =
- kzalloc_obj(struct mlx4_vlan_fltr,
- GFP_KERNEL);
+ kzalloc_obj(struct mlx4_vlan_fltr);
if (!s_state->vlan_filter[port]) {
if (--port)
kfree(s_state->vlan_filter[port]);
if (!dst->tx_ring[t])
goto err_free_tx;
- dst->tx_cq[t] = kzalloc_objs(struct mlx4_en_cq *, MAX_TX_RINGS,
- GFP_KERNEL);
+ dst->tx_cq[t] = kzalloc_objs(struct mlx4_en_cq *, MAX_TX_RINGS);
if (!dst->tx_cq[t]) {
kfree(dst->tx_ring[t]);
goto err_free_tx;
err = -ENOMEM;
goto out;
}
- priv->tx_cq[t] = kzalloc_objs(struct mlx4_en_cq *, MAX_TX_RINGS,
- GFP_KERNEL);
+ priv->tx_cq[t] = kzalloc_objs(struct mlx4_en_cq *, MAX_TX_RINGS);
if (!priv->tx_cq[t]) {
err = -ENOMEM;
goto out;
int i, err = 0;
func_cap = kzalloc_obj(*func_cap);
- caps->spec_qps = kzalloc_objs(*caps->spec_qps, caps->num_ports,
- GFP_KERNEL);
+ caps->spec_qps = kzalloc_objs(*caps->spec_qps, caps->num_ports);
if (!func_cap || !caps->spec_qps) {
mlx4_err(dev, "Failed to allocate memory for special qps cap\n");
MLX4_MAX_NUM_VF);
if (reset_flow) {
- dev->dev_vfs = kzalloc_objs(*dev->dev_vfs, total_vfs,
- GFP_KERNEL);
+ dev->dev_vfs = kzalloc_objs(*dev->dev_vfs, total_vfs);
if (!dev->dev_vfs)
goto free_mem;
return dev_flags;
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[i];
- res_alloc->quota = kmalloc_objs(int, dev->persist->num_vfs + 1,
- GFP_KERNEL);
+ res_alloc->quota = kmalloc_objs(int, dev->persist->num_vfs + 1);
- res_alloc->guaranteed = kmalloc_objs(int,
- dev->persist->num_vfs + 1,
- GFP_KERNEL);
+ res_alloc->guaranteed = kmalloc_objs(int, dev->persist->num_vfs + 1);
GFP_KERNEL);
else
res_alloc->allocated =
- kzalloc_objs(int, dev->persist->num_vfs + 1,
- GFP_KERNEL);
+ kzalloc_objs(int, dev->persist->num_vfs + 1);
/* Reduce the sink counter */
if (i == RES_COUNTER)
res_alloc->res_free = dev->caps.max_counters - 1;
memset(data, 0, sizeof(*data));
- counters_arr = kmalloc_objs(*counters_arr, dev->caps.max_counters,
- GFP_KERNEL);
+ counters_arr = kmalloc_objs(*counters_arr, dev->caps.max_counters);
if (!counters_arr)
return -ENOMEM;
mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
slave);
- counters_arr = kmalloc_objs(*counters_arr, dev->caps.max_counters,
- GFP_KERNEL);
+ counters_arr = kmalloc_objs(*counters_arr, dev->caps.max_counters);
if (!counters_arr)
return;
for (i = 0; i < chs->num; i++) {
struct mlx5e_txqsq **sqs;
- sqs = kvzalloc_objs(struct mlx5e_txqsq *, qos_sqs_size,
- GFP_KERNEL);
+ sqs = kvzalloc_objs(struct mlx5e_txqsq *, qos_sqs_size);
if (!sqs)
goto err_free;
static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
{
if (!xsk->pools) {
- xsk->pools = kzalloc_objs(*xsk->pools, MLX5E_MAX_NUM_CHANNELS,
- GFP_KERNEL);
+ xsk->pools = kzalloc_objs(*xsk->pools, MLX5E_MAX_NUM_CHANNELS);
if (unlikely(!xsk->pools))
return -ENOMEM;
}
if (!priv->channel_stats)
goto err_free_tx_rates;
- priv->fec_ranges = kzalloc_objs(*priv->fec_ranges, ETHTOOL_FEC_HIST_MAX,
- GFP_KERNEL);
+ priv->fec_ranges = kzalloc_objs(*priv->fec_ranges, ETHTOOL_FEC_HIST_MAX);
if (!priv->fec_ranges)
goto err_free_channel_stats;
if (err)
goto out;
- conn->qp.rq.bufs = kvzalloc_objs(conn->qp.rq.bufs[0], conn->qp.rq.size,
- GFP_KERNEL);
+ conn->qp.rq.bufs = kvzalloc_objs(conn->qp.rq.bufs[0], conn->qp.rq.size);
if (!conn->qp.rq.bufs) {
err = -ENOMEM;
goto err_wq;
}
- conn->qp.sq.bufs = kvzalloc_objs(conn->qp.sq.bufs[0], conn->qp.sq.size,
- GFP_KERNEL);
+ conn->qp.sq.bufs = kvzalloc_objs(conn->qp.sq.bufs[0], conn->qp.sq.size);
if (!conn->qp.sq.bufs) {
err = -ENOMEM;
goto err_rq_bufs;
return 0;
}
- table->rl_entry = kzalloc_objs(struct mlx5_rl_entry, table->max_size,
- GFP_KERNEL);
+ table->rl_entry = kzalloc_objs(struct mlx5_rl_entry, table->max_size);
if (!table->rl_entry)
return -ENOMEM;
;
at->num_actions = num_actions - 1;
- at->action_type_arr = kzalloc_objs(*action_type, num_actions,
- GFP_KERNEL);
+ at->action_type_arr = kzalloc_objs(*action_type, num_actions);
if (!at->action_type_arr)
goto free_at;
struct mlx5hws_matcher_attr attr = {0};
int i;
- bwc_matcher->rules = kzalloc_objs(*bwc_matcher->rules, bwc_queues,
- GFP_KERNEL);
+ bwc_matcher->rules = kzalloc_objs(*bwc_matcher->rules, bwc_queues);
if (!bwc_matcher->rules)
goto err;
int num_actions = 0;
int err;
- *ractions = kzalloc_objs(**ractions, MLX5_FLOW_CONTEXT_ACTION_MAX,
- GFP_KERNEL);
+ *ractions = kzalloc_objs(**ractions, MLX5_FLOW_CONTEXT_ACTION_MAX);
if (!*ractions) {
err = -ENOMEM;
goto out_err;
}
- fs_actions = kzalloc_objs(*fs_actions, MLX5_FLOW_CONTEXT_ACTION_MAX,
- GFP_KERNEL);
+ fs_actions = kzalloc_objs(*fs_actions, MLX5_FLOW_CONTEXT_ACTION_MAX);
if (!fs_actions) {
err = -ENOMEM;
goto free_actions_alloc;
}
- dest_actions = kzalloc_objs(*dest_actions, MLX5_FLOW_CONTEXT_ACTION_MAX,
- GFP_KERNEL);
+ dest_actions = kzalloc_objs(*dest_actions, MLX5_FLOW_CONTEXT_ACTION_MAX);
if (!dest_actions) {
err = -ENOMEM;
goto free_fs_actions_alloc;
matcher->size_of_at_array =
num_of_at + matcher->attr.max_num_of_at_attach;
- matcher->at = kvzalloc_objs(*matcher->at, matcher->size_of_at_array,
- GFP_KERNEL);
+ matcher->at = kvzalloc_objs(*matcher->at, matcher->size_of_at_array);
if (!matcher->at) {
mlx5hws_err(ctx, "Failed to allocate action template array\n");
ret = -ENOMEM;
if (err)
return err;
- ctx->send_queue = kzalloc_objs(*ctx->send_queue, ctx->queues,
- GFP_KERNEL);
+ ctx->send_queue = kzalloc_objs(*ctx->send_queue, ctx->queues);
if (!ctx->send_queue) {
err = -ENOMEM;
goto free_bwc_locks;
INIT_LIST_HEAD(&buddy->list_node);
- buddy->bitmap = kzalloc_objs(*buddy->bitmap, buddy->max_order + 1,
- GFP_KERNEL);
- buddy->num_free = kzalloc_objs(*buddy->num_free, buddy->max_order + 1,
- GFP_KERNEL);
+ buddy->bitmap = kzalloc_objs(*buddy->bitmap, buddy->max_order + 1);
+ buddy->num_free = kzalloc_objs(*buddy->num_free, buddy->max_order + 1);
if (!buddy->bitmap || !buddy->num_free)
goto err_free_all;
int num_of_entries =
mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);
- buddy->ste_arr = kvzalloc_objs(struct mlx5dr_ste, num_of_entries,
- GFP_KERNEL);
+ buddy->ste_arr = kvzalloc_objs(struct mlx5dr_ste, num_of_entries);
if (!buddy->ste_arr)
return -ENOMEM;
if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
- actions = kzalloc_objs(*actions, MLX5_FLOW_CONTEXT_ACTION_MAX,
- GFP_KERNEL);
+ actions = kzalloc_objs(*actions, MLX5_FLOW_CONTEXT_ACTION_MAX);
if (!actions) {
err = -ENOMEM;
goto out_err;
goto free_actions_alloc;
}
- term_actions = kzalloc_objs(*term_actions, MLX5_FLOW_CONTEXT_ACTION_MAX,
- GFP_KERNEL);
+ term_actions = kzalloc_objs(*term_actions, MLX5_FLOW_CONTEXT_ACTION_MAX);
if (!term_actions) {
err = -ENOMEM;
goto free_fs_dr_actions_alloc;
struct mlxsw_afk_key_info *key_info;
int err;
- key_info = kzalloc_flex(*key_info, blocks, mlxsw_afk->max_blocks,
- GFP_KERNEL);
+ key_info = kzalloc_flex(*key_info, blocks, mlxsw_afk->max_blocks);
if (!key_info)
return ERR_PTR(-ENOMEM);
err = mlxsw_afk_picker(mlxsw_afk, key_info, elusage);
mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL,
&num_of_slots);
- mlxsw_hwmon = kzalloc_flex(*mlxsw_hwmon, line_cards, num_of_slots + 1,
- GFP_KERNEL);
+ mlxsw_hwmon = kzalloc_flex(*mlxsw_hwmon, line_cards, num_of_slots + 1);
if (!mlxsw_hwmon)
return -ENOMEM;
mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL,
&num_of_slots);
- thermal = kzalloc_flex(*thermal, line_cards, num_of_slots + 1,
- GFP_KERNEL);
+ thermal = kzalloc_flex(*thermal, line_cards, num_of_slots + 1);
if (!thermal)
return -ENOMEM;
int i;
int err;
- mlxsw_pci->fw_area.items = kzalloc_objs(*mem_item, num_pages,
- GFP_KERNEL);
+ mlxsw_pci->fw_area.items = kzalloc_objs(*mem_item, num_pages);
if (!mlxsw_pci->fw_area.items)
return -ENOMEM;
mlxsw_pci->fw_area.count = num_pages;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
return -EIO;
max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
- trap = kzalloc_flex(*trap, policers_usage, BITS_TO_LONGS(max_policers),
- GFP_KERNEL);
+ trap = kzalloc_flex(*trap, policers_usage, BITS_TO_LONGS(max_policers));
if (!trap)
return -ENOMEM;
trap->max_policers = max_policers;
if (err)
return err;
- mlxsw_sp->lags = kzalloc_objs(struct mlxsw_sp_lag, mlxsw_sp->max_lag,
- GFP_KERNEL);
+ mlxsw_sp->lags = kzalloc_objs(struct mlxsw_sp_lag, mlxsw_sp->max_lag);
if (!mlxsw_sp->lags) {
err = -ENOMEM;
goto err_kcalloc;
* is 2^ACL_MAX_BF_LOG
*/
bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
- bf = kzalloc_flex(*bf, refcnt, size_mul(bf_bank_size, num_erp_banks),
- GFP_KERNEL);
+ bf = kzalloc_flex(*bf, refcnt, size_mul(bf_bank_size, num_erp_banks));
if (!bf)
return ERR_PTR(-ENOMEM);
int i;
int err;
- mlxsw_sp->sb->ports = kzalloc_objs(struct mlxsw_sp_sb_port, max_ports,
- GFP_KERNEL);
+ mlxsw_sp->sb->ports = kzalloc_objs(struct mlxsw_sp_sb_port, max_ports);
if (!mlxsw_sp->sb->ports)
return -ENOMEM;
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
- mlxsw_sp_port->dcb.ets = kzalloc_obj(*mlxsw_sp_port->dcb.ets,
- GFP_KERNEL);
+ mlxsw_sp_port->dcb.ets = kzalloc_obj(*mlxsw_sp_port->dcb.ets);
if (!mlxsw_sp_port->dcb.ets)
return -ENOMEM;
{
int i;
- mlxsw_sp_port->dcb.maxrate = kmalloc_obj(*mlxsw_sp_port->dcb.maxrate,
- GFP_KERNEL);
+ mlxsw_sp_port->dcb.maxrate = kmalloc_obj(*mlxsw_sp_port->dcb.maxrate);
if (!mlxsw_sp_port->dcb.maxrate)
return -ENOMEM;
static int mlxsw_sp_port_pfc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
- mlxsw_sp_port->dcb.pfc = kzalloc_obj(*mlxsw_sp_port->dcb.pfc,
- GFP_KERNEL);
+ mlxsw_sp_port->dcb.pfc = kzalloc_obj(*mlxsw_sp_port->dcb.pfc);
if (!mlxsw_sp_port->dcb.pfc)
return -ENOMEM;
struct mlxsw_sp_nve_mc_record *mc_record;
int err;
- mc_record = kzalloc_flex(*mc_record, entries, num_max_entries,
- GFP_KERNEL);
+ mc_record = kzalloc_flex(*mc_record, entries, num_max_entries);
if (!mc_record)
return ERR_PTR(-ENOMEM);
return -EIO;
max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
- mlxsw_sp->router->vrs = kzalloc_objs(struct mlxsw_sp_vr, max_vrs,
- GFP_KERNEL);
+ mlxsw_sp->router->vrs = kzalloc_objs(struct mlxsw_sp_vr, max_vrs);
if (!mlxsw_sp->router->vrs)
return -ENOMEM;
mlxsw_sp->router->max_rif_mac_profile =
MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
- mlxsw_sp->router->rifs = kzalloc_objs(struct mlxsw_sp_rif *, max_rifs,
- GFP_KERNEL);
+ mlxsw_sp->router->rifs = kzalloc_objs(struct mlxsw_sp_rif *, max_rifs);
if (!mlxsw_sp->router->rifs)
return -ENOMEM;
*/
static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
{
- desc_info->ring = kzalloc_objs(struct ksz_desc, desc_info->alloc,
- GFP_KERNEL);
+ desc_info->ring = kzalloc_objs(struct ksz_desc, desc_info->alloc);
if (!desc_info->ring)
return 1;
hw_init_desc(desc_info, transmit);
struct fdma *fdma = &tx->fdma;
int err;
- tx->dcbs_buf = kzalloc_objs(struct lan966x_tx_dcb_buf, fdma->n_dcbs,
- GFP_KERNEL);
+ tx->dcbs_buf = kzalloc_objs(struct lan966x_tx_dcb_buf, fdma->n_dcbs);
if (!tx->dcbs_buf)
return -ENOMEM;
}
sparx5->port_count = of_get_child_count(ports);
- configs = kzalloc_objs(struct initial_port_config, sparx5->port_count,
- GFP_KERNEL);
+ configs = kzalloc_objs(struct initial_port_config, sparx5->port_count);
if (!configs) {
err = -ENOMEM;
goto cleanup_pnode;
int err;
int i;
- apc->tx_qp = kzalloc_objs(struct mana_tx_qp, apc->num_queues,
- GFP_KERNEL);
+ apc->tx_qp = kzalloc_objs(struct mana_tx_qp, apc->num_queues);
if (!apc->tx_qp)
return -ENOMEM;
if (!apc->indir_table)
return -ENOMEM;
- apc->rxobj_table = kzalloc_objs(mana_handle_t, apc->indir_table_sz,
- GFP_KERNEL);
+ apc->rxobj_table = kzalloc_objs(mana_handle_t, apc->indir_table_sz);
if (!apc->rxobj_table) {
kfree(apc->indir_table);
return -ENOMEM;
* slices. We give up on MSI-X if we can only get a single
* vector. */
- mgp->msix_vectors = kzalloc_objs(*mgp->msix_vectors, mgp->num_slices,
- GFP_KERNEL);
+ mgp->msix_vectors = kzalloc_objs(*mgp->msix_vectors, mgp->num_slices);
if (mgp->msix_vectors == NULL)
goto no_msix;
for (i = 0; i < mgp->num_slices; i++) {
if (!cnt)
goto out;
- nfp_prog->map_records = kmalloc_objs(nfp_prog->map_records[0], cnt,
- GFP_KERNEL);
+ nfp_prog->map_records = kmalloc_objs(nfp_prog->map_records[0], cnt);
if (!nfp_prog->map_records) {
err = -ENOMEM;
goto out;
continue;
}
- acti_netdevs = kmalloc_objs(*acti_netdevs, entry->slave_cnt,
- GFP_KERNEL);
+ acti_netdevs = kmalloc_objs(*acti_netdevs, entry->slave_cnt);
if (!acti_netdevs) {
schedule_delayed_work(&lag->work,
NFP_FL_LAG_DELAY);
goto err_alloc;
}
- tx_ring->txbufs = kvzalloc_objs(*tx_ring->txbufs, tx_ring->cnt,
- GFP_KERNEL);
+ tx_ring->txbufs = kvzalloc_objs(*tx_ring->txbufs, tx_ring->cnt);
if (!tx_ring->txbufs)
goto err_alloc;
goto err_alloc;
}
- tx_ring->ktxbufs = kvzalloc_objs(*tx_ring->ktxbufs, tx_ring->cnt,
- GFP_KERNEL);
+ tx_ring->ktxbufs = kvzalloc_objs(*tx_ring->ktxbufs, tx_ring->cnt);
if (!tx_ring->ktxbufs)
goto err_alloc;
nn->dp.num_r_vecs, num_online_cpus());
nn->max_r_vecs = nn->dp.num_r_vecs;
- nn->dp.xsk_pools = kzalloc_objs(*nn->dp.xsk_pools, nn->max_r_vecs,
- GFP_KERNEL);
+ nn->dp.xsk_pools = kzalloc_objs(*nn->dp.xsk_pools, nn->max_r_vecs);
if (!nn->dp.xsk_pools) {
err = -ENOMEM;
goto err_free_nn;
wanted_irqs = 0;
list_for_each_entry(nn, &pf->vnics, vnic_list)
wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
- pf->irq_entries = kzalloc_objs(*pf->irq_entries, wanted_irqs,
- GFP_KERNEL);
+ pf->irq_entries = kzalloc_objs(*pf->irq_entries, wanted_irqs);
if (!pf->irq_entries)
return -ENOMEM;
entry_sz = nfp_cpp_area_size(sb_desc_area) / num_entries;
- pf->shared_bufs = kmalloc_objs(pf->shared_bufs[0], num_entries,
- GFP_KERNEL);
+ pf->shared_bufs = kmalloc_objs(pf->shared_bufs[0], num_entries);
if (!pf->shared_bufs) {
err = -ENOMEM;
goto err_release_area;
goto out_unmap;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
}
- np->rx_skb = kzalloc_objs(struct nv_skb_map, np->rx_ring_size,
- GFP_KERNEL);
- np->tx_skb = kzalloc_objs(struct nv_skb_map, np->tx_ring_size,
- GFP_KERNEL);
+ np->rx_skb = kzalloc_objs(struct nv_skb_map, np->rx_ring_size);
+ np->tx_skb = kzalloc_objs(struct nv_skb_map, np->tx_ring_size);
if (!np->rx_skb || !np->tx_skb)
goto out_freering;
spin_lock_init(&ring->lock);
ring->size = RX_RING_SIZE;
- ring->ring_info = kzalloc_objs(struct pasemi_mac_buffer, RX_RING_SIZE,
- GFP_KERNEL);
+ ring->ring_info = kzalloc_objs(struct pasemi_mac_buffer, RX_RING_SIZE);
if (!ring->ring_info)
goto out_ring_info;
spin_lock_init(&ring->lock);
ring->size = TX_RING_SIZE;
- ring->ring_info = kzalloc_objs(struct pasemi_mac_buffer, TX_RING_SIZE,
- GFP_KERNEL);
+ ring->ring_info = kzalloc_objs(struct pasemi_mac_buffer, TX_RING_SIZE);
if (!ring->ring_info)
goto out_ring_info;
recv_ctx = &adapter->recv_ctx;
- rds_ring = kzalloc_objs(struct nx_host_rds_ring, adapter->max_rds_rings,
- GFP_KERNEL);
+ rds_ring = kzalloc_objs(struct nx_host_rds_ring, adapter->max_rds_rings);
if (rds_ring == NULL)
goto err_out;
p_t2->num_pages = DIV_ROUND_UP(total_size, psz);
/* allocate t2 */
- p_t2->dma_mem = kzalloc_objs(struct phys_mem_desc, p_t2->num_pages,
- GFP_KERNEL);
+ p_t2->dma_mem = kzalloc_objs(struct phys_mem_desc, p_t2->num_pages);
if (!p_t2->dma_mem) {
DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
rc = -ENOMEM;
int rc;
size = qed_cxt_ilt_shadow_size(clients);
- p_mngr->ilt_shadow = kzalloc_objs(struct phys_mem_desc, size,
- GFP_KERNEL);
+ p_mngr->ilt_shadow = kzalloc_objs(struct phys_mem_desc, size);
if (!p_mngr->ilt_shadow) {
rc = -ENOMEM;
goto ilt_shadow_fail;
/* Read number of formats and allocate memory for all formats */
meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
- meta->formats = kzalloc_objs(struct mcp_trace_format, meta->formats_num,
- GFP_KERNEL);
+ meta->formats = kzalloc_objs(struct mcp_trace_format, meta->formats_num);
if (!meta->formats)
return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
if (!buf_size)
return NULL;
- allocated_mem = kzalloc_objs(struct phys_mem_desc, NUM_STORMS,
- GFP_KERNEL);
+ allocated_mem = kzalloc_objs(struct phys_mem_desc, NUM_STORMS);
if (!allocated_mem)
return NULL;
struct qede_fastpath *fp;
int i;
- edev->fp_array = kzalloc_objs(*edev->fp_array, QEDE_QUEUE_CNT(edev),
- GFP_KERNEL);
+ edev->fp_array = kzalloc_objs(*edev->fp_array, QEDE_QUEUE_CNT(edev));
if (!edev->fp_array) {
DP_NOTICE(edev, "fp array allocation failed\n");
goto err;
}
if (fp->type & QEDE_FASTPATH_TX) {
- fp->txq = kzalloc_objs(*fp->txq, edev->dev_info.num_tc,
- GFP_KERNEL);
+ fp->txq = kzalloc_objs(*fp->txq, edev->dev_info.num_tc);
if (!fp->txq)
goto err;
}
goto err;
if (edev->xdp_prog) {
- fp->xdp_tx = kzalloc_obj(*fp->xdp_tx,
- GFP_KERNEL);
+ fp->xdp_tx = kzalloc_obj(*fp->xdp_tx);
if (!fp->xdp_tx)
goto err;
fp->type |= QEDE_FASTPATH_XDP;
act_pci_func = ahw->total_nic_func;
- adapter->npars = kzalloc_objs(struct qlcnic_npar_info, act_pci_func,
- GFP_KERNEL);
+ adapter->npars = kzalloc_objs(struct qlcnic_npar_info, act_pci_func);
if (!adapter->npars) {
ret = -ENOMEM;
goto err_pci_info;
adapter->ahw->sriov = sriov;
sriov->num_vfs = num_vfs;
bc = &sriov->bc;
- sriov->vf_info = kzalloc_objs(struct qlcnic_vf_info, num_vfs,
- GFP_KERNEL);
+ sriov->vf_info = kzalloc_objs(struct qlcnic_vf_info, num_vfs);
if (!sriov->vf_info) {
err = -ENOMEM;
goto qlcnic_free_sriov;
goto error;
/* Allocate TX skb rings */
- priv->tx_skb[q] = kzalloc_objs(*priv->tx_skb[q], priv->num_tx_ring[q],
- GFP_KERNEL);
+ priv->tx_skb[q] = kzalloc_objs(*priv->tx_skb[q], priv->num_tx_ring[q]);
if (!priv->tx_skb[q])
goto error;
gq->ndev = ndev;
if (!dir_tx) {
- gq->rx_bufs = kzalloc_objs(*gq->rx_bufs, gq->ring_size,
- GFP_KERNEL);
+ gq->rx_bufs = kzalloc_objs(*gq->rx_bufs, gq->ring_size);
if (!gq->rx_bufs)
return -ENOMEM;
if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
gq->skbs = kzalloc_objs(*gq->skbs, gq->ring_size);
if (!gq->skbs)
return -ENOMEM;
- gq->unmap_addrs = kzalloc_objs(*gq->unmap_addrs, gq->ring_size,
- GFP_KERNEL);
+ gq->unmap_addrs = kzalloc_objs(*gq->unmap_addrs, gq->ring_size);
if (!gq->unmap_addrs)
goto out;
gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
- mdp->rx_skbuff = kzalloc_objs(*mdp->rx_skbuff, mdp->num_rx_ring,
- GFP_KERNEL);
+ mdp->rx_skbuff = kzalloc_objs(*mdp->rx_skbuff, mdp->num_rx_ring);
if (!mdp->rx_skbuff)
return -ENOMEM;
- mdp->tx_skbuff = kzalloc_objs(*mdp->tx_skbuff, mdp->num_tx_ring,
- GFP_KERNEL);
+ mdp->tx_skbuff = kzalloc_objs(*mdp->tx_skbuff, mdp->num_tx_ring);
if (!mdp->tx_skbuff)
goto ring_free;
if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
return -EINVAL;
- rocker->msix_entries = kmalloc_objs(struct msix_entry, msix_entries,
- GFP_KERNEL);
+ rocker->msix_entries = kmalloc_objs(struct msix_entry, msix_entries);
if (!rocker->msix_entries)
return -ENOMEM;
goto err_free_dma_rx;
}
- rx_ring->rx_skbuff = kmalloc_objs(struct sk_buff *, rx_rsize,
- GFP_KERNEL);
+ rx_ring->rx_skbuff = kmalloc_objs(struct sk_buff *, rx_rsize);
if (!rx_ring->rx_skbuff) {
ret = -ENOMEM;
goto err_free_skbuff_dma;
page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
efx->rx_bufs_per_page);
- rx_queue->page_ring = kzalloc_objs(*rx_queue->page_ring, page_ring_size,
- GFP_KERNEL);
+ rx_queue->page_ring = kzalloc_objs(*rx_queue->page_ring, page_ring_size);
if (!rx_queue->page_ring)
rx_queue->page_ptr_mask = 0;
else
/* Determine how many packets to send */
state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
- state->skbs = kzalloc_objs(state->skbs[0], state->packet_count,
- GFP_KERNEL);
+ state->skbs = kzalloc_objs(state->skbs[0], state->packet_count);
if (!state->skbs)
return -ENOMEM;
state->flush = false;
rc = -ENOMEM;
goto fail;
}
- hwmon->group.attrs = kzalloc_objs(struct attribute *, n_attrs + 1,
- GFP_KERNEL);
+ hwmon->group.attrs = kzalloc_objs(struct attribute *, n_attrs + 1);
if (!hwmon->group.attrs) {
rc = -ENOMEM;
goto fail;
bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
efx->rx_bufs_per_page);
- rx_queue->page_ring = kzalloc_objs(*rx_queue->page_ring, page_ring_size,
- GFP_KERNEL);
+ rx_queue->page_ring = kzalloc_objs(*rx_queue->page_ring, page_ring_size);
if (!rx_queue->page_ring)
rx_queue->page_ptr_mask = 0;
else
/* Determine how many packets to send */
state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
- state->skbs = kzalloc_objs(state->skbs[0], state->packet_count,
- GFP_KERNEL);
+ state->skbs = kzalloc_objs(state->skbs[0], state->packet_count);
if (!state->skbs)
return -ENOMEM;
state->flush = false;
rc = -ENOMEM;
goto fail;
}
- hwmon->group.attrs = kzalloc_objs(struct attribute *, n_attrs + 1,
- GFP_KERNEL);
+ hwmon->group.attrs = kzalloc_objs(struct attribute *, n_attrs + 1);
if (!hwmon->group.attrs) {
rc = -ENOMEM;
goto fail;
bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
efx->rx_bufs_per_page);
- rx_queue->page_ring = kzalloc_objs(*rx_queue->page_ring, page_ring_size,
- GFP_KERNEL);
+ rx_queue->page_ring = kzalloc_objs(*rx_queue->page_ring, page_ring_size);
if (!rx_queue->page_ring)
rx_queue->page_ptr_mask = 0;
else
/* Determine how many packets to send */
state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
- state->skbs = kzalloc_objs(state->skbs[0], state->packet_count,
- GFP_KERNEL);
+ state->skbs = kzalloc_objs(state->skbs[0], state->packet_count);
if (!state->skbs)
return -ENOMEM;
state->flush = false;
BUG_ON(!pd->tx_ring);
- pd->tx_buffers = kmalloc_objs(struct smsc9420_ring_info, TX_RING_SIZE,
- GFP_KERNEL);
+ pd->tx_buffers = kmalloc_objs(struct smsc9420_ring_info, TX_RING_SIZE);
if (!pd->tx_buffers)
return -ENOMEM;
BUG_ON(!pd->rx_ring);
- pd->rx_buffers = kmalloc_objs(struct smsc9420_ring_info, RX_RING_SIZE,
- GFP_KERNEL);
+ pd->rx_buffers = kmalloc_objs(struct smsc9420_ring_info, RX_RING_SIZE);
if (pd->rx_buffers == NULL)
goto out;
return ret;
}
- rx_q->buf_pool = kzalloc_objs(*rx_q->buf_pool, dma_conf->dma_rx_size,
- GFP_KERNEL);
+ rx_q->buf_pool = kzalloc_objs(*rx_q->buf_pool, dma_conf->dma_rx_size);
if (!rx_q->buf_pool)
return -ENOMEM;
if (!tx_q->tx_skbuff_dma)
return -ENOMEM;
- tx_q->tx_skbuff = kzalloc_objs(struct sk_buff *, dma_conf->dma_tx_size,
- GFP_KERNEL);
+ tx_q->tx_skbuff = kzalloc_objs(struct sk_buff *, dma_conf->dma_tx_size);
if (!tx_q->tx_skbuff)
return -ENOMEM;
int ret = -ENOMEM;
unsigned int i;
- channel_head = kzalloc_objs(struct xlgmac_channel, pdata->channel_count,
- GFP_KERNEL);
+ channel_head = kzalloc_objs(struct xlgmac_channel, pdata->channel_count);
if (!channel_head)
return ret;
netif_dbg(pdata, drv, pdata->netdev,
"channel_head=%p\n", channel_head);
- tx_ring = kzalloc_objs(struct xlgmac_ring, pdata->tx_ring_count,
- GFP_KERNEL);
+ tx_ring = kzalloc_objs(struct xlgmac_ring, pdata->tx_ring_count);
if (!tx_ring)
goto err_tx_ring;
- rx_ring = kzalloc_objs(struct xlgmac_ring, pdata->rx_ring_count,
- GFP_KERNEL);
+ rx_ring = kzalloc_objs(struct xlgmac_ring, pdata->rx_ring_count);
if (!rx_ring)
goto err_rx_ring;
pool->gen_pool->name = pool_name;
- pool->desc_infos = kzalloc_objs(*pool->desc_infos, pool->num_desc,
- GFP_KERNEL);
+ pool->desc_infos = kzalloc_objs(*pool->desc_infos, pool->num_desc);
if (!pool->desc_infos)
goto gen_pool_desc_infos_alloc_fail;
lp->tx_ring_head = 0;
lp->rx_ring_tail = 0;
lp->rx_ring_head = 0;
- lp->tx_skb_ring = kzalloc_objs(*lp->tx_skb_ring, TX_BD_NUM_MAX,
- GFP_KERNEL);
+ lp->tx_skb_ring = kzalloc_objs(*lp->tx_skb_ring, TX_BD_NUM_MAX);
if (!lp->tx_skb_ring) {
ret = -ENOMEM;
goto err_dma_release_rx;
lp->tx_skb_ring[i] = skbuf_dma;
}
- lp->rx_skb_ring = kzalloc_objs(*lp->rx_skb_ring, RX_BUF_NUM_DEFAULT,
- GFP_KERNEL);
+ lp->rx_skb_ring = kzalloc_objs(*lp->rx_skb_ring, RX_BUF_NUM_DEFAULT);
if (!lp->rx_skb_ring) {
ret = -ENOMEM;
goto err_free_tx_skb_ring;
data[i++] = xdp_tx;
}
- pcpu_sum = kvmalloc_objs(struct netvsc_ethtool_pcpu_stats, nr_cpu_ids,
- GFP_KERNEL);
+ pcpu_sum = kvmalloc_objs(struct netvsc_ethtool_pcpu_stats, nr_cpu_ids);
if (!pcpu_sum)
return;
&priv->spi->dev,
"Resetting MAC...\n");
- mlme_reset_wpc = kmalloc_obj(*mlme_reset_wpc,
- GFP_KERNEL);
+ mlme_reset_wpc = kmalloc_obj(*mlme_reset_wpc);
if (!mlme_reset_wpc)
goto finish;
INIT_WORK(
* modulo that number to determine the next one that's free.
* Transactions are allocated one at a time.
*/
- trans_info->trans = kzalloc_objs(*trans_info->trans, tre_count,
- GFP_KERNEL);
+ trans_info->trans = kzalloc_objs(*trans_info->trans, tre_count);
if (!trans_info->trans)
return -ENOMEM;
trans_info->free_id = 0; /* all modulo channel->tre_count */
s8 *dt_val;
u32 conf;
- dt_val = kmalloc_objs(*dt_val, miic->of_data->conf_conv_count,
- GFP_KERNEL);
+ dt_val = kmalloc_objs(*dt_val, miic->of_data->conf_conv_count);
if (!dt_val)
return -ENOMEM;
if (!serial->tiocmget)
goto exit;
serial->tiocmget->serial_state_notification
- = kzalloc_obj(struct hso_serial_state_notification,
- GFP_KERNEL);
+ = kzalloc_obj(struct hso_serial_state_notification);
if (!serial->tiocmget->serial_state_notification)
goto exit;
tiocmget = serial->tiocmget;
return ret;
}
- dev->data[0] = (unsigned long) kzalloc_obj(struct smsc75xx_priv,
- GFP_KERNEL);
+ dev->data[0] = (unsigned long) kzalloc_obj(struct smsc75xx_priv);
pdata = (struct smsc75xx_priv *)(dev->data[0]);
if (!pdata)
goto free_tx_bd;
}
- priv->rx_skbuff = kzalloc_objs(*priv->rx_skbuff, priv->rx_ring_size,
- GFP_KERNEL);
+ priv->rx_skbuff = kzalloc_objs(*priv->rx_skbuff, priv->rx_ring_size);
if (!priv->rx_skbuff) {
ret = -ENOMEM;
goto free_ucc_pram;
}
- priv->tx_skbuff = kzalloc_objs(*priv->tx_skbuff, priv->tx_ring_size,
- GFP_KERNEL);
+ priv->tx_skbuff = kzalloc_objs(*priv->tx_skbuff, priv->tx_ring_size);
if (!priv->tx_skbuff) {
ret = -ENOMEM;
goto free_rx_skbuff;
nentries = roundup_pow_of_two(nentries);
- src_ring = kzalloc_flex(*src_ring, per_transfer_context, nentries,
- GFP_KERNEL);
+ src_ring = kzalloc_flex(*src_ring, per_transfer_context, nentries);
if (src_ring == NULL)
return ERR_PTR(-ENOMEM);
nentries = roundup_pow_of_two(nentries);
- src_ring = kzalloc_flex(*src_ring, per_transfer_context, nentries,
- GFP_KERNEL);
+ src_ring = kzalloc_flex(*src_ring, per_transfer_context, nentries);
if (!src_ring)
return ERR_PTR(-ENOMEM);
nentries = roundup_pow_of_two(attr->dest_nentries);
- dest_ring = kzalloc_flex(*dest_ring, per_transfer_context, nentries,
- GFP_KERNEL);
+ dest_ring = kzalloc_flex(*dest_ring, per_transfer_context, nentries);
if (dest_ring == NULL)
return ERR_PTR(-ENOMEM);
nentries = roundup_pow_of_two(attr->dest_nentries);
- dest_ring = kzalloc_flex(*dest_ring, per_transfer_context, nentries,
- GFP_KERNEL);
+ dest_ring = kzalloc_flex(*dest_ring, per_transfer_context, nentries);
if (!dest_ring)
return ERR_PTR(-ENOMEM);
}
if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
- arsta->tx_stats = kzalloc_obj(*arsta->tx_stats,
- GFP_KERNEL);
+ arsta->tx_stats = kzalloc_obj(*arsta->tx_stats);
if (!arsta->tx_stats) {
ath10k_mac_dec_num_stations(arvif, sta);
ret = -ENOMEM;
if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;
- dp->spt_info = kzalloc_objs(struct ath12k_spt_info, dp->num_spt_pages,
- GFP_KERNEL);
+ dp->spt_info = kzalloc_objs(struct ath12k_spt_info, dp->num_spt_pages);
if (!dp->spt_info) {
ath12k_warn(ab, "SPT page allocation failure");
u8 link_id)
{
if (!ahvif->cache[link_id]) {
- ahvif->cache[link_id] = kzalloc_obj(*ahvif->cache[0],
- GFP_KERNEL);
+ ahvif->cache[link_id] = kzalloc_obj(*ahvif->cache[0]);
if (ahvif->cache[link_id])
INIT_LIST_HEAD(&ahvif->cache[link_id]->key_conf.list);
}
if (ar->ab->hw_params->single_pdev_only)
n_combinations = 2;
- combinations = kzalloc_objs(*combinations, n_combinations,
- GFP_KERNEL);
+ combinations = kzalloc_objs(*combinations, n_combinations);
if (!combinations)
return -ENOMEM;
if (!pd->pd_step)
goto err_out;
- pd->pd_pwr = kzalloc_objs(s16, AR5K_EEPROM_N_PWR_POINTS_5111,
- GFP_KERNEL);
+ pd->pd_pwr = kzalloc_objs(s16, AR5K_EEPROM_N_PWR_POINTS_5111);
if (!pd->pd_pwr)
goto err_out;
if (!pd->pd_step)
goto err_out;
- pd->pd_pwr = kzalloc_objs(s16, pd->pd_points,
- GFP_KERNEL);
+ pd->pd_pwr = kzalloc_objs(s16, pd->pd_points);
if (!pd->pd_pwr)
goto err_out;
if (!pd->pd_step)
goto err_out;
- pd->pd_pwr = kzalloc_objs(s16, pd->pd_points,
- GFP_KERNEL);
+ pd->pd_pwr = kzalloc_objs(s16, pd->pd_points);
if (!pd->pd_pwr)
goto err_out;
if (!pd->pd_step)
goto err_out;
- pd->pd_pwr = kzalloc_objs(s16, pd->pd_points,
- GFP_KERNEL);
+ pd->pd_pwr = kzalloc_objs(s16, pd->pd_points);
if (!pd->pd_pwr)
goto err_out;
return -EINVAL;
}
- wil->brd_info = kzalloc_objs(struct wil_brd_info, max_num_ent,
- GFP_KERNEL);
+ wil->brd_info = kzalloc_objs(struct wil_brd_info, max_num_ent);
if (!wil->brd_info)
return -ENOMEM;
num_descriptors, descriptor_size);
/* allocate descriptors info list in pmc context*/
- pmc->descriptors = kzalloc_objs(struct desc_alloc_info, num_descriptors,
- GFP_KERNEL);
+ pmc->descriptors = kzalloc_objs(struct desc_alloc_info, num_descriptors);
if (!pmc->descriptors) {
wil_err(wil, "ERROR allocating pmc skb list\n");
goto no_release_err;
struct list_head *free = &wil->rx_buff_mgmt.free;
int i;
- wil->rx_buff_mgmt.buff_arr = kzalloc_objs(struct wil_rx_buff, size + 1,
- GFP_KERNEL);
+ wil->rx_buff_mgmt.buff_arr = kzalloc_objs(struct wil_rx_buff, size + 1);
if (!wil->rx_buff_mgmt.buff_arr)
return -ENOMEM;
}
e->dev = dev;
log = &e->txstatlog;
- log->log = kzalloc_objs(struct b43_txstatus, B43_NR_LOGGED_TXSTATUS,
- GFP_KERNEL);
+ log->log = kzalloc_objs(struct b43_txstatus, B43_NR_LOGGED_TXSTATUS);
if (!log->log) {
b43err(dev->wl, "debugfs: add device txstatus OOM\n");
kfree(e);
if (for_tx)
ring->nr_slots = B43_TXRING_SLOTS;
- ring->meta = kzalloc_objs(struct b43_dmadesc_meta, ring->nr_slots,
- GFP_KERNEL);
+ ring->meta = kzalloc_objs(struct b43_dmadesc_meta, ring->nr_slots);
if (!ring->meta)
goto err_kfree_ring;
for (i = 0; i < ring->nr_slots; i++)
if (for_tx)
nr_slots = B43legacy_TXRING_SLOTS;
- ring->meta = kzalloc_objs(struct b43legacy_dmadesc_meta, nr_slots,
- GFP_KERNEL);
+ ring->meta = kzalloc_objs(struct b43legacy_dmadesc_meta, nr_slots);
if (!ring->meta)
goto err_kfree_ring;
if (for_tx) {
bus->msgbuf->commonrings[i] =
&devinfo->shared.commonrings[i]->commonring;
- flowrings = kzalloc_objs(*flowrings, devinfo->shared.max_flowrings,
- GFP_KERNEL);
+ flowrings = kzalloc_objs(*flowrings, devinfo->shared.max_flowrings);
if (!flowrings)
goto fail;
dma_addr_t p;
priv->msg_buffers =
- kmalloc_objs(struct ipw2100_tx_packet, IPW_COMMAND_POOL_SIZE,
- GFP_KERNEL);
+ kmalloc_objs(struct ipw2100_tx_packet, IPW_COMMAND_POOL_SIZE);
if (!priv->msg_buffers)
return -ENOMEM;
int i, j;
for (i = 0; i < MAX_NETWORK_COUNT; i++) {
- ieee->networks[i] = kzalloc_obj(struct libipw_network,
- GFP_KERNEL);
+ ieee->networks[i] = kzalloc_obj(struct libipw_network);
if (!ieee->networks[i]) {
LIBIPW_ERROR("Out of memory allocating beacons\n");
for (j = 0; j < i; j++)
/* Driver ilate data, only for Tx (not command) queues,
* not shared with device. */
if (id != il->cmd_queue) {
- txq->skbs = kzalloc_objs(struct sk_buff *, TFD_QUEUE_SIZE_MAX,
- GFP_KERNEL);
+ txq->skbs = kzalloc_objs(struct sk_buff *, TFD_QUEUE_SIZE_MAX);
if (!txq->skbs) {
IL_ERR("Fail to alloc skbs\n");
goto error;
}
channels =
- kzalloc_objs(struct ieee80211_channel, il->channel_count,
- GFP_KERNEL);
+ kzalloc_objs(struct ieee80211_channel, il->channel_count);
if (!channels)
return -ENOMEM;
{
if (!il->txq)
il->txq =
- kzalloc_objs(struct il_tx_queue, il->cfg->num_of_queues,
- GFP_KERNEL);
+ kzalloc_objs(struct il_tx_queue, il->cfg->num_of_queues);
if (!il->txq) {
IL_ERR("Not enough memory for txq\n");
return -ENOMEM;
struct iwl_fw_dump_desc *iwl_dump_error_desc;
int ret;
- iwl_dump_error_desc = kmalloc_obj(*iwl_dump_error_desc,
- GFP_KERNEL);
+ iwl_dump_error_desc = kmalloc_obj(*iwl_dump_error_desc);
if (!iwl_dump_error_desc)
return -ENOMEM;
u8 tx_chains = fw->valid_rx_ant;
if (cfg->uhb_supported)
- data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_UHB,
- GFP_KERNEL);
+ data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_UHB);
else
- data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_EXT,
- GFP_KERNEL);
+ data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_EXT);
if (!data)
return NULL;
const __le16 *ch_section;
if (cfg->uhb_supported)
- data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_UHB,
- GFP_KERNEL);
+ data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_UHB);
else if (cfg->nvm_type != IWL_NVM_EXT)
- data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS,
- GFP_KERNEL);
+ data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS);
else
- data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_EXT,
- GFP_KERNEL);
+ data = kzalloc_flex(*data, channels, IWL_NVM_NUM_CHANNELS_EXT);
if (!data)
return NULL;
goto out;
}
n_matches = hweight_long(matched_profiles);
- netdetect_info = kzalloc_flex(*netdetect_info, matches, n_matches,
- GFP_KERNEL);
+ netdetect_info = kzalloc_flex(*netdetect_info, matches, n_matches);
if (netdetect_info)
iwl_mld_set_netdetect_info(mld, netdetect_cfg, netdetect_info,
resume_data->netdetect_res,
iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
- resume_data.wowlan_status = kzalloc_obj(*resume_data.wowlan_status,
- GFP_KERNEL);
+ resume_data.wowlan_status = kzalloc_obj(*resume_data.wowlan_status);
if (!resume_data.wowlan_status)
return -ENOMEM;
if (mld->fw_status.in_hw_restart)
return 0;
- dup_data = kzalloc_objs(*dup_data, mld->trans->info.num_rxqs,
- GFP_KERNEL);
+ dup_data = kzalloc_objs(*dup_data, mld->trans->info.num_rxqs);
if (!dup_data)
return -ENOMEM;
static int iwl_mld_init_time_sync(struct iwl_mld *mld, u32 protocols,
const u8 *addr)
{
- struct iwl_mld_time_sync_data *time_sync = kzalloc_obj(*time_sync,
- GFP_KERNEL);
+ struct iwl_mld_time_sync_data *time_sync = kzalloc_obj(*time_sync);
if (!time_sync)
return -ENOMEM;
if (iwl_mvm_has_new_rx_api(mvm)) {
int q;
- dup_data = kzalloc_objs(*dup_data, mvm->trans->info.num_rxqs,
- GFP_KERNEL);
+ dup_data = kzalloc_objs(*dup_data, mvm->trans->info.num_rxqs);
if (!dup_data)
return -ENOMEM;
/*
if (WARN_ON(trans_pcie->rxq))
return -EINVAL;
- trans_pcie->rxq = kzalloc_objs(struct iwl_rxq, trans->info.num_rxqs,
- GFP_KERNEL);
+ trans_pcie->rxq = kzalloc_objs(struct iwl_rxq, trans->info.num_rxqs);
trans_pcie->rx_pool = kzalloc_objs(trans_pcie->rx_pool[0],
RX_POOL_SIZE(trans_pcie->num_rx_bufs),
GFP_KERNEL);
txq->n_window = slots_num;
- txq->entries = kzalloc_objs(struct iwl_pcie_txq_entry, slots_num,
- GFP_KERNEL);
+ txq->entries = kzalloc_objs(struct iwl_pcie_txq_entry, slots_num);
if (!txq->entries)
goto error;
goto free;
}
priv->chan_num = max_channel_num;
- priv->survey = kzalloc_objs(struct survey_info, max_channel_num,
- GFP_KERNEL);
+ priv->survey = kzalloc_objs(struct survey_info, max_channel_num);
if (!priv->survey) {
ret = -ENOMEM;
goto free;
}
list->max_entries = max_channel_num;
- list->channels = kzalloc_objs(struct p54_channel_entry, max_channel_num,
- GFP_KERNEL);
+ list->channels = kzalloc_objs(struct p54_channel_entry, max_channel_num);
if (!list->channels) {
ret = -ENOMEM;
goto free;
adapter->scan_processing = true;
spin_unlock_bh(&adapter->mwifiex_cmd_lock);
- scan_cfg_out = kzalloc_obj(union mwifiex_scan_cmd_config_tlv,
- GFP_KERNEL);
+ scan_cfg_out = kzalloc_obj(union mwifiex_scan_cmd_config_tlv);
if (!scan_cfg_out) {
ret = -ENOMEM;
goto done;
static int wilc_wfi_cfg_allocate_wpa_entry(struct wilc_priv *priv, u8 idx)
{
if (!priv->wilc_gtk[idx]) {
- priv->wilc_gtk[idx] = kzalloc_obj(*priv->wilc_gtk[idx],
- GFP_KERNEL);
+ priv->wilc_gtk[idx] = kzalloc_obj(*priv->wilc_gtk[idx]);
if (!priv->wilc_gtk[idx])
return -ENOMEM;
}
if (!priv->wilc_ptk[idx]) {
- priv->wilc_ptk[idx] = kzalloc_obj(*priv->wilc_ptk[idx],
- GFP_KERNEL);
+ priv->wilc_ptk[idx] = kzalloc_obj(*priv->wilc_ptk[idx]);
if (!priv->wilc_ptk[idx])
return -ENOMEM;
}
{
idx -= 4;
if (!priv->wilc_igtk[idx]) {
- priv->wilc_igtk[idx] = kzalloc_obj(*priv->wilc_igtk[idx],
- GFP_KERNEL);
+ priv->wilc_igtk[idx] = kzalloc_obj(*priv->wilc_igtk[idx]);
if (!priv->wilc_igtk[idx])
return -ENOMEM;
}
if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES))
return -E2BIG;
- mac->rd = kzalloc_flex(*mac->rd, reg_rules, resp->n_reg_rules,
- GFP_KERNEL);
+ mac->rd = kzalloc_flex(*mac->rd, reg_rules, resp->n_reg_rules);
if (!mac->rd)
return -ENOMEM;
return -EINVAL;
}
- limits = kzalloc_objs(*limits, rec->n_limits,
- GFP_KERNEL);
+ limits = kzalloc_objs(*limits, rec->n_limits);
if (!limits)
return -ENOMEM;
if (band->n_iftype_data == 0)
return 0;
- iftype_data = kzalloc_objs(*iftype_data, band->n_iftype_data,
- GFP_KERNEL);
+ iftype_data = kzalloc_objs(*iftype_data, band->n_iftype_data);
if (!iftype_data) {
band->n_iftype_data = 0;
return -ENOMEM;
return 0;
if (!band->channels)
- band->channels = kzalloc_objs(*chan, band->n_channels,
- GFP_KERNEL);
+ band->channels = kzalloc_objs(*chan, band->n_channels);
if (!band->channels) {
band->n_channels = 0;
return -ENOMEM;
return -ENOMEM;
rt2x00dev->chan_survey =
- kzalloc_objs(struct rt2x00_chan_survey, spec->num_channels,
- GFP_KERNEL);
+ kzalloc_objs(struct rt2x00_chan_survey, spec->num_channels);
if (!rt2x00dev->chan_survey) {
kfree(info);
return -ENOMEM;
rtlpriv->curveindex_5g = kcalloc(TARGET_CHNL_NUM_5G,
sizeof(*rtlpriv->curveindex_5g),
GFP_KERNEL);
- rtlpriv->mutex_for_power_on_off = kzalloc_obj(*rtlpriv->mutex_for_power_on_off,
- GFP_KERNEL);
- rtlpriv->mutex_for_hw_init = kzalloc_obj(*rtlpriv->mutex_for_hw_init,
- GFP_KERNEL);
+ rtlpriv->mutex_for_power_on_off = kzalloc_obj(*rtlpriv->mutex_for_power_on_off);
+ rtlpriv->mutex_for_hw_init = kzalloc_obj(*rtlpriv->mutex_for_hw_init);
if (!rtlpriv->curveindex_2g || !rtlpriv->curveindex_5g ||
!rtlpriv->mutex_for_power_on_off || !rtlpriv->mutex_for_hw_init) {
for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
skb_queue_head_init(&rtwsdio->tx_queue[i]);
- rtwsdio->tx_handler_data = kmalloc_obj(*rtwsdio->tx_handler_data,
- GFP_KERNEL);
+ rtwsdio->tx_handler_data = kmalloc_obj(*rtwsdio->tx_handler_data);
if (!rtwsdio->tx_handler_data)
goto err_destroy_wq;
if (ret)
return ret;
- rtwusb->vendor_req_buf = kmalloc_obj(*rtwusb->vendor_req_buf,
- GFP_KERNEL);
+ rtwusb->vendor_req_buf = kmalloc_obj(*rtwusb->vendor_req_buf);
if (!rtwusb->vendor_req_buf)
return -ENOMEM;
int cw1200_debug_init(struct cw1200_common *priv)
{
int ret = -ENOMEM;
- struct cw1200_debug_priv *d = kzalloc_obj(struct cw1200_debug_priv,
- GFP_KERNEL);
+ struct cw1200_debug_priv *d = kzalloc_obj(struct cw1200_debug_priv);
priv->debug = d;
if (!d)
return ret;
spin_lock_init(&queue->lock);
timer_setup(&queue->gc, cw1200_queue_gc, 0);
- queue->pool = kzalloc_objs(struct cw1200_queue_item, capacity,
- GFP_KERNEL);
+ queue->pool = kzalloc_objs(struct cw1200_queue_item, capacity);
if (!queue->pool)
return -ENOMEM;
- queue->link_map_cache = kzalloc_objs(int, stats->map_capacity,
- GFP_KERNEL);
+ queue->link_map_cache = kzalloc_objs(int, stats->map_capacity);
if (!queue->link_map_cache) {
kfree(queue->pool);
queue->pool = NULL;
scan.type = WSM_SCAN_TYPE_BACKGROUND;
scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND;
}
- scan.ch = kzalloc_objs(struct wsm_scan_ch, it - priv->scan.curr,
- GFP_KERNEL);
+ scan.ch = kzalloc_objs(struct wsm_scan_ch, it - priv->scan.curr);
if (!scan.ch) {
priv->scan.status = -ENOMEM;
goto fail;
int ret;
/* asking for the data path parameters */
- wl->data_path = kzalloc_obj(struct acx_data_path_params_resp,
- GFP_KERNEL);
+ wl->data_path = kzalloc_obj(struct acx_data_path_params_resp);
if (!wl->data_path)
return -ENOMEM;
struct iosm_protocol *ipc_protocol_init(struct iosm_imem *ipc_imem)
{
- struct iosm_protocol *ipc_protocol = kzalloc_obj(*ipc_protocol,
- GFP_KERNEL);
+ struct iosm_protocol *ipc_protocol = kzalloc_obj(*ipc_protocol);
struct ipc_protocol_context_info *p_ci;
u64 addr;
unsigned int i;
int ret;
- info->queues = kzalloc_objs(struct netfront_queue, *num_queues,
- GFP_KERNEL);
+ info->queues = kzalloc_objs(struct netfront_queue, *num_queues);
if (!info->queues)
return -ENOMEM;
struct log_entry log_new;
u32 i, map_entry, log_oldmap, log_newmap;
- arena->freelist = kzalloc_objs(struct free_entry, arena->nfree,
- GFP_KERNEL);
+ arena->freelist = kzalloc_objs(struct free_entry, arena->nfree);
if (!arena->freelist)
return -ENOMEM;
{
u32 i;
- arena->map_locks = kzalloc_objs(struct aligned_lock, arena->nfree,
- GFP_KERNEL);
+ arena->map_locks = kzalloc_objs(struct aligned_lock, arena->nfree);
if (!arena->map_locks)
return -ENOMEM;
}
/* Allocate memory for cpumask attribute group */
- nvdimm_pmu_cpumask_group = kzalloc_obj(*nvdimm_pmu_cpumask_group,
- GFP_KERNEL);
+ nvdimm_pmu_cpumask_group = kzalloc_obj(*nvdimm_pmu_cpumask_group);
if (!nvdimm_pmu_cpumask_group) {
kfree(pmu_events_attr);
kfree(attrs_group);
}
nd_region =
- kzalloc_flex(*nd_region, mapping, ndr_desc->num_mappings,
- GFP_KERNEL);
+ kzalloc_flex(*nd_region, mapping, ndr_desc->num_mappings);
if (!nd_region)
return NULL;
if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
return 0;
- ctrl->dhchap_ctxs = kvzalloc_objs(*chap, ctrl_max_dhchaps(ctrl),
- GFP_KERNEL);
+ ctrl->dhchap_ctxs = kvzalloc_objs(*chap, ctrl_max_dhchaps(ctrl));
if (!ctrl->dhchap_ctxs) {
ret = -ENOMEM;
goto err_free_dhchap_ctrl_secret;
ctrl->ctrl.kato = opts->kato;
ret = -ENOMEM;
- ctrl->queues = kzalloc_objs(*ctrl->queues, ctrl->ctrl.queue_count,
- GFP_KERNEL);
+ ctrl->queues = kzalloc_objs(*ctrl->queues, ctrl->ctrl.queue_count);
if (!ctrl->queues)
goto out_free_ctrl;
goto out_free_ctrl;
}
- ctrl->queues = kzalloc_objs(*ctrl->queues, ctrl->ctrl.queue_count,
- GFP_KERNEL);
+ ctrl->queues = kzalloc_objs(*ctrl->queues, ctrl->ctrl.queue_count);
if (!ctrl->queues) {
ret = -ENOMEM;
goto out_free_ctrl;
if (!port)
return ERR_PTR(-ENOMEM);
- port->ana_state = kzalloc_objs(*port->ana_state, NVMET_MAX_ANAGRPS + 1,
- GFP_KERNEL);
+ port->ana_state = kzalloc_objs(*port->ana_state, NVMET_MAX_ANAGRPS + 1);
if (!port->ana_state) {
kfree(port);
return ERR_PTR(-ENOMEM);
if (!ctrl->changed_ns_list)
goto out_free_ctrl;
- ctrl->sqs = kzalloc_objs(struct nvmet_sq *, subsys->max_qid + 1,
- GFP_KERNEL);
+ ctrl->sqs = kzalloc_objs(struct nvmet_sq *, subsys->max_qid + 1);
if (!ctrl->sqs)
goto out_free_changed_ns_list;
- ctrl->cqs = kzalloc_objs(struct nvmet_cq *, subsys->max_qid + 1,
- GFP_KERNEL);
+ ctrl->cqs = kzalloc_objs(struct nvmet_cq *, subsys->max_qid + 1);
if (!ctrl->cqs)
goto out_free_sqs;
struct nvmet_fc_ls_iod *iod;
int i;
- iod = kzalloc_objs(struct nvmet_fc_ls_iod, NVMET_LS_CTX_COUNT,
- GFP_KERNEL);
+ iod = kzalloc_objs(struct nvmet_fc_ls_iod, NVMET_LS_CTX_COUNT);
if (!iod)
return -ENOMEM;
ctrl->ctrl.kato = opts->kato;
ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
- ctrl->queues = kzalloc_objs(*ctrl->queues, opts->nr_io_queues + 1,
- GFP_KERNEL);
+ ctrl->queues = kzalloc_objs(*ctrl->queues, opts->nr_io_queues + 1);
if (!ctrl->queues)
goto out_uninit_ctrl;
{
unsigned int qid;
- ctrl->sq = kzalloc_objs(struct nvmet_pci_epf_queue, ctrl->nr_queues,
- GFP_KERNEL);
+ ctrl->sq = kzalloc_objs(struct nvmet_pci_epf_queue, ctrl->nr_queues);
if (!ctrl->sq)
return -ENOMEM;
- ctrl->cq = kzalloc_objs(struct nvmet_pci_epf_queue, ctrl->nr_queues,
- GFP_KERNEL);
+ ctrl->cq = kzalloc_objs(struct nvmet_pci_epf_queue, ctrl->nr_queues);
if (!ctrl->cq) {
kfree(ctrl->sq);
ctrl->sq = NULL;
if (opp_table->regulators)
return 0;
- opp_table->regulators = kmalloc_objs(*opp_table->regulators, count,
- GFP_KERNEL);
+ opp_table->regulators = kmalloc_objs(*opp_table->regulators, count);
if (!opp_table->regulators)
return -ENOMEM;
if (!count)
return 0;
- opp->required_opps = kzalloc_objs(*opp->required_opps, count,
- GFP_KERNEL);
+ opp->required_opps = kzalloc_objs(*opp->required_opps, count);
if (!opp->required_opps)
return -ENOMEM;
}
while (numpmem--) {
- p_mem_node = kmalloc_obj(struct pci_resource,
- GFP_KERNEL);
+ p_mem_node = kmalloc_obj(struct pci_resource);
if (!p_mem_node)
break;
pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length);
if ((w_base <= w_length) && (save_command & 0x02)) {
- p_mem_node = kmalloc_obj(*p_mem_node,
- GFP_KERNEL);
+ p_mem_node = kmalloc_obj(*p_mem_node);
if (!p_mem_node)
return -ENOMEM;
temp_register = base & 0xFFFFFFFE;
temp_register = (~temp_register) + 1;
- io_node = kmalloc_obj(*io_node,
- GFP_KERNEL);
+ io_node = kmalloc_obj(*io_node);
if (!io_node)
return -ENOMEM;
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
- p_mem_node = kmalloc_obj(*p_mem_node,
- GFP_KERNEL);
+ p_mem_node = kmalloc_obj(*p_mem_node);
if (!p_mem_node)
return -ENOMEM;
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
- mem_node = kmalloc_obj(*mem_node,
- GFP_KERNEL);
+ mem_node = kmalloc_obj(*mem_node);
if (!mem_node)
return -ENOMEM;
temp_register = base & 0xFFFFFFFE;
temp_register = (~temp_register) + 1;
- io_node = kmalloc_obj(*io_node,
- GFP_KERNEL);
+ io_node = kmalloc_obj(*io_node);
if (!io_node)
return -ENOMEM;
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
- p_mem_node = kmalloc_obj(*p_mem_node,
- GFP_KERNEL);
+ p_mem_node = kmalloc_obj(*p_mem_node);
if (!p_mem_node)
return -ENOMEM;
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
- mem_node = kmalloc_obj(*mem_node,
- GFP_KERNEL);
+ mem_node = kmalloc_obj(*mem_node);
if (!mem_node)
return -ENOMEM;
debug("now enter io table ---\n");
debug("rio blk id: %x\n", blk_id);
- rio_table_ptr = kzalloc_obj(struct rio_table_hdr,
- GFP_KERNEL);
+ rio_table_ptr = kzalloc_obj(struct rio_table_hdr);
if (!rio_table_ptr) {
rc = -ENOMEM;
goto out;
list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) {
opt_rio_lo_ptr = search_opt_lo(rio_detail_ptr->chassis_num);
if (!opt_rio_lo_ptr) {
- opt_rio_lo_ptr = kzalloc_obj(struct opt_rio_lo,
- GFP_KERNEL);
+ opt_rio_lo_ptr = kzalloc_obj(struct opt_rio_lo);
if (!opt_rio_lo_ptr)
return -ENOMEM;
opt_rio_lo_ptr->rio_type = rio_detail_ptr->rio_type;
bus_info_ptr2 = ibmphp_find_same_bus_num(slot_ptr->slot_bus_num);
if (!bus_info_ptr2) {
- bus_info_ptr1 = kzalloc_obj(struct bus_info,
- GFP_KERNEL);
+ bus_info_ptr1 = kzalloc_obj(struct bus_info);
if (!bus_info_ptr1) {
rc = -ENOMEM;
goto error_no_slot;
cleanup_count = 6;
goto error;
}
- newfunc = kzalloc_obj(*newfunc,
- GFP_KERNEL);
+ newfunc = kzalloc_obj(*newfunc);
if (!newfunc)
return -ENOMEM;
flag = 0;
for (i = 0; i < 32; i++) {
if (func->devices[i]) {
- newfunc = kzalloc_obj(*newfunc,
- GFP_KERNEL);
+ newfunc = kzalloc_obj(*newfunc);
if (!newfunc)
return -ENOMEM;
}
}
- newfunc = kzalloc_obj(*newfunc,
- GFP_KERNEL);
+ newfunc = kzalloc_obj(*newfunc);
if (!newfunc)
return -ENOMEM;
for (i = 0; i < 32; i++) {
if (func->devices[i]) {
debug("inside for loop, device is %x\n", i);
- newfunc = kzalloc_obj(*newfunc,
- GFP_KERNEL);
+ newfunc = kzalloc_obj(*newfunc);
if (!newfunc)
return -ENOMEM;
debug("len[count] in IO %x, count %d\n", len[count], count);
- io[count] = kzalloc_obj(struct resource_node,
- GFP_KERNEL);
+ io[count] = kzalloc_obj(struct resource_node);
if (!io[count])
return -ENOMEM;
debug("len[count] in PFMEM %x, count %d\n", len[count], count);
- pfmem[count] = kzalloc_obj(struct resource_node,
- GFP_KERNEL);
+ pfmem[count] = kzalloc_obj(struct resource_node);
if (!pfmem[count])
return -ENOMEM;
ibmphp_add_resource(pfmem[count]);
func->pfmem[count] = pfmem[count];
} else {
- mem_tmp = kzalloc_obj(*mem_tmp,
- GFP_KERNEL);
+ mem_tmp = kzalloc_obj(*mem_tmp);
if (!mem_tmp) {
kfree(pfmem[count]);
return -ENOMEM;
debug("len[count] in Mem %x, count %d\n", len[count], count);
- mem[count] = kzalloc_obj(struct resource_node,
- GFP_KERNEL);
+ mem[count] = kzalloc_obj(struct resource_node);
if (!mem[count])
return -ENOMEM;
debug("len[count] in IO = %x\n", len[count]);
- bus_io[count] = kzalloc_obj(struct resource_node,
- GFP_KERNEL);
+ bus_io[count] = kzalloc_obj(struct resource_node);
if (!bus_io[count]) {
retval = -ENOMEM;
debug("len[count] in PFMEM = %x\n", len[count]);
- bus_pfmem[count] = kzalloc_obj(struct resource_node,
- GFP_KERNEL);
+ bus_pfmem[count] = kzalloc_obj(struct resource_node);
if (!bus_pfmem[count]) {
retval = -ENOMEM;
goto error;
ibmphp_add_resource(bus_pfmem[count]);
func->pfmem[count] = bus_pfmem[count];
} else {
- mem_tmp = kzalloc_obj(*mem_tmp,
- GFP_KERNEL);
+ mem_tmp = kzalloc_obj(*mem_tmp);
if (!mem_tmp) {
retval = -ENOMEM;
goto error;
debug("len[count] in Memory is %x\n", len[count]);
- bus_mem[count] = kzalloc_obj(struct resource_node,
- GFP_KERNEL);
+ bus_mem[count] = kzalloc_obj(struct resource_node);
if (!bus_mem[count]) {
retval = -ENOMEM;
goto error;
bus_cur->firstPFMemFromMem = pfmem_cur;
- mem = kzalloc_obj(struct resource_node,
- GFP_KERNEL);
+ mem = kzalloc_obj(struct resource_node);
if (!mem)
return -ENOMEM;
end_address |= (upper_io_end << 16);
if ((start_address) && (start_address <= end_address)) {
- range = kzalloc_obj(struct range_node,
- GFP_KERNEL);
+ range = kzalloc_obj(struct range_node);
if (!range)
return -ENOMEM;
fix_resources(bus_sec);
if (ibmphp_find_resource(bus_cur, start_address, &io, IO)) {
- io = kzalloc_obj(struct resource_node,
- GFP_KERNEL);
+ io = kzalloc_obj(struct resource_node);
if (!io) {
kfree(range);
return -ENOMEM;
if ((start_address) && (start_address <= end_address)) {
- range = kzalloc_obj(struct range_node,
- GFP_KERNEL);
+ range = kzalloc_obj(struct range_node);
if (!range)
return -ENOMEM;
fix_resources(bus_sec);
if (ibmphp_find_resource(bus_cur, start_address, &mem, MEM)) {
- mem = kzalloc_obj(struct resource_node,
- GFP_KERNEL);
+ mem = kzalloc_obj(struct resource_node);
if (!mem) {
kfree(range);
return -ENOMEM;
if ((start_address) && (start_address <= end_address)) {
- range = kzalloc_obj(struct range_node,
- GFP_KERNEL);
+ range = kzalloc_obj(struct range_node);
if (!range)
return -ENOMEM;
fix_resources(bus_sec);
if (ibmphp_find_resource(bus_cur, start_address, &pfmem, PFMEM)) {
- pfmem = kzalloc_obj(struct resource_node,
- GFP_KERNEL);
+ pfmem = kzalloc_obj(struct resource_node);
if (!pfmem) {
kfree(range);
return -ENOMEM;
int i, j, k, result = 0, count = 0;
struct sbiret ret;
- event_info_shmem = kzalloc_objs(*event_info_shmem, num_events,
- GFP_KERNEL);
+ event_info_shmem = kzalloc_objs(*event_info_shmem, num_events);
if (!event_info_shmem)
return -ENOMEM;
maps_per_pin++;
if (num_pulls)
maps_per_pin++;
- cur_map = maps = kzalloc_objs(*maps, num_pins * maps_per_pin,
- GFP_KERNEL);
+ cur_map = maps = kzalloc_objs(*maps, num_pins * maps_per_pin);
if (!maps)
return -ENOMEM;
}
/* we will reallocate later */
- pctrl->functions = kzalloc_objs(*pctrl->functions, max_functions,
- GFP_KERNEL);
+ pctrl->functions = kzalloc_objs(*pctrl->functions, max_functions);
if (!pctrl->functions)
return -ENOMEM;
girq->parents = kmalloc_array(girq->num_parents,
sizeof(*girq->parents),
GFP_KERNEL);
- irq_data = kmalloc_objs(*irq_data, girq->num_parents,
- GFP_KERNEL);
+ irq_data = kmalloc_objs(*irq_data, girq->num_parents);
if (!girq->parents || !irq_data) {
ret = -ENOMEM;
goto out_free_irq_data;
* special functions per pin, plus one entry for the sentinel.
* We'll reallocate that later anyway.
*/
- pctl->functions = kzalloc_objs(*pctl->functions, 7 * pctl->ngroups + 4,
- GFP_KERNEL);
+ pctl->functions = kzalloc_objs(*pctl->functions, 7 * pctl->ngroups + 4);
if (!pctl->functions)
return -ENOMEM;
if (num_pulls)
maps_per_pin++;
- cur_map = maps = kzalloc_objs(*maps, num_pins * maps_per_pin,
- GFP_KERNEL);
+ cur_map = maps = kzalloc_objs(*maps, num_pins * maps_per_pin);
if (!maps)
return -ENOMEM;
if (!n_peripherals)
return 0;
- acpi_peripherals = kzalloc_objs(*src->acpi_peripherals, n_peripherals,
- GFP_KERNEL);
+ acpi_peripherals = kzalloc_objs(*src->acpi_peripherals, n_peripherals);
if (!acpi_peripherals)
return -ENOMEM;
/* Initialize AC power tunables */
ac_limits = power_data->ac_data;
if (ac_limits) {
- ac_rog_tunables = kzalloc_obj(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC],
- GFP_KERNEL);
+ ac_rog_tunables = kzalloc_obj(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC]);
if (!ac_rog_tunables)
goto err_nomem;
/* Initialize DC power tunables */
dc_limits = power_data->dc_data;
if (dc_limits) {
- dc_rog_tunables = kzalloc_obj(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC],
- GFP_KERNEL);
+ dc_rog_tunables = kzalloc_obj(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC]);
if (!dc_rog_tunables) {
kfree(ac_rog_tunables);
goto err_nomem;
return -ENOMEM;
/* need to store both location and value + terminator*/
- token_attrs = kzalloc_objs(*token_attrs, (2 * da_num_tokens) + 1,
- GFP_KERNEL);
+ token_attrs = kzalloc_objs(*token_attrs, (2 * da_num_tokens) + 1);
if (!token_attrs)
goto out_allocate_attrs;
if (!board_data)
return dev_err_probe(&client->dev, -ENODEV, "No board-data found for this model\n");
- cells = kzalloc_objs(*cells, TPS68470_WIN_MFD_CELL_COUNT,
- GFP_KERNEL);
+ cells = kzalloc_objs(*cells, TPS68470_WIN_MFD_CELL_COUNT);
if (!cells)
return -ENOMEM;
{
int ret;
- isst_cpu_info = kzalloc_objs(*isst_cpu_info, num_possible_cpus(),
- GFP_KERNEL);
+ isst_cpu_info = kzalloc_objs(*isst_cpu_info, num_possible_cpus());
if (!isst_cpu_info)
return -ENOMEM;
- isst_pkg_info = kzalloc_objs(*isst_pkg_info, topology_max_packages(),
- GFP_KERNEL);
+ isst_pkg_info = kzalloc_objs(*isst_pkg_info, topology_max_packages());
if (!isst_pkg_info) {
kfree(isst_cpu_info);
return -ENOMEM;
uncore_max_entries = topology_max_packages() *
topology_max_dies_per_package();
- uncore_instances = kzalloc_objs(*uncore_instances, uncore_max_entries,
- GFP_KERNEL);
+ uncore_instances = kzalloc_objs(*uncore_instances, uncore_max_entries);
if (!uncore_instances)
return -ENOMEM;
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
{
/* start IO enumeration */
- struct sony_pic_ioport *ioport = kzalloc_obj(*ioport,
- GFP_KERNEL);
+ struct sony_pic_ioport *ioport = kzalloc_obj(*ioport);
if (!ioport)
return AE_ERROR;
u64 sz;
int i, ret;
- prev_obj_to_cnode = kmalloc_objs(*prev_obj_to_cnode, uv_bios_obj_cnt,
- GFP_KERNEL);
+ prev_obj_to_cnode = kmalloc_objs(*prev_obj_to_cnode, uv_bios_obj_cnt);
if (!prev_obj_to_cnode)
return -ENOMEM;
}
for (j = 0; j < uv_bios_obj_cnt; j++) {
for (k = 0; k < hub_buf[j].ports; k++) {
- uv_hubs[j]->ports[k] = kzalloc_obj(*uv_hubs[j]->ports[k],
- GFP_KERNEL);
+ uv_hubs[j]->ports[k] = kzalloc_obj(*uv_hubs[j]->ports[k]);
if (!uv_hubs[j]->ports[k]) {
ret = -ENOMEM;
k--;
}
num_pci_lines = l;
- uv_pci_objs = kzalloc_objs(*uv_pci_objs, num_pci_lines,
- GFP_KERNEL);
+ uv_pci_objs = kzalloc_objs(*uv_pci_objs, num_pci_lines);
if (!uv_pci_objs) {
kfree(pci_top_str);
ret = -ENOMEM;
}
start = pci_top_str;
while ((found = strsep(&start, "\n")) != NULL) {
- uv_pci_objs[k] = kzalloc_obj(*uv_pci_objs[k],
- GFP_KERNEL);
+ uv_pci_objs[k] = kzalloc_obj(*uv_pci_objs[k]);
if (!uv_pci_objs[k]) {
ret = -ENOMEM;
goto err_pci_obj;
exit_handler = dev_info->exit;
}
- i2c_clients = kzalloc_objs(*i2c_clients, dev_info->i2c_client_count,
- GFP_KERNEL);
+ i2c_clients = kzalloc_objs(*i2c_clients, dev_info->i2c_client_count);
if (!i2c_clients) {
x86_android_tablet_remove(pdev);
return -ENOMEM;
}
pr_debug("found %d domains on %s\n", rp->nr_domains, rp->name);
- rp->domains = kzalloc_objs(struct rapl_domain, rp->nr_domains,
- GFP_KERNEL);
+ rp->domains = kzalloc_objs(struct rapl_domain, rp->nr_domains);
if (!rp->domains)
return -ENOMEM;
}
if (num_consumers == 0)
return 0;
- _consumers = kmalloc_objs(struct regulator_bulk_data, num_consumers,
- GFP_KERNEL);
+ _consumers = kmalloc_objs(struct regulator_bulk_data, num_consumers);
if (!_consumers)
return -ENOMEM;
goto restart;
if (dev_info->num_of_segments <= 1)
return 0;
- sort_list = kzalloc_objs(struct segment_info, dev_info->num_of_segments,
- GFP_KERNEL);
+ sort_list = kzalloc_objs(struct segment_info, dev_info->num_of_segments);
if (sort_list == NULL)
return -ENOMEM;
i = 0;
* get a struct dcssblk_dev_info
*/
if (num_of_segments == 0) {
- dev_info = kzalloc_obj(struct dcssblk_dev_info,
- GFP_KERNEL);
+ dev_info = kzalloc_obj(struct dcssblk_dev_info);
if (dev_info == NULL) {
rc = -ENOMEM;
goto out;
if (!scmrq->aob)
goto free;
- scmrq->request = kzalloc_objs(scmrq->request[0], nr_requests_per_io,
- GFP_KERNEL);
+ scmrq->request = kzalloc_objs(scmrq->request[0], nr_requests_per_io);
if (!scmrq->request)
goto free;
if (!screen)
goto out_err;
for (lines = 0; lines < allocated; lines++) {
- screen[lines].cells = kzalloc_objs(struct tty3270_cell, cols,
- GFP_KERNEL);
+ screen[lines].cells = kzalloc_objs(struct tty3270_cell, cols);
if (!screen[lines].cells)
goto out_screen;
}
goto out_err;
}
- css->pseudo_subchannel = kzalloc_obj(*css->pseudo_subchannel,
- GFP_KERNEL);
+ css->pseudo_subchannel = kzalloc_obj(*css->pseudo_subchannel);
if (!css->pseudo_subchannel) {
device_unregister(&css->device);
ret = -ENOMEM;
{
int rc;
- q_indicators = kzalloc_objs(struct indicator_t, TIQDIO_NR_INDICATORS,
- GFP_KERNEL);
+ q_indicators = kzalloc_objs(struct indicator_t, TIQDIO_NR_INDICATORS);
if (!q_indicators)
return -ENOMEM;
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
- private->cp.guest_cp = kzalloc_objs(struct ccw1, CCWCHAIN_LEN_MAX,
- GFP_KERNEL);
+ private->cp.guest_cp = kzalloc_objs(struct ccw1, CCWCHAIN_LEN_MAX);
if (!private->cp.guest_cp)
goto out_free_private;
STATIC int
NCR_700_sdev_init(struct scsi_device *SDp)
{
- SDp->hostdata = kzalloc_obj(struct NCR_700_Device_Parameters,
- GFP_KERNEL);
+ SDp->hostdata = kzalloc_obj(struct NCR_700_Device_Parameters);
if (!SDp->hostdata)
return -ENOMEM;
if (blogic_probe_options.noprobe)
return -ENODEV;
blogic_probeinfo_list =
- kzalloc_objs(struct blogic_probeinfo, BLOGIC_MAX_ADAPTERS,
- GFP_KERNEL);
+ kzalloc_objs(struct blogic_probeinfo, BLOGIC_MAX_ADAPTERS);
if (blogic_probeinfo_list == NULL) {
blogic_err("BusLogic: Unable to allocate Probe Info List\n",
NULL);
if (aac_reset_devices || reset_devices)
aac->init_reset = true;
- aac->fibs = kzalloc_objs(struct fib, shost->can_queue + AAC_NUM_MGT_FIB,
- GFP_KERNEL);
+ aac->fibs = kzalloc_objs(struct fib, shost->can_queue + AAC_NUM_MGT_FIB);
if (!aac->fibs) {
error = -ENOMEM;
goto out_free_host;
return -ENOMEM;
}
- mem_arr_orig = kmalloc_objs(*mem_arr_orig, BEISCSI_MAX_FRAGS_INIT,
- GFP_KERNEL);
+ mem_arr_orig = kmalloc_objs(*mem_arr_orig, BEISCSI_MAX_FRAGS_INIT);
if (!mem_arr_orig) {
kfree(phba->init_mem);
kfree(phwi_ctrlr->wrb_context);
idx = 0;
mem_descr = phba->init_mem;
mem_descr += HWI_MEM_WRB;
- pwrb_arr = kmalloc_objs(*pwrb_arr, phba->params.cxns_per_ctrl,
- GFP_KERNEL);
+ pwrb_arr = kmalloc_objs(*pwrb_arr, phba->params.cxns_per_ctrl);
if (!pwrb_arr) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Memory alloc failed in create wrb ring.\n");
for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
- ptr_cid_info = kzalloc_obj(struct ulp_cid_info,
- GFP_KERNEL);
+ ptr_cid_info = kzalloc_obj(struct ulp_cid_info);
if (!ptr_cid_info) {
ret = -ENOMEM;
hba->next_conn_id = 0;
hba->tgt_ofld_list =
- kzalloc_objs(struct bnx2fc_rport *, BNX2FC_NUM_MAX_SESS,
- GFP_KERNEL);
+ kzalloc_objs(struct bnx2fc_rport *, BNX2FC_NUM_MAX_SESS);
if (!hba->tgt_ofld_list) {
printk(KERN_ERR PFX "Unable to allocate tgt offload list\n");
goto tgtofld_err;
goto mem_err;
}
- cmgr->free_list_lock = kzalloc_objs(*cmgr->free_list_lock, arr_sz,
- GFP_KERNEL);
+ cmgr->free_list_lock = kzalloc_objs(*cmgr->free_list_lock, arr_sz);
if (!cmgr->free_list_lock) {
printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
kfree(cmgr->free_list);
ln->fcfinfo = pln->fcfinfo;
} else {
/* Another non-root physical lnode (FCF) */
- ln->fcfinfo = kzalloc_obj(struct csio_fcf_info,
- GFP_KERNEL);
+ ln->fcfinfo = kzalloc_obj(struct csio_fcf_info);
if (!ln->fcfinfo) {
csio_ln_err(ln, "Failed to alloc FCF info\n");
CSIO_INC_STATS(hw, n_err_nomem);
for (i = 0; i < hw->hw_rq_count; i++)
count += hw->hw_rq[i]->entry_count;
- hw->seq_pool = kmalloc_objs(struct efc_hw_sequence, count,
- GFP_KERNEL);
+ hw->seq_pool = kmalloc_objs(struct efc_hw_sequence, count);
if (!hw->seq_pool)
return -ENOMEM;
}
static bool alloc_vda_req(struct esas2r_adapter *a,
struct esas2r_request *rq)
{
- struct esas2r_mem_desc *memdesc = kzalloc_obj(struct esas2r_mem_desc,
- GFP_KERNEL);
+ struct esas2r_mem_desc *memdesc = kzalloc_obj(struct esas2r_mem_desc);
if (memdesc == NULL) {
esas2r_hdebug("could not alloc mem for vda request memdesc\n");
}
/* allocate the S/G list memory descriptors */
- a->sg_list_mds = kzalloc_objs(struct esas2r_mem_desc, num_sg_lists,
- GFP_KERNEL);
+ a->sg_list_mds = kzalloc_objs(struct esas2r_mem_desc, num_sg_lists);
if (a->sg_list_mds == NULL) {
esas2r_log(ESAS2R_LOG_CRIT,
int i, j;
int rc = 0;
- channels->scrqs = kzalloc_objs(*channels->scrqs, channels->max_queues,
- GFP_KERNEL);
+ channels->scrqs = kzalloc_objs(*channels->scrqs, channels->max_queues);
if (!channels->scrqs)
return -ENOMEM;
ioa_cfg->ipr_cmnd_list = kzalloc_objs(struct ipr_cmnd *,
IPR_NUM_CMD_BLKS, GFP_KERNEL);
- ioa_cfg->ipr_cmnd_list_dma = kzalloc_objs(dma_addr_t, IPR_NUM_CMD_BLKS,
- GFP_KERNEL);
+ ioa_cfg->ipr_cmnd_list_dma = kzalloc_objs(dma_addr_t, IPR_NUM_CMD_BLKS);
if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
ipr_free_cmd_blks(ioa_cfg);
if (!vport->qfpa_res) {
max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
- vport->qfpa_res = kzalloc_objs(*vport->qfpa_res, max_desc,
- GFP_KERNEL);
+ vport->qfpa_res = kzalloc_objs(*vport->qfpa_res, max_desc);
if (!vport->qfpa_res)
goto out;
}
desc = (struct priority_range_desc *)(pcmd + 8);
vmid_range = vport->vmid_priority.vmid_range;
if (!vmid_range) {
- vmid_range = kzalloc_objs(*vmid_range, MAX_PRIORITY_DESC,
- GFP_KERNEL);
+ vmid_range = kzalloc_objs(*vmid_range, MAX_PRIORITY_DESC);
if (!vmid_range) {
kfree(vport->qfpa_res);
goto out;
if (lpfc_is_vmid_enabled(phba)) {
vport->vmid =
- kzalloc_objs(struct lpfc_vmid, phba->cfg_max_vmid,
- GFP_KERNEL);
+ kzalloc_objs(struct lpfc_vmid, phba->cfg_max_vmid);
if (!vport->vmid)
return -ENOMEM;
pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
/* Allocate record for keeping SGE virtual addresses */
- mbox->sge_array = kzalloc_obj(struct lpfc_mbx_nembed_sge_virt,
- GFP_KERNEL);
+ mbox->sge_array = kzalloc_obj(struct lpfc_mbx_nembed_sge_virt);
if (!mbox->sge_array) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"2527 Failed to allocate non-embedded SGE "
if (!phba->lpfc_mbuf_pool)
goto fail;
- pool->elements = kmalloc_objs(struct lpfc_dmabuf, LPFC_MBUF_POOL_SIZE,
- GFP_KERNEL);
+ pool->elements = kmalloc_objs(struct lpfc_dmabuf, LPFC_MBUF_POOL_SIZE);
if (!pool->elements)
goto fail_free_lpfc_mbuf_pool;
int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
u32 entries)
{
- rx_monitor->ring = kmalloc_objs(struct rx_info_entry, entries,
- GFP_KERNEL);
+ rx_monitor->ring = kmalloc_objs(struct rx_info_entry, entries);
if (!rx_monitor->ring)
return -ENOMEM;
struct lpfc_vport *port_iterator;
struct lpfc_vport **vports;
int index = 0;
- vports = kzalloc_objs(struct lpfc_vport *, phba->max_vports + 1,
- GFP_KERNEL);
+ vports = kzalloc_objs(struct lpfc_vport *, phba->max_vports + 1);
if (vports == NULL)
return NULL;
spin_lock_irq(&phba->port_list_lock);
* Allocate single blocks of memory for all required kiocs,
* mailboxes and passthru structures.
*/
- adapter->kioc_list = kmalloc_objs(uioc_t, lld_adp->max_kioc,
- GFP_KERNEL);
- adapter->mbox_list = kmalloc_objs(mbox64_t, lld_adp->max_kioc,
- GFP_KERNEL);
+ adapter->kioc_list = kmalloc_objs(uioc_t, lld_adp->max_kioc);
+ adapter->mbox_list = kmalloc_objs(mbox64_t, lld_adp->max_kioc);
adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
&adapter->pdev->dev,
sizeof(mraid_passthru_t),
* Allocate the dynamic array first and then allocate individual
* commands.
*/
- instance->cmd_list = kzalloc_objs(struct megasas_cmd *, max_cmd,
- GFP_KERNEL);
+ instance->cmd_list = kzalloc_objs(struct megasas_cmd *, max_cmd);
if (!instance->cmd_list) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
}
for (i = 0; i < max_cmd; i++) {
- instance->cmd_list[i] = kmalloc_obj(struct megasas_cmd,
- GFP_KERNEL);
+ instance->cmd_list[i] = kmalloc_obj(struct megasas_cmd);
if (!instance->cmd_list[i]) {
* commands.
*/
fusion->cmd_list =
- kzalloc_objs(struct megasas_cmd_fusion *, max_mpt_cmd,
- GFP_KERNEL);
+ kzalloc_objs(struct megasas_cmd_fusion *, max_mpt_cmd);
if (!fusion->cmd_list) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
}
for (i = 0; i < max_mpt_cmd; i++) {
- fusion->cmd_list[i] = kzalloc_obj(struct megasas_cmd_fusion,
- GFP_KERNEL);
+ fusion->cmd_list[i] = kzalloc_obj(struct megasas_cmd_fusion);
if (!fusion->cmd_list[i]) {
for (j = 0; j < i; j++)
kfree(fusion->cmd_list[j]);
num_queues);
if (!mrioc->req_qinfo) {
- mrioc->req_qinfo = kzalloc_objs(struct op_req_qinfo, num_queues,
- GFP_KERNEL);
+ mrioc->req_qinfo = kzalloc_objs(struct op_req_qinfo, num_queues);
if (!mrioc->req_qinfo) {
retval = -1;
goto out_failed;
mrioc->sas_hba.host_node = 1;
INIT_LIST_HEAD(&mrioc->sas_hba.sas_port_list);
mrioc->sas_hba.parent_dev = &mrioc->shost->shost_gendev;
- mrioc->sas_hba.phy = kzalloc_objs(struct mpi3mr_sas_phy, num_phys,
- GFP_KERNEL);
+ mrioc->sas_hba.phy = kzalloc_objs(struct mpi3mr_sas_phy, num_phys);
if (!mrioc->sas_hba.phy)
return;
sizeof(Mpi2DefaultReplyDescriptor_t);
int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
- ioc->reply_post = kzalloc_objs(struct reply_post_struct, count,
- GFP_KERNEL);
+ ioc->reply_post = kzalloc_objs(struct reply_post_struct, count);
if (!ioc->reply_post)
return -ENOMEM;
/*
rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
if (ioc->supports_trigger_pages) {
- master_tg = kzalloc_obj(struct SL_WH_MASTER_TRIGGER_T,
- GFP_KERNEL);
+ master_tg = kzalloc_obj(struct SL_WH_MASTER_TRIGGER_T);
if (!master_tg)
return -ENOMEM;
sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
if (ioc->supports_trigger_pages) {
- event_tg = kzalloc_obj(struct SL_WH_EVENT_TRIGGERS_T,
- GFP_KERNEL);
+ event_tg = kzalloc_obj(struct SL_WH_EVENT_TRIGGERS_T);
if (!event_tg)
return -ENOMEM;
port_id = sas_iounit_pg0->PhyData[i].Port;
mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
if (!mport) {
- mport = kzalloc_obj(struct hba_port,
- GFP_KERNEL);
+ mport = kzalloc_obj(struct hba_port);
if (!mport)
break;
mport->port_id = port_id;
}
ioc->sas_hba.num_phys = num_phys;
- port_table = kzalloc_objs(struct hba_port, ioc->sas_hba.num_phys,
- GFP_KERNEL);
+ port_table = kzalloc_objs(struct hba_port, ioc->sas_hba.num_phys);
if (!port_table)
return;
found = mvumi_match_devices(mhba, id, wwid);
if (!found) {
mvumi_remove_devices(mhba, id);
- mv_dev = kzalloc_obj(struct mvumi_device,
- GFP_KERNEL);
+ mv_dev = kzalloc_obj(struct mvumi_device);
if (!mv_dev) {
dev_err(&mhba->pdev->dev,
"%s alloc mv_dev failed\n",
}
/* Allocate task parameters to pass to f/w init funcions */
- io_req->task_params = kzalloc_obj(*io_req->task_params,
- GFP_KERNEL);
+ io_req->task_params = kzalloc_obj(*io_req->task_params);
if (!io_req->task_params) {
QEDF_ERR(&(qedf->dbg_ctx),
"Failed to allocate task_params for xid=0x%x\n",
* Allocate scatter/gather list info to pass to f/w init
* functions.
*/
- io_req->sgl_task_params = kzalloc_obj(struct scsi_sgl_task_params,
- GFP_KERNEL);
+ io_req->sgl_task_params = kzalloc_obj(struct scsi_sgl_task_params);
if (!io_req->sgl_task_params) {
QEDF_ERR(&(qedf->dbg_ctx),
"Failed to allocate sgl_task_params for xid=0x%x\n",
/* Allocate a CQ and an associated PBL for each MSI-X vector */
for (i = 0; i < qedf->num_queues; i++) {
- qedf->global_queues[i] = kzalloc_obj(struct global_queue,
- GFP_KERNEL);
+ qedf->global_queues[i] = kzalloc_obj(struct global_queue);
if (!qedf->global_queues[i]) {
QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
"global queue %d.\n", i);
*/
for (i = 0; i < qedi->num_queues; i++) {
qedi->global_queues[i] =
- kzalloc_obj(*qedi->global_queues[0],
- GFP_KERNEL);
+ kzalloc_obj(*qedi->global_queues[0]);
if (!qedi->global_queues[i]) {
QEDI_ERR(&qedi->dbg_ctx,
"Unable to allocation global queue %d.\n", i);
req->num_outstanding_cmds = ha->cur_fw_iocb_count;
}
- req->outstanding_cmds = kzalloc_objs(srb_t *, req->num_outstanding_cmds,
- GFP_KERNEL);
+ req->outstanding_cmds = kzalloc_objs(srb_t *, req->num_outstanding_cmds);
if (!req->outstanding_cmds) {
/*
/* Try GID_PT to get device list, else GAN. */
if (!ha->swl)
- ha->swl = kzalloc_objs(sw_info_t, ha->max_fibre_devices,
- GFP_KERNEL);
+ ha->swl = kzalloc_objs(sw_info_t, ha->max_fibre_devices);
swl = ha->swl;
if (!swl) {
/*EMPTY*/
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
if (!ha->qp_cpu_map) {
- ha->qp_cpu_map = kzalloc_objs(struct qla_qpair *, NR_CPUS,
- GFP_KERNEL);
+ ha->qp_cpu_map = kzalloc_objs(struct qla_qpair *, NR_CPUS);
if (!ha->qp_cpu_map) {
ql_log(ql_log_fatal, vha, 0x0180,
"Unable to allocate memory for qp_cpu_map ptrs.\n");
}
}
vha->irq_offset = desc.pre_vectors;
- ha->msix_entries = kzalloc_objs(struct qla_msix_entry, ha->msix_count,
- GFP_KERNEL);
+ ha->msix_entries = kzalloc_objs(struct qla_msix_entry, ha->msix_count);
if (!ha->msix_entries) {
ql_log(ql_log_fatal, vha, 0x00c8,
"Failed to allocate memory for ha->msix_entries.\n");
return -ENOMEM;
}
sz = qp->req->length * sizeof(dma_addr_t);
- qp->buf_pool.dma_array = kzalloc_objs(dma_addr_t, qp->req->length,
- GFP_KERNEL);
+ qp->buf_pool.dma_array = kzalloc_objs(dma_addr_t, qp->req->length);
if (!qp->buf_pool.dma_array) {
ql_log(ql_log_warn, vha, 0x0186,
"Failed to allocate dma_array(%d).\n", sz);
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- ha->req_q_map = kzalloc_objs(struct req_que *, ha->max_req_queues,
- GFP_KERNEL);
+ ha->req_q_map = kzalloc_objs(struct req_que *, ha->max_req_queues);
if (!ha->req_q_map) {
ql_log(ql_log_fatal, vha, 0x003b,
"Unable to allocate memory for request queue ptrs.\n");
goto fail_req_map;
}
- ha->rsp_q_map = kzalloc_objs(struct rsp_que *, ha->max_rsp_queues,
- GFP_KERNEL);
+ ha->rsp_q_map = kzalloc_objs(struct rsp_que *, ha->max_rsp_queues);
if (!ha->rsp_q_map) {
ql_log(ql_log_fatal, vha, 0x003c,
"Unable to allocate memory for response queue ptrs.\n");
INIT_LIST_HEAD(&ha->vp_list);
/* Allocate memory for our loop_id bitmap */
- ha->loop_id_map = kzalloc_objs(long, BITS_TO_LONGS(LOOPID_MAP_SIZE),
- GFP_KERNEL);
+ ha->loop_id_map = kzalloc_objs(long, BITS_TO_LONGS(LOOPID_MAP_SIZE));
if (!ha->loop_id_map)
goto fail_loop_id_map;
else {
return -ENOMEM;
}
- tgt->qphints = kzalloc_objs(struct qla_qpair_hint, ha->max_qpairs + 1,
- GFP_KERNEL);
+ tgt->qphints = kzalloc_objs(struct qla_qpair_hint, ha->max_qpairs + 1);
if (!tgt->qphints) {
kfree(tgt);
ql_log(ql_log_warn, base_vha, 0x0197,
devip->max_open = sdeb_zbc_max_open;
}
- devip->zstate = kzalloc_objs(struct sdeb_zone_state, devip->nr_zones,
- GFP_KERNEL);
+ devip->zstate = kzalloc_objs(struct sdeb_zone_state, devip->nr_zones);
if (!devip->zstate)
return -ENOMEM;
if (sdebug_ptype == TYPE_TAPE) {
if (!devip->tape_blocks[0]) {
devip->tape_blocks[0] =
- kzalloc_objs(struct tape_block, TAPE_UNITS,
- GFP_KERNEL);
+ kzalloc_objs(struct tape_block, TAPE_UNITS);
if (!devip->tape_blocks[0])
return 1;
}
}
page2_not_supported:
if (components > 0) {
- scomp = kzalloc_objs(struct ses_component, components,
- GFP_KERNEL);
+ scomp = kzalloc_objs(struct ses_component, components);
if (!scomp)
goto err_free;
}
num_new_devices = num_physicals + num_logicals;
- new_device_list = kmalloc_objs(*new_device_list, num_new_devices,
- GFP_KERNEL);
+ new_device_list = kmalloc_objs(*new_device_list, num_new_devices);
if (!new_device_list) {
dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
rc = -ENOMEM;
*/
pvscsi_setup_all_rings(adapter);
- adapter->cmd_map = kzalloc_objs(struct pvscsi_ctx, adapter->req_depth,
- GFP_KERNEL);
+ adapter->cmd_map = kzalloc_objs(struct pvscsi_ctx, adapter->req_depth);
if (!adapter->cmd_map) {
printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
error = -ENOMEM;
int domains_read = 0;
int ret, i;
- struct servreg_get_domain_list_resp *resp __free(kfree) = kzalloc_obj(*resp,
- GFP_KERNEL);
+ struct servreg_get_domain_list_resp *resp __free(kfree) = kzalloc_obj(*resp);
if (!resp)
return -ENOMEM;
struct rz_sysc *sysc;
int ret;
- struct regmap_config *regmap_cfg __free(kfree) = kzalloc_obj(*regmap_cfg,
- GFP_KERNEL);
+ struct regmap_config *regmap_cfg __free(kfree) = kzalloc_obj(*regmap_cfg);
if (!regmap_cfg)
return -ENOMEM;
ctx->count = count;
ctx->link_mask = res->link_mask;
- struct resource *sdw_res __free(kfree) = kzalloc_obj(*sdw_res,
- GFP_KERNEL);
+ struct resource *sdw_res __free(kfree) = kzalloc_obj(*sdw_res);
if (!sdw_res) {
kfree(ctx);
return NULL;
num_slaves++;
}
- ctx->peripherals = kmalloc_flex(*ctx->peripherals, array, num_slaves,
- GFP_KERNEL);
+ ctx->peripherals = kmalloc_flex(*ctx->peripherals, array, num_slaves);
if (!ctx->peripherals)
return -ENOMEM;
ctx->peripherals->num_peripherals = num_slaves;
sconfig.bps = snd_pcm_format_width(params_format(params));
/* Port configuration */
- struct sdw_port_config *pconfig __free(kfree) = kzalloc_obj(*pconfig,
- GFP_KERNEL);
+ struct sdw_port_config *pconfig __free(kfree) = kzalloc_obj(*pconfig);
if (!pconfig)
return -ENOMEM;
sconfig.bps = snd_pcm_format_width(params_format(params));
/* Port configuration */
- struct sdw_port_config *pconfig __free(kfree) = kzalloc_obj(*pconfig,
- GFP_KERNEL);
+ struct sdw_port_config *pconfig __free(kfree) = kzalloc_obj(*pconfig);
if (!pconfig)
return -ENOMEM;
sconfig.bps = snd_pcm_format_width(params_format(params));
/* Port configuration */
- struct sdw_port_config *pconfig __free(kfree) = kzalloc_obj(*pconfig,
- GFP_KERNEL);
+ struct sdw_port_config *pconfig __free(kfree) = kzalloc_obj(*pconfig);
if (!pconfig)
return -ENOMEM;
num_slaves++;
}
- ctx->peripherals = kmalloc_flex(*ctx->peripherals, array, num_slaves,
- GFP_KERNEL);
+ ctx->peripherals = kmalloc_flex(*ctx->peripherals, array, num_slaves);
if (!ctx->peripherals)
goto err;
ctx->peripherals->num_peripherals = num_slaves;
return PTR_ERR(qspi->base[CHIP_SELECT]);
}
- qspi->dev_ids = kzalloc_objs(struct bcm_qspi_dev_id, num_irqs,
- GFP_KERNEL);
+ qspi->dev_ids = kzalloc_objs(struct bcm_qspi_dev_id, num_irqs);
if (!qspi->dev_ids)
return -ENOMEM;
ms->gpio_cs_count = gpiod_count(&op->dev, NULL);
if (ms->gpio_cs_count > 0) {
host->num_chipselect = ms->gpio_cs_count;
- ms->gpio_cs = kmalloc_objs(*ms->gpio_cs, ms->gpio_cs_count,
- GFP_KERNEL);
+ ms->gpio_cs = kmalloc_objs(*ms->gpio_cs, ms->gpio_cs_count);
if (!ms->gpio_cs) {
rc = -ENOMEM;
goto err_alloc_gpio;
unsigned int incnt = 0;
int ret;
- struct virtio_spi_req *spi_req __free(kfree) = kzalloc_obj(*spi_req,
- GFP_KERNEL);
+ struct virtio_spi_req *spi_req __free(kfree) = kzalloc_obj(*spi_req);
if (!spi_req)
return -ENOMEM;
channel->attr_group = kzalloc_obj(*channel->attr_group);
if (!channel->attr_group)
return -ENOMEM;
- channel->attr_groups = kzalloc_objs(*channel->attr_groups, 2,
- GFP_KERNEL);
+ channel->attr_groups = kzalloc_objs(*channel->attr_groups, 2);
if (!channel->attr_groups)
return -ENOMEM;
light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL);
if (!light->name)
return -ENOMEM;
- light->channels = kzalloc_objs(struct gb_channel, conf.channel_count,
- GFP_KERNEL);
+ light->channels = kzalloc_objs(struct gb_channel, conf.channel_count);
if (!light->channels)
return -ENOMEM;
/*
if (ret < 0)
goto out;
- glights->lights = kzalloc_objs(struct gb_light, glights->lights_count,
- GFP_KERNEL);
+ glights->lights = kzalloc_objs(struct gb_light, glights->lights_count);
if (!glights->lights) {
ret = -ENOMEM;
goto out;
}
}
- gbpsy->props = kzalloc_objs(*gbpsy->props, gbpsy->properties_count,
- GFP_KERNEL);
+ gbpsy->props = kzalloc_objs(*gbpsy->props, gbpsy->properties_count);
if (!gbpsy->props) {
ret = -ENOMEM;
goto out_put_operation;
ATOMISP_S3A_BUF_QUEUE_DEPTH_FOR_HAL;
dev_dbg(isp->dev, "allocating %d 3a buffers\n", count);
while (count--) {
- s3a_buf = kzalloc_obj(struct atomisp_s3a_buf,
- GFP_KERNEL);
+ s3a_buf = kzalloc_obj(struct atomisp_s3a_buf);
if (!s3a_buf)
goto error;
count = ATOMISP_CSS_Q_DEPTH + 1;
dev_dbg(isp->dev, "allocating %d dis buffers\n", count);
while (count--) {
- dis_buf = kzalloc_obj(struct atomisp_dis_buf,
- GFP_KERNEL);
+ dis_buf = kzalloc_obj(struct atomisp_dis_buf);
if (!dis_buf)
goto error;
if (atomisp_css_allocate_stat_buffers(
dev_dbg(isp->dev, "allocating %d metadata buffers for type %d\n",
count, i);
while (count--) {
- md_buf = kzalloc_obj(struct atomisp_metadata_buf,
- GFP_KERNEL);
+ md_buf = kzalloc_obj(struct atomisp_metadata_buf);
if (!md_buf)
goto error;
mycs->num_vf_pp = 1;
}
- mycs->vf_pp_binary = kzalloc_objs(struct ia_css_binary, mycs->num_vf_pp,
- GFP_KERNEL);
+ mycs->vf_pp_binary = kzalloc_objs(struct ia_css_binary, mycs->num_vf_pp);
if (!mycs->vf_pp_binary) {
err = -ENOMEM;
goto ERR;
/* allocate pipes */
curr_stream->num_pipes = num_pipes;
- curr_stream->pipes = kzalloc_objs(struct ia_css_pipe *, num_pipes,
- GFP_KERNEL);
+ curr_stream->pipes = kzalloc_objs(struct ia_css_pipe *, num_pipes);
if (!curr_stream->pipes) {
curr_stream->num_pipes = 0;
kfree(curr_stream);
sh_css_blob_info = NULL;
}
- fw_minibuffer = kzalloc_objs(struct fw_param, sh_css_num_binaries,
- GFP_KERNEL);
+ fw_minibuffer = kzalloc_objs(struct fw_param, sh_css_num_binaries);
if (!fw_minibuffer)
return -ENOMEM;
if (!params || !params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO])
goto err;
- dvs_config = kvzalloc_objs(struct ia_css_dvs_6axis_config, 1,
- GFP_KERNEL);
+ dvs_config = kvzalloc_objs(struct ia_css_dvs_6axis_config, 1);
if (!dvs_config)
goto err;
if (err_chk) {
master_num--;
- tsi148_device->flush_image = kmalloc_obj(*tsi148_device->flush_image,
- GFP_KERNEL);
+ tsi148_device->flush_image = kmalloc_obj(*tsi148_device->flush_image);
if (!tsi148_device->flush_image) {
retval = -ENOMEM;
goto err_master;
}
ib_dev->ibd_exclusive = true;
- ib_dev->ibd_plug = kzalloc_objs(*ib_dev->ibd_plug, nr_cpu_ids,
- GFP_KERNEL);
+ ib_dev->ibd_plug = kzalloc_objs(*ib_dev->ibd_plug, nr_cpu_ids);
if (!ib_dev->ibd_plug)
goto free_dev;
* the dest_node_acl and dest_se_deve pointers for the
* loop below.
*/
- tidh_new = kzalloc_obj(struct pr_transport_id_holder,
- GFP_KERNEL);
+ tidh_new = kzalloc_obj(struct pr_transport_id_holder);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
core_scsi3_lunacl_undepend_item(dest_se_deve);
static int qcomtee_open(struct tee_context *ctx)
{
- struct qcomtee_context_data *ctxdata __free(kfree) = kzalloc_obj(*ctxdata,
- GFP_KERNEL);
+ struct qcomtee_context_data *ctxdata __free(kfree) = kzalloc_obj(*ctxdata);
if (!ctxdata)
return -ENOMEM;
struct tee_shm *shm;
int err;
- struct qcomtee_mem_object *mem_object __free(kfree) = kzalloc_obj(*mem_object,
- GFP_KERNEL);
+ struct qcomtee_mem_object *mem_object __free(kfree) = kzalloc_obj(*mem_object);
if (!mem_object)
return -ENOMEM;
return -EINVAL;
if (arg.num_params) {
- params = kzalloc_objs(struct tee_param, arg.num_params,
- GFP_KERNEL);
+ params = kzalloc_objs(struct tee_param, arg.num_params);
if (!params)
return -ENOMEM;
uparams = uarg->params;
return -EINVAL;
if (arg.num_params) {
- params = kzalloc_objs(struct tee_param, arg.num_params,
- GFP_KERNEL);
+ params = kzalloc_objs(struct tee_param, arg.num_params);
if (!params)
return -ENOMEM;
uparams = uarg->params;
return -EINVAL;
if (arg.num_params) {
- params = kzalloc_objs(struct tee_param, arg.num_params,
- GFP_KERNEL);
+ params = kzalloc_objs(struct tee_param, arg.num_params);
if (!params)
return -ENOMEM;
uparams = uarg->params;
* This allocation may fail. CPU hotplug callbacks must check
* for a null pointer.
*/
- hfi_instances = kzalloc_objs(*hfi_instances, max_hfi_instances,
- GFP_KERNEL);
+ hfi_instances = kzalloc_objs(*hfi_instances, max_hfi_instances);
if (!hfi_instances)
return;
{
int ret;
- struct tt_thermal_zone *tt_zone __free(kfree) = kzalloc_obj(*tt_zone,
- GFP_KERNEL);
+ struct tt_thermal_zone *tt_zone __free(kfree) = kzalloc_obj(*tt_zone);
if (!tt_zone)
return -ENOMEM;
- struct tt_work *tt_work __free(kfree) = kzalloc_obj(*tt_work,
- GFP_KERNEL);
+ struct tt_work *tt_work __free(kfree) = kzalloc_obj(*tt_work);
if (!tt_work)
return -ENOMEM;
if (ret != 1)
return -EINVAL;
- struct tt_work *tt_work __free(kfree) = kzalloc_obj(*tt_work,
- GFP_KERNEL);
+ struct tt_work *tt_work __free(kfree) = kzalloc_obj(*tt_work);
if (!tt_work)
return -ENOMEM;
{
int id;
- struct tt_work *tt_work __free(kfree) = kzalloc_obj(*tt_work,
- GFP_KERNEL);
+ struct tt_work *tt_work __free(kfree) = kzalloc_obj(*tt_work);
if (!tt_work)
return -ENOMEM;
- struct tt_trip *tt_trip __free(kfree) = kzalloc_obj(*tt_trip,
- GFP_KERNEL);
+ struct tt_trip *tt_trip __free(kfree) = kzalloc_obj(*tt_trip);
if (!tt_trip)
return -ENOMEM;
if (!count)
return NULL;
- struct thermal_trip *tt __free(kfree) = kzalloc_objs(*tt, count,
- GFP_KERNEL);
+ struct thermal_trip *tt __free(kfree) = kzalloc_objs(*tt, count);
if (!tt)
return ERR_PTR(-ENOMEM);
}
/* initialize ports */
- sw->ports = kzalloc_objs(*sw->ports, sw->config.max_port_number + 1,
- GFP_KERNEL);
+ sw->ports = kzalloc_objs(*sw->ports, sw->config.max_port_number + 1);
if (!sw->ports) {
ret = -ENOMEM;
goto err_free_sw_ports;
int ret;
struct tty_driver *tty;
- goldfish_ttys = kzalloc_objs(*goldfish_ttys, goldfish_tty_line_count,
- GFP_KERNEL);
+ goldfish_ttys = kzalloc_objs(*goldfish_ttys, goldfish_tty_line_count);
if (goldfish_ttys == NULL) {
ret = -ENOMEM;
goto err_alloc_goldfish_ttys_failed;
struct resource *regs;
int ret, line;
- struct uart_8250_port *uart __free(kfree) = kzalloc_obj(*uart,
- GFP_KERNEL);
+ struct uart_8250_port *uart __free(kfree) = kzalloc_obj(*uart);
if (!uart)
return -ENOMEM;
{
int ret, i;
- struct uart_8250_port *uart __free(kfree) = kzalloc_obj(*uart,
- GFP_KERNEL);
+ struct uart_8250_port *uart __free(kfree) = kzalloc_obj(*uart);
if (!uart)
return -ENOMEM;
* Okay to malloc with GFP_KERNEL, we are not at
* interrupt context, and there are no locks held.
*/
- brd->channels[i] = kzalloc_obj(struct jsm_channel,
- GFP_KERNEL);
+ brd->channels[i] = kzalloc_obj(struct jsm_channel);
if (!brd->channels[i]) {
jsm_dbg(CORE, &brd->pci_dev,
"%s:%d Unable to allocate memory for channel struct\n",
if (uport->attr_group)
num_groups++;
- uport->tty_groups = kzalloc_objs(*uport->tty_groups, num_groups,
- GFP_KERNEL);
+ uport->tty_groups = kzalloc_objs(*uport->tty_groups, num_groups);
if (!uport->tty_groups)
return -ENOMEM;
if (!(flags & TTY_DRIVER_DEVPTS_MEM)) {
driver->ttys = kzalloc_objs(*driver->ttys, lines);
- driver->termios = kzalloc_objs(*driver->termios, lines,
- GFP_KERNEL);
+ driver->termios = kzalloc_objs(*driver->termios, lines);
if (!driver->ttys || !driver->termios) {
err = -ENOMEM;
goto err_free_all;
{
int error;
- struct input_handle __free(kfree) *handle = kzalloc_obj(*handle,
- GFP_KERNEL);
+ struct input_handle __free(kfree) *handle = kzalloc_obj(*handle);
if (!handle)
return -ENOMEM;
if (!hcd)
return NULL;
if (primary_hcd == NULL) {
- hcd->address0_mutex = kmalloc_obj(*hcd->address0_mutex,
- GFP_KERNEL);
+ hcd->address0_mutex = kmalloc_obj(*hcd->address0_mutex);
if (!hcd->address0_mutex) {
kfree(hcd);
dev_dbg(dev, "hcd address0 mutex alloc failed\n");
return NULL;
}
mutex_init(hcd->address0_mutex);
- hcd->bandwidth_mutex = kmalloc_obj(*hcd->bandwidth_mutex,
- GFP_KERNEL);
+ hcd->bandwidth_mutex = kmalloc_obj(*hcd->bandwidth_mutex);
if (!hcd->bandwidth_mutex) {
kfree(hcd->address0_mutex);
kfree(hcd);
goto fail;
/* allocate temporary function list */
- midi_function = kzalloc_objs(*midi_function, (MAX_PORTS * 4) + 11,
- GFP_KERNEL);
+ midi_function = kzalloc_objs(*midi_function, (MAX_PORTS * 4) + 11);
if (!midi_function) {
status = -ENOMEM;
goto fail;
return -ENODEV;
usb_ep->complete = complete;
- usb_ep->reqs = kzalloc_objs(*usb_ep->reqs, midi2->info.num_reqs,
- GFP_KERNEL);
+ usb_ep->reqs = kzalloc_objs(*usb_ep->reqs, midi2->info.num_reqs);
if (!usb_ep->reqs)
return -ENOMEM;
for (i = 0; i < midi2->info.num_reqs; i++) {
"ieps:%d eops:%d num_eps:%d\n",
num_ieps, num_oeps, bdc->num_eps);
/* allocate array of ep pointers */
- bdc->bdc_ep_array = kzalloc_objs(struct bdc_ep *, bdc->num_eps,
- GFP_KERNEL);
+ bdc->bdc_ep_array = kzalloc_objs(struct bdc_ep *, bdc->num_eps);
if (!bdc->bdc_ep_array)
goto fail;
goto err_alloc_frame;
}
- uhci->frame_cpu = kzalloc_objs(*uhci->frame_cpu, UHCI_NUMFRAMES,
- GFP_KERNEL);
+ uhci->frame_cpu = kzalloc_objs(*uhci->frame_cpu, UHCI_NUMFRAMES);
if (!uhci->frame_cpu)
goto err_alloc_frame_cpu;
if (utt->multi) {
tt_index = utt->hcpriv;
if (!tt_index) { /* Create the index array */
- tt_index = kzalloc_objs(*tt_index, utt->hub->maxchild,
- GFP_KERNEL);
+ tt_index = kzalloc_objs(*tt_index, utt->hub->maxchild);
if (!tt_index)
return ERR_PTR(-ENOMEM);
utt->hcpriv = tt_index;
return -EINVAL;
size = CHUNK_ALIGN(arg);
- vec = kzalloc_objs(struct mon_pgmap, size / CHUNK_SIZE,
- GFP_KERNEL);
+ vec = kzalloc_objs(struct mon_pgmap, size / CHUNK_SIZE);
if (vec == NULL) {
ret = -ENOMEM;
break;
/* Initialize LED timers */
if (mos7840_port->has_led) {
mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL);
- mos7840_port->led_dr = kmalloc_obj(*mos7840_port->led_dr,
- GFP_KERNEL);
+ mos7840_port->led_dr = kmalloc_obj(*mos7840_port->led_dr);
if (!mos7840_port->led_urb || !mos7840_port->led_dr) {
status = -ENOMEM;
goto error;
}
/* Allocate the connectors. Released in ucsi_unregister() */
- connector = kzalloc_objs(*connector, ucsi->cap.num_connectors + 1,
- GFP_KERNEL);
+ connector = kzalloc_objs(*connector, ucsi->cap.num_connectors + 1);
if (!connector) {
ret = -ENOMEM;
goto err_reset;
{
int id;
- status_attrs = kzalloc_objs(struct status_attr, vhci_num_controllers,
- GFP_KERNEL);
+ status_attrs = kzalloc_objs(struct status_attr, vhci_num_controllers);
if (status_attrs == NULL)
return -ENOMEM;
struct attribute **attrs;
int ret, i;
- attrs = kzalloc_objs(struct attribute *, (vhci_num_controllers + 5),
- GFP_KERNEL);
+ attrs = kzalloc_objs(struct attribute *, (vhci_num_controllers + 5));
if (attrs == NULL)
return -ENOMEM;
mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx);
ndev->vqs = kzalloc_objs(*ndev->vqs, max_vqs);
- ndev->event_cbs = kzalloc_objs(*ndev->event_cbs, max_vqs + 1,
- GFP_KERNEL);
+ ndev->event_cbs = kzalloc_objs(*ndev->event_cbs, max_vqs + 1);
if (!ndev->vqs || !ndev->event_cbs) {
err = -ENOMEM;
goto err_alloc;
if (!vdpasim->config)
goto err_iommu;
- vdpasim->vqs = kzalloc_objs(struct vdpasim_virtqueue, dev_attr->nvqs,
- GFP_KERNEL);
+ vdpasim->vqs = kzalloc_objs(struct vdpasim_virtqueue, dev_attr->nvqs);
if (!vdpasim->vqs)
goto err_iommu;
- vdpasim->iommu = kmalloc_objs(*vdpasim->iommu, vdpasim->dev_attr.nas,
- GFP_KERNEL);
+ vdpasim->iommu = kmalloc_objs(*vdpasim->iommu, vdpasim->dev_attr.nas);
if (!vdpasim->iommu)
goto err_iommu;
u8 num_regions;
int err;
- region_info = kzalloc_objs(struct pds_lm_dirty_region_info, max_regions,
- GFP_KERNEL);
+ region_info = kzalloc_objs(struct pds_lm_dirty_region_info, max_regions);
if (!region_info)
return;
ret = -ENOMEM;
goto err_free_ranges;
}
- priv->phys_vec = kzalloc_objs(*priv->phys_vec, get_dma_buf.nr_ranges,
- GFP_KERNEL);
+ priv->phys_vec = kzalloc_objs(*priv->phys_vec, get_dma_buf.nr_ranges);
if (!priv->phys_vec) {
ret = -ENOMEM;
goto err_free_priv;
if (!zcopy)
continue;
n->vqs[i].ubuf_info =
- kmalloc_objs(*n->vqs[i].ubuf_info, UIO_MAXIOV,
- GFP_KERNEL);
+ kmalloc_objs(*n->vqs[i].ubuf_info, UIO_MAXIOV);
if (!n->vqs[i].ubuf_info)
goto err;
}
unsigned int log_num)
{
if (!cmd->tvc_log)
- cmd->tvc_log = kmalloc_objs(*cmd->tvc_log, vq->dev->iov_limit,
- GFP_KERNEL);
+ cmd->tvc_log = kmalloc_objs(*cmd->tvc_log, vq->dev->iov_limit);
if (unlikely(!cmd->tvc_log)) {
vq_err(vq, "Failed to alloc tvc_log\n");
return -ENOMEM;
}
- svq->upages = kzalloc_objs(struct page *, VHOST_SCSI_PREALLOC_UPAGES,
- GFP_KERNEL);
+ svq->upages = kzalloc_objs(struct page *, VHOST_SCSI_PREALLOC_UPAGES);
if (!svq->upages)
goto out;
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
- vq->indirect = kmalloc_objs(*vq->indirect, UIO_MAXIOV,
- GFP_KERNEL);
+ vq->indirect = kmalloc_objs(*vq->indirect, UIO_MAXIOV);
vq->log = kmalloc_objs(*vq->log, dev->iov_limit);
vq->heads = kmalloc_objs(*vq->heads, dev->iov_limit);
vq->nheads = kmalloc_array(dev->iov_limit, sizeof(*vq->nheads),
static struct dac_info * ics5342_init(dac_read_regs_t drr, dac_write_regs_t dwr, void *data)
{
- struct ics5342_info *ics_info = kzalloc_obj(struct ics5342_info,
- GFP_KERNEL);
+ struct ics5342_info *ics_info = kzalloc_obj(struct ics5342_info);
struct dac_info *info = &ics_info->dac;
if (!ics_info)
return 0;
}
/* put videomode list to info structure */
- videomodes = kzalloc_objs(struct fb_videomode, videomode_num,
- GFP_KERNEL);
+ videomodes = kzalloc_objs(struct fb_videomode, videomode_num);
if (!videomodes)
return -ENOMEM;
num_managers = dss_feat_get_num_mgrs();
- managers = kzalloc_objs(struct omap_overlay_manager, num_managers,
- GFP_KERNEL);
+ managers = kzalloc_objs(struct omap_overlay_manager, num_managers);
BUG_ON(managers == NULL);
/* TODO: Help propose a standard fb.h ioctl to report mmap damage */
if (cmd == UFX_IOCTL_REPORT_DAMAGE) {
- struct dloarea *area __free(kfree) = kmalloc_obj(*area,
- GFP_KERNEL);
+ struct dloarea *area __free(kfree) = kmalloc_obj(*area);
if (!area)
return -ENOMEM;
static void dlfb_deferred_vfree(struct dlfb_data *dlfb, void *mem)
{
- struct dlfb_deferred_free *d = kmalloc_obj(struct dlfb_deferred_free,
- GFP_KERNEL);
+ struct dlfb_deferred_free *d = kmalloc_obj(struct dlfb_deferred_free);
if (!d)
return;
d->mem = mem;
mode++;
}
- par->vbe_modes = kzalloc_objs(struct vbe_mode_ib, par->vbe_modes_cnt,
- GFP_KERNEL);
+ par->vbe_modes = kzalloc_objs(struct vbe_mode_ib, par->vbe_modes_cnt);
if (!par->vbe_modes)
return -ENOMEM;
goto timingfail;
}
- disp->timings = kzalloc_objs(struct display_timing *, disp->num_timings,
- GFP_KERNEL);
+ disp->timings = kzalloc_objs(struct display_timing *, disp->num_timings);
if (!disp->timings) {
pr_err("%pOF: could not allocate timings array\n", np);
goto timingfail;
}
/* Prepare the vm_memory_region_batch */
- regions_info = kzalloc_flex(*regions_info, regions_op, nr_regions,
- GFP_KERNEL);
+ regions_info = kzalloc_flex(*regions_info, regions_op, nr_regions);
if (!regions_info) {
ret = -ENOMEM;
goto unmap_kernel_map;
max_nr_pages = mem_region.memory_size / NE_MIN_MEM_REGION_SIZE;
- ne_mem_region->pages = kzalloc_objs(*ne_mem_region->pages, max_nr_pages,
- GFP_KERNEL);
+ ne_mem_region->pages = kzalloc_objs(*ne_mem_region->pages, max_nr_pages);
if (!ne_mem_region->pages) {
rc = -ENOMEM;
vp_dev->msix_vectors = nvectors;
- vp_dev->msix_names = kmalloc_objs(*vp_dev->msix_names, nvectors,
- GFP_KERNEL);
+ vp_dev->msix_names = kmalloc_objs(*vp_dev->msix_names, nvectors);
if (!vp_dev->msix_names)
goto error;
vp_dev->msix_affinity_masks
- = kzalloc_objs(*vp_dev->msix_affinity_masks, nvectors,
- GFP_KERNEL);
+ = kzalloc_objs(*vp_dev->msix_affinity_masks, nvectors);
if (!vp_dev->msix_affinity_masks)
goto error;
for (i = 0; i < nvectors; ++i)
NULL == add->being_removed)
goto err;
if (xen_pv_domain()) {
- add->kmap_ops = kvmalloc_objs(add->kmap_ops[0], count,
- GFP_KERNEL);
- add->kunmap_ops = kvmalloc_objs(add->kunmap_ops[0], count,
- GFP_KERNEL);
+ add->kmap_ops = kvmalloc_objs(add->kmap_ops[0], count);
+ add->kunmap_ops = kvmalloc_objs(add->kunmap_ops[0], count);
if (NULL == add->kmap_ops || NULL == add->kunmap_ops)
goto err;
}
*/
max_nr_glist_frames = max_nr_grefs / RPP;
- gnttab_list = kmalloc_objs(grant_ref_t *, max_nr_glist_frames,
- GFP_KERNEL);
+ gnttab_list = kmalloc_objs(grant_ref_t *, max_nr_glist_frames);
if (gnttab_list == NULL)
return -ENOMEM;
unsigned int i, ok;
int ret = 0;
- dst_cx_states = kzalloc_objs(struct xen_processor_cx, _pr->power.count,
- GFP_KERNEL);
+ dst_cx_states = kzalloc_objs(struct xen_processor_cx, _pr->power.count);
if (!dst_cx_states)
return -ENOMEM;
return -ENOMEM;
}
- acpi_psd = kzalloc_objs(struct acpi_psd_package, nr_acpi_bits,
- GFP_KERNEL);
+ acpi_psd = kzalloc_objs(struct acpi_psd_package, nr_acpi_bits);
if (!acpi_psd) {
bitmap_free(acpi_id_present);
bitmap_free(acpi_id_cst_present);
}
/* free of (sgl) in fast_flush_area() */
- pending_req->sgl = kmalloc_objs(struct scatterlist, nr_segments,
- GFP_KERNEL);
+ pending_req->sgl = kmalloc_objs(struct scatterlist, nr_segments);
if (!pending_req->sgl)
return -ENOMEM;
ctx->ring_folios = ctx->internal_folios;
if (nr_pages > AIO_RING_PAGES) {
- ctx->ring_folios = kzalloc_objs(struct folio *, nr_pages,
- GFP_KERNEL);
+ ctx->ring_folios = kzalloc_objs(struct folio *, nr_pages);
if (!ctx->ring_folios) {
put_aio_ring_file(ctx);
return -ENOMEM;
/*
* Allocate a structure for each thread.
*/
- info->thread = kzalloc_flex(*info->thread, notes, info->thread_notes,
- GFP_KERNEL);
+ info->thread = kzalloc_flex(*info->thread, notes, info->thread_notes);
if (unlikely(!info->thread))
return 0;
ret = get_anon_bdev(&pending_snapshot->anon_dev);
if (ret < 0)
goto free_pending;
- pending_snapshot->root_item = kzalloc_obj(struct btrfs_root_item,
- GFP_KERNEL);
+ pending_snapshot->root_item = kzalloc_obj(struct btrfs_root_item);
pending_snapshot->path = btrfs_alloc_path();
if (!pending_snapshot->root_item || !pending_snapshot->path) {
ret = -ENOMEM;
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
{
- struct btrfs_fs_info *fs_info = kzalloc_obj(struct btrfs_fs_info,
- GFP_KERNEL);
+ struct btrfs_fs_info *fs_info = kzalloc_obj(struct btrfs_fs_info);
if (!fs_info)
return fs_info;
goto out;
}
- zones = kvzalloc_objs(struct blk_zone, BTRFS_REPORT_NR_ZONES,
- GFP_KERNEL);
+ zones = kvzalloc_objs(struct blk_zone, BTRFS_REPORT_NR_ZONES);
if (!zones) {
ret = -ENOMEM;
goto out;
if (!cachefiles_in_ondemand_mode(volume->cache))
return 0;
- object->ondemand = kzalloc_obj(struct cachefiles_ondemand_info,
- GFP_KERNEL);
+ object->ondemand = kzalloc_obj(struct cachefiles_ondemand_info);
if (!object->ondemand)
return -ENOMEM;
gate_vma = get_gate_vma(mm);
cprm->vma_count = mm->map_count + (gate_vma ? 1 : 0);
- cprm->vma_meta = kvmalloc_objs(*cprm->vma_meta, cprm->vma_count,
- GFP_KERNEL);
+ cprm->vma_meta = kvmalloc_objs(*cprm->vma_meta, cprm->vma_count);
if (!cprm->vma_meta) {
mmap_write_unlock(mm);
return false;
}
vi->xattr_name_filter = le32_to_cpu(ih->h_name_filter);
vi->xattr_shared_count = ih->h_shared_count;
- vi->xattr_shared_xattrs = kmalloc_objs(uint, vi->xattr_shared_count,
- GFP_KERNEL);
+ vi->xattr_shared_xattrs = kmalloc_objs(uint, vi->xattr_shared_count);
if (!vi->xattr_shared_xattrs) {
erofs_put_metabuf(&buf);
ret = -ENOMEM;
}
sbi->map_sectors = ((need_map_size - 1) >>
(sb->s_blocksize_bits)) + 1;
- sbi->vol_amap = kvmalloc_objs(struct buffer_head *, sbi->map_sectors,
- GFP_KERNEL);
+ sbi->vol_amap = kvmalloc_objs(struct buffer_head *, sbi->map_sectors);
if (!sbi->vol_amap)
return -ENOMEM;
}
db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
EXT2_DESC_PER_BLOCK(sb);
- sbi->s_group_desc = kvmalloc_objs(struct buffer_head *, db_count,
- GFP_KERNEL);
+ sbi->s_group_desc = kvmalloc_objs(struct buffer_head *, db_count);
if (sbi->s_group_desc == NULL) {
ret = -ENOMEM;
ext2_msg(sb, KERN_ERR, "error: not enough memory");
}
oi->of_blocks = inode->i_size >> sb->s_blocksize_bits;
oi->of_csum_seed = EXT4_I(inode)->i_csum_seed;
- oi->of_binfo = kvmalloc_objs(struct ext4_orphan_block, oi->of_blocks,
- GFP_KERNEL);
+ oi->of_binfo = kvmalloc_objs(struct ext4_orphan_block, oi->of_blocks);
if (!oi->of_binfo) {
ret = -ENOMEM;
goto out_put;
if (f_handle.handle_bytes > MAX_HANDLE_SZ)
return -EINVAL;
- handle = kzalloc_flex(*handle, f_handle, f_handle.handle_bytes,
- GFP_KERNEL);
+ handle = kzalloc_flex(*handle, f_handle, f_handle.handle_bytes);
if (!handle)
return -ENOMEM;
if (retval)
goto out_path;
- handle = kmalloc_flex(*handle, f_handle, f_handle.handle_bytes,
- GFP_KERNEL);
+ handle = kmalloc_flex(*handle, f_handle, f_handle.handle_bytes);
if (!handle) {
retval = -ENOMEM;
goto out_path;
/* Are we called twice? */
WARN_ON(journal->j_fc_wbuf != NULL);
- journal->j_fc_wbuf = kmalloc_objs(struct buffer_head *, num_fc_blks,
- GFP_KERNEL);
+ journal->j_fc_wbuf = kmalloc_objs(struct buffer_head *, num_fc_blks);
if (!journal->j_fc_wbuf)
return -ENOMEM;
int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri,
uint32_t ofs)
{
- struct jffs2_sum_inode_mem *temp = kmalloc_obj(struct jffs2_sum_inode_mem,
- GFP_KERNEL);
+ struct jffs2_sum_inode_mem *temp = kmalloc_obj(struct jffs2_sum_inode_mem);
if (!temp)
return -ENOMEM;
switch (je16_to_cpu(node->u.nodetype)) {
case JFFS2_NODETYPE_INODE: {
struct jffs2_sum_inode_mem *temp =
- kmalloc_obj(struct jffs2_sum_inode_mem,
- GFP_KERNEL);
+ kmalloc_obj(struct jffs2_sum_inode_mem);
if (!temp)
goto no_mem;
#ifdef CONFIG_JFFS2_FS_XATTR
case JFFS2_NODETYPE_XATTR: {
struct jffs2_sum_xattr_mem *temp;
- temp = kmalloc_obj(struct jffs2_sum_xattr_mem,
- GFP_KERNEL);
+ temp = kmalloc_obj(struct jffs2_sum_xattr_mem);
if (!temp)
goto no_mem;
}
case JFFS2_NODETYPE_XREF: {
struct jffs2_sum_xref_mem *temp;
- temp = kmalloc_obj(struct jffs2_sum_xref_mem,
- GFP_KERNEL);
+ temp = kmalloc_obj(struct jffs2_sum_xref_mem);
if (!temp)
goto no_mem;
temp->nodetype = node->r.nodetype;
BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
- xref_tmphash = kzalloc_objs(struct jffs2_xattr_ref *, XREF_TMPHASH_SIZE,
- GFP_KERNEL);
+ xref_tmphash = kzalloc_objs(struct jffs2_xattr_ref *, XREF_TMPHASH_SIZE);
if (!xref_tmphash)
return -ENOMEM;
cache->c_max_entries = bucket_count << 4;
INIT_LIST_HEAD(&cache->c_list);
spin_lock_init(&cache->c_list_lock);
- cache->c_hash = kmalloc_objs(struct hlist_bl_head, bucket_count,
- GFP_KERNEL);
+ cache->c_hash = kmalloc_objs(struct hlist_bl_head, bucket_count);
if (!cache->c_hash) {
kfree(cache);
goto err_out;
if (atomic_inc_return(&nn->pending_async_copies) >
(int)rqstp->rq_pool->sp_nrthreads)
goto out_dec_async_copy_err;
- async_copy->cp_src = kmalloc_obj(*async_copy->cp_src,
- GFP_KERNEL);
+ async_copy->cp_src = kmalloc_obj(*async_copy->cp_src);
if (!async_copy->cp_src)
goto out_dec_async_copy_err;
if (!nfs4_init_copy_state(nn, copy))
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int i;
- nn->conf_id_hashtbl = kmalloc_objs(struct list_head, CLIENT_HASH_SIZE,
- GFP_KERNEL);
+ nn->conf_id_hashtbl = kmalloc_objs(struct list_head, CLIENT_HASH_SIZE);
if (!nn->conf_id_hashtbl)
goto err;
- nn->unconf_id_hashtbl = kmalloc_objs(struct list_head, CLIENT_HASH_SIZE,
- GFP_KERNEL);
+ nn->unconf_id_hashtbl = kmalloc_objs(struct list_head, CLIENT_HASH_SIZE);
if (!nn->unconf_id_hashtbl)
goto err_unconf_id;
nn->sessionid_hashtbl = kmalloc_objs(struct list_head,
if (reg->hr_tmp_block == NULL)
return -ENOMEM;
- reg->hr_slots = kzalloc_objs(struct o2hb_disk_slot, reg->hr_blocks,
- GFP_KERNEL);
+ reg->hr_slots = kzalloc_objs(struct o2hb_disk_slot, reg->hr_blocks);
if (reg->hr_slots == NULL)
return -ENOMEM;
"at %u blocks per page\n",
reg->hr_num_pages, reg->hr_blocks, spp);
- reg->hr_slot_data = kzalloc_objs(struct page *, reg->hr_num_pages,
- GFP_KERNEL);
+ reg->hr_slot_data = kzalloc_objs(struct page *, reg->hr_num_pages);
if (!reg->hr_slot_data)
return -ENOMEM;
if (osb->replay_map)
return 0;
- replay_map = kzalloc_flex(*replay_map, rm_replay_slots, osb->max_slots,
- GFP_KERNEL);
+ replay_map = kzalloc_flex(*replay_map, rm_replay_slots, osb->max_slots);
if (!replay_map) {
mlog_errno(-ENOMEM);
return -ENOMEM;
trace_ocfs2_map_slot_buffers(bytes, si->si_blocks);
- si->si_bh = kzalloc_objs(struct buffer_head *, si->si_blocks,
- GFP_KERNEL);
+ si->si_bh = kzalloc_objs(struct buffer_head *, si->si_blocks);
if (!si->si_bh) {
status = -ENOMEM;
mlog_errno(status);
goto out_free_bufmap;
bufmap->desc_array =
- kzalloc_objs(struct orangefs_bufmap_desc, bufmap->desc_count,
- GFP_KERNEL);
+ kzalloc_objs(struct orangefs_bufmap_desc, bufmap->desc_count);
if (!bufmap->desc_array)
goto out_free_index_array;
*/
struct persistent_ram_zone *tmp_prz, *prz_next;
- tmp_prz = kzalloc_obj(struct persistent_ram_zone,
- GFP_KERNEL);
+ tmp_prz = kzalloc_obj(struct persistent_ram_zone);
if (!tmp_prz)
return -ENOMEM;
prz = tmp_prz;
todo -= walk->len;
len = min(todo, POLLFD_PER_PAGE);
- walk = walk->next = kmalloc_flex(*walk, entries, len,
- GFP_KERNEL);
+ walk = walk->next = kmalloc_flex(*walk, entries, len);
if (!walk) {
err = -ENOMEM;
goto out_fds;
cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
*num_of_nodes, le32_to_cpu(rsp->DFSFlags));
- *target_nodes = kzalloc_objs(struct dfs_info3_param, *num_of_nodes,
- GFP_KERNEL);
+ *target_nodes = kzalloc_objs(struct dfs_info3_param, *num_of_nodes);
if (*target_nodes == NULL) {
rc = -ENOMEM;
goto parse_DFS_referrals_exit;
if (!mp)
return -ENOMEM;
#ifdef DEBUG
- mp->m_errortag = kzalloc_objs(*mp->m_errortag, XFS_ERRTAG_MAX,
- GFP_KERNEL);
+ mp->m_errortag = kzalloc_objs(*mp->m_errortag, XFS_ERRTAG_MAX);
if (!mp->m_errortag) {
kfree(mp);
return -ENOMEM;
data = kzalloc_obj(*data);
if (!data)
return NULL;
- data->iter.recs = kzalloc_objs(*data->iter.recs, XFS_ZONE_GC_RECS,
- GFP_KERNEL);
+ data->iter.recs = kzalloc_objs(*data->iter.recs, XFS_ZONE_GC_RECS);
if (!data->iter.recs)
goto out_free_data;
struct block_device *bdev = zd->sb->s_bdev;
int ret;
- zd->zones = kvzalloc_objs(struct blk_zone, bdev_nr_zones(bdev),
- GFP_KERNEL);
+ zd->zones = kvzalloc_objs(struct blk_zone, bdev_nr_zones(bdev));
if (!zd->zones)
return -ENOMEM;
if (!zgroup->g_nr_zones)
return 0;
- zgroup->g_zones = kvzalloc_objs(struct zonefs_zone, zgroup->g_nr_zones,
- GFP_KERNEL);
+ zgroup->g_zones = kvzalloc_objs(struct zonefs_zone, zgroup->g_nr_zones);
if (!zgroup->g_zones)
return -ENOMEM;
up->forward_threshold = sk->sk_rcvbuf >> 2;
set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
- up->udp_prod_queue = kzalloc_objs(*up->udp_prod_queue, nr_node_ids,
- GFP_KERNEL);
+ up->udp_prod_queue = kzalloc_objs(*up->udp_prod_queue, nr_node_ids);
if (!up->udp_prod_queue)
return -ENOMEM;
for (int i = 0; i < nr_node_ids; i++)
struct bpf_prog_pack *pack;
int err;
- pack = kzalloc_flex(*pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT),
- GFP_KERNEL);
+ pack = kzalloc_flex(*pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT));
if (!pack)
return NULL;
pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
if (!node)
goto err_nomem;
if (info) {
- node->loaded_info = kzalloc_objs(struct gcov_info *, 1,
- GFP_KERNEL);
+ node->loaded_info = kzalloc_objs(struct gcov_info *, 1);
if (!node->loaded_info)
goto err_nomem;
}
if (!dup->filename)
goto err_free;
- dup->functions = kzalloc_objs(struct gcov_fn_info *, info->n_functions,
- GFP_KERNEL);
+ dup->functions = kzalloc_objs(struct gcov_fn_info *, info->n_functions);
if (!dup->functions)
goto err_free;
/* Initialize the statistics so that each run gets its own numbers. */
if (nwriters_stress) {
lock_is_write_held = false;
- cxt.lwsa = kmalloc_objs(*cxt.lwsa, cxt.nrealwriters_stress,
- GFP_KERNEL);
+ cxt.lwsa = kmalloc_objs(*cxt.lwsa, cxt.nrealwriters_stress);
if (cxt.lwsa == NULL) {
VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
firsterr = -ENOMEM;
#endif
possible_cpus = num_possible_cpus();
- padata_works = kmalloc_objs(struct padata_work, possible_cpus,
- GFP_KERNEL);
+ padata_works = kmalloc_objs(struct padata_work, possible_cpus);
if (!padata_works)
goto remove_dead_state;
return -ENOMEM;
mk->mp->grp.name = "parameters";
/* NULL-terminated attribute array. */
- mk->mp->grp.attrs = kzalloc_obj(mk->mp->grp.attrs[0],
- GFP_KERNEL);
+ mk->mp->grp.attrs = kzalloc_obj(mk->mp->grp.attrs[0]);
/* Caller will cleanup via free_module_param_attrs */
if (!mk->mp->grp.attrs)
return -ENOMEM;
}
for (i = 0; i < kfree_alloc_num; i++) {
- alloc_ptr = kzalloc_objs(struct kfree_obj, kfree_mult,
- GFP_KERNEL);
+ alloc_ptr = kzalloc_objs(struct kfree_obj, kfree_mult);
if (!alloc_ptr)
return -ENOMEM;
schedule_timeout_uninterruptible(1);
writer_tasks = kzalloc_objs(writer_tasks[0], nrealwriters);
writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations), GFP_KERNEL);
- writer_n_durations = kzalloc_objs(*writer_n_durations, nrealwriters,
- GFP_KERNEL);
+ writer_n_durations = kzalloc_objs(*writer_n_durations, nrealwriters);
writer_done = kzalloc_objs(writer_done[0], nrealwriters);
if (gp_async) {
if (gp_async_max <= 0) {
if (WARN_ON(nocbs_toggle < 0))
nocbs_toggle = HZ;
if (nrealnocbers > 0) {
- nocb_tasks = kzalloc_objs(nocb_tasks[0], nrealnocbers,
- GFP_KERNEL);
+ nocb_tasks = kzalloc_objs(nocb_tasks[0], nrealnocbers);
if (nocb_tasks == NULL) {
TOROUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
if (ret < 0)
goto err_free_ei;
- sch->global_dsqs = kzalloc_objs(sch->global_dsqs[0], nr_node_ids,
- GFP_KERNEL);
+ sch->global_dsqs = kzalloc_objs(sch->global_dsqs[0], nr_node_ids);
if (!sch->global_dsqs) {
ret = -ENOMEM;
goto err_free_hash;
if (!iter)
return ERR_PTR(-ENOMEM);
- iter->buffer_iter = kzalloc_objs(*iter->buffer_iter, nr_cpu_ids,
- GFP_KERNEL);
+ iter->buffer_iter = kzalloc_objs(*iter->buffer_iter, nr_cpu_ids);
if (!iter->buffer_iter)
goto release;
mod_addr_comp, NULL, NULL);
if (IS_ENABLED(CONFIG_MODULES)) {
- module_delta = kzalloc_flex(*module_delta, delta, nr_entries,
- GFP_KERNEL);
+ module_delta = kzalloc_flex(*module_delta, delta, nr_entries);
if (!module_delta) {
pr_info("module_delta allocation failed. Not able to decode module address.");
goto reset;
void __init early_trace_init(void)
{
if (tracepoint_printk) {
- tracepoint_print_iter = kzalloc_obj(*tracepoint_print_iter,
- GFP_KERNEL);
+ tracepoint_print_iter = kzalloc_obj(*tracepoint_print_iter);
if (MEM_FAIL(!tracepoint_print_iter,
"Failed to allocate trace iterator\n"))
tracepoint_printk = 0;
(HIST_FIELD_FL_PERCENT | HIST_FIELD_FL_GRAPH)))
continue;
if (!stats) {
- stats = kzalloc_objs(*stats, hist_data->n_vals,
- GFP_KERNEL);
+ stats = kzalloc_objs(*stats, hist_data->n_vals);
if (!stats) {
n_entries = -ENOMEM;
goto out;
if (!earg)
return -ENOMEM;
earg->size = 2 * tp->nr_args + 1;
- earg->code = kzalloc_objs(struct fetch_insn, earg->size,
- GFP_KERNEL);
+ earg->code = kzalloc_objs(struct fetch_insn, earg->size);
if (!earg->code) {
kfree(earg);
return -ENOMEM;
return -EINVAL;
if (!info->cache) {
- info->cache = kzalloc_flex(*cache, entries, UNWIND_MAX_ENTRIES,
- GFP_KERNEL);
+ info->cache = kzalloc_flex(*cache, entries, UNWIND_MAX_ENTRIES);
if (!info->cache)
return -ENOMEM;
}
node = parent;
/* Create a new node to collapse into */
- new_n0 = kzalloc_obj(struct assoc_array_node,
- GFP_KERNEL);
+ new_n0 = kzalloc_obj(struct assoc_array_node);
if (!new_n0)
goto enomem;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
/* Parse attribute filters */
if (filters) {
filter_count = kunit_get_filter_count(filters);
- parsed_filters = kzalloc_objs(*parsed_filters, filter_count,
- GFP_KERNEL);
+ parsed_filters = kzalloc_objs(*parsed_filters, filter_count);
if (!parsed_filters) {
*err = -ENOMEM;
goto free_parsed_glob;
int i;
const int n_stress_kthread = cpumask_weight(cpu_online_mask);
struct stress_kthread skt = { 0 };
- struct stress_kthread *sktp = kzalloc_objs(*sktp, n_stress_kthread,
- GFP_KERNEL);
+ struct stress_kthread *sktp = kzalloc_objs(*sktp, n_stress_kthread);
KUNIT_EXPECT_NOT_NULL_MSG(test, sktp, "Memory allocation failure");
for (i = 0; i < n_stress_kthread; i++) {
static struct damon_sysfs_scheme_region *damon_sysfs_scheme_region_alloc(
struct damon_region *region)
{
- struct damon_sysfs_scheme_region *sysfs_region = kmalloc_obj(*sysfs_region,
- GFP_KERNEL);
+ struct damon_sysfs_scheme_region *sysfs_region = kmalloc_obj(*sysfs_region);
if (!sysfs_region)
return NULL;
static struct damon_sysfs_scheme_regions *
damon_sysfs_scheme_regions_alloc(void)
{
- struct damon_sysfs_scheme_regions *regions = kmalloc_obj(*regions,
- GFP_KERNEL);
+ struct damon_sysfs_scheme_regions *regions = kmalloc_obj(*regions);
if (!regions)
return NULL;
enum damos_wmark_metric metric, unsigned long interval_us,
unsigned long high, unsigned long mid, unsigned long low)
{
- struct damon_sysfs_watermarks *watermarks = kmalloc_obj(*watermarks,
- GFP_KERNEL);
+ struct damon_sysfs_watermarks *watermarks = kmalloc_obj(*watermarks);
if (!watermarks)
return NULL;
static
struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
{
- struct damon_sysfs_access_pattern *access_pattern = kmalloc_obj(*access_pattern,
- GFP_KERNEL);
+ struct damon_sysfs_access_pattern *access_pattern = kmalloc_obj(*access_pattern);
if (!access_pattern)
return NULL;
struct damos_migrate_dests *dests = &scheme->migrate_dests;
int i;
- dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, sysfs_dests->nr,
- GFP_KERNEL);
+ dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, sysfs_dests->nr);
if (!dests->node_id_arr)
return -ENOMEM;
- dests->weight_arr = kmalloc_objs(*dests->weight_arr, sysfs_dests->nr,
- GFP_KERNEL);
+ dests->weight_arr = kmalloc_objs(*dests->weight_arr, sysfs_dests->nr);
if (!dests->weight_arr)
/* ->node_id_arr will be freed by scheme destruction */
return -ENOMEM;
unsigned long sample_us, unsigned long aggr_us,
unsigned long update_us)
{
- struct damon_sysfs_intervals *intervals = kmalloc_obj(*intervals,
- GFP_KERNEL);
+ struct damon_sysfs_intervals *intervals = kmalloc_obj(*intervals);
if (!intervals)
return NULL;
{
size_t i;
- dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, nr_dests,
- GFP_KERNEL);
+ dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, nr_dests);
if (!dests->node_id_arr)
return -ENOMEM;
- dests->weight_arr = kmalloc_objs(*dests->weight_arr, nr_dests,
- GFP_KERNEL);
+ dests->weight_arr = kmalloc_objs(*dests->weight_arr, nr_dests);
if (!dests->weight_arr) {
kfree(dests->node_id_arr);
dests->node_id_arr = NULL;
use_target_nid = dests->nr_dests == 0;
nr_dests = use_target_nid ? 1 : dests->nr_dests;
priv.scheme = s;
- priv.migration_lists = kmalloc_objs(*priv.migration_lists, nr_dests,
- GFP_KERNEL);
+ priv.migration_lists = kmalloc_objs(*priv.migration_lists, nr_dests);
if (!priv.migration_lists)
return 0;
* Allocate stable and unstable together:
* MAXSMP NODES_SHIFT 10 will use 16kB.
*/
- buf = kzalloc_objs(*buf, nr_node_ids + nr_node_ids,
- GFP_KERNEL);
+ buf = kzalloc_objs(*buf, nr_node_ids + nr_node_ids);
/* Let us assume that RB_ROOT is NULL is zero */
if (!buf)
err = -ENOMEM;
panic("%s() failed to register memory tier subsystem\n", __func__);
#ifdef CONFIG_MIGRATION
- node_demotion = kzalloc_objs(struct demotion_nodes, nr_node_ids,
- GFP_KERNEL);
+ node_demotion = kzalloc_objs(struct demotion_nodes, nr_node_ids);
WARN_ON(!node_demotion);
#endif
if (!new_bw)
return -ENOMEM;
- new_wi_state = kmalloc_flex(*new_wi_state, iw_table, nr_node_ids,
- GFP_KERNEL);
+ new_wi_state = kmalloc_flex(*new_wi_state, iw_table, nr_node_ids);
if (!new_wi_state) {
kfree(new_bw);
return -ENOMEM;
kstrtou8(buf, 0, &weight) || weight == 0)
return -EINVAL;
- new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids,
- GFP_KERNEL);
+ new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids);
if (!new_wi_state)
return -ENOMEM;
if (kstrtobool(buf, &input))
return -EINVAL;
- new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids,
- GFP_KERNEL);
+ new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids);
if (!new_wi_state)
return -ENOMEM;
for (i = 0; i < nr_node_ids; i++)
* know that mm->notifier_subscriptions can't change while we
* hold the write side of the mmap_lock.
*/
- subscriptions = kzalloc_obj(struct mmu_notifier_subscriptions,
- GFP_KERNEL);
+ subscriptions = kzalloc_obj(struct mmu_notifier_subscriptions);
if (!subscriptions)
return -ENOMEM;
spin_lock_init(&cluster_info[i].lock);
if (!(si->flags & SWP_SOLIDSTATE)) {
- si->global_cluster = kmalloc_obj(*si->global_cluster,
- GFP_KERNEL);
+ si->global_cluster = kmalloc_obj(*si->global_cluster);
if (!si->global_cluster)
goto err;
for (i = 0; i < SWAP_NR_ORDERS; i++)
(chan->mode == L2CAP_MODE_ERTM ||
chan->mode == L2CAP_MODE_LE_FLOWCTL ||
chan->mode == L2CAP_MODE_EXT_FLOWCTL)) {
- struct l2cap_rx_busy *rx_busy = kmalloc_obj(*rx_busy,
- GFP_KERNEL);
+ struct l2cap_rx_busy *rx_busy = kmalloc_obj(*rx_busy);
if (!rx_busy) {
err = -ENOMEM;
goto done;
static int can_pernet_init(struct net *net)
{
spin_lock_init(&net->can.rcvlists_lock);
- net->can.rx_alldev_list = kzalloc_obj(*net->can.rx_alldev_list,
- GFP_KERNEL);
+ net->can.rx_alldev_list = kzalloc_obj(*net->can.rx_alldev_list);
if (!net->can.rx_alldev_list)
goto out;
net->can.pkg_stats = kzalloc_obj(*net->can.pkg_stats);
if (!net->can.pkg_stats)
goto out_free_rx_alldev_list;
- net->can.rcv_lists_stats = kzalloc_obj(*net->can.rcv_lists_stats,
- GFP_KERNEL);
+ net->can.rcv_lists_stats = kzalloc_obj(*net->can.rcv_lists_stats);
if (!net->can.rcv_lists_stats)
goto out_free_pkg_stats;
int i;
/* build initial monmap */
- monc->monmap = kzalloc_flex(*monc->monmap, mon_inst, num_mon,
- GFP_KERNEL);
+ monc->monmap = kzalloc_flex(*monc->monmap, mon_inst, num_mon);
if (!monc->monmap)
return -ENOMEM;
monc->monmap->num_mon = num_mon;
int err;
int rc;
- dev->offload_xstats_l3 = kzalloc_obj(*dev->offload_xstats_l3,
- GFP_KERNEL);
+ dev->offload_xstats_l3 = kzalloc_obj(*dev->offload_xstats_l3);
if (!dev->offload_xstats_l3)
return -ENOMEM;
struct net_dm_hw_entries *hw_entries;
unsigned long flags;
- hw_entries = kzalloc_flex(*hw_entries, entries, dm_hit_limit,
- GFP_KERNEL);
+ hw_entries = kzalloc_flex(*hw_entries, entries, dm_hit_limit);
if (!hw_entries) {
/* If the memory allocation failed, we try to perform another
* allocation in 1/10 second. Otherwise, the probe function
struct flow_offload_action *fl_action;
int i;
- fl_action = kzalloc_flex(*fl_action, action.entries, num_actions,
- GFP_KERNEL);
+ fl_action = kzalloc_flex(*fl_action, action.entries, num_actions);
if (!fl_action)
return NULL;
if (!data->capability.supported_caps)
return 0;
- data->snapshots = kzalloc_objs(*data->snapshots, PHY_MSE_CHANNEL_COUNT,
- GFP_KERNEL);
+ data->snapshots = kzalloc_objs(*data->snapshots, PHY_MSE_CHANNEL_COUNT);
if (!data->snapshots)
return -ENOMEM;
net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
if (!net->mib.icmp_statistics)
goto err_icmp_mib;
- net->mib.icmpmsg_statistics = kzalloc_obj(struct icmpmsg_mib,
- GFP_KERNEL);
+ net->mib.icmpmsg_statistics = kzalloc_obj(struct icmpmsg_mib);
if (!net->mib.icmpmsg_statistics)
goto err_icmpmsg_mib;
static struct hlist_head *fib_info_hash_alloc(unsigned int hash_bits)
{
/* The second half is used for prefsrc */
- return kvzalloc_objs(struct hlist_head, (1 << hash_bits) * 2,
- GFP_KERNEL);
+ return kvzalloc_objs(struct hlist_head, (1 << hash_bits) * 2);
}
static void fib_info_hash_free(struct hlist_head *head)
int i;
info->type = NH_NOTIFIER_INFO_TYPE_GRP;
- info->nh_grp = kzalloc_flex(*info->nh_grp, nh_entries, num_nh,
- GFP_KERNEL);
+ info->nh_grp = kzalloc_flex(*info->nh_grp, nh_entries, num_nh);
if (!info->nh_grp)
return -ENOMEM;
}
- idev->stats.icmpv6dev = kzalloc_obj(struct icmpv6_mib_device,
- GFP_KERNEL);
+ idev->stats.icmpv6dev = kzalloc_obj(struct icmpv6_mib_device);
if (!idev->stats.icmpv6dev)
goto err_icmp;
idev->stats.icmpv6msgdev = kzalloc_obj(struct icmpv6msg_mib_device,
net->mib.icmpv6_statistics = alloc_percpu(struct icmpv6_mib);
if (!net->mib.icmpv6_statistics)
goto err_icmp_mib;
- net->mib.icmpv6msg_statistics = kzalloc_obj(struct icmpv6msg_mib,
- GFP_KERNEL);
+ net->mib.icmpv6msg_statistics = kzalloc_obj(struct icmpv6msg_mib);
if (!net->mib.icmpv6msg_statistics)
goto err_icmpmsg_mib;
return 0;
spin_lock_init(&net->ipv6.fib_table_hash_lock);
- net->ipv6.fib6_main_tbl = kzalloc_obj(*net->ipv6.fib6_main_tbl,
- GFP_KERNEL);
+ net->ipv6.fib6_main_tbl = kzalloc_obj(*net->ipv6.fib6_main_tbl);
if (!net->ipv6.fib6_main_tbl)
goto out_fib_table_hash;
INIT_HLIST_HEAD(&net->ipv6.fib6_main_tbl->tb6_gc_hlist);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- net->ipv6.fib6_local_tbl = kzalloc_obj(*net->ipv6.fib6_local_tbl,
- GFP_KERNEL);
+ net->ipv6.fib6_local_tbl = kzalloc_obj(*net->ipv6.fib6_local_tbl);
if (!net->ipv6.fib6_local_tbl)
goto out_fib6_main_tbl;
net->ipv6.fib6_local_tbl->tb6_id = RT6_TABLE_LOCAL;
if (WARN_ON(res))
return res;
- funcs = kzalloc_objs(*funcs, sdata->local->hw.max_nan_de_entries + 1,
- GFP_KERNEL);
+ funcs = kzalloc_objs(*funcs, sdata->local->hw.max_nan_de_entries + 1);
if (!funcs)
return -ENOMEM;
*/
tab_array_size = array_size(ip_vs_conn_tab_size,
sizeof(*ip_vs_conn_tab));
- ip_vs_conn_tab = kvmalloc_objs(*ip_vs_conn_tab, ip_vs_conn_tab_size,
- GFP_KERNEL);
+ ip_vs_conn_tab = kvmalloc_objs(*ip_vs_conn_tab, ip_vs_conn_tab_size);
if (!ip_vs_conn_tab)
return -ENOMEM;
if (!s)
return -ENOMEM;
- s->lookup = kzalloc_objs(struct ip_vs_mh_lookup, IP_VS_MH_TAB_SIZE,
- GFP_KERNEL);
+ s->lookup = kzalloc_objs(struct ip_vs_mh_lookup, IP_VS_MH_TAB_SIZE);
if (!s->lookup) {
kfree(s);
return -ENOMEM;
struct nf_conntrack_expect_policy *policy;
int i, ret = 0;
- new_policy = kmalloc_objs(*new_policy, helper->expect_class_max + 1,
- GFP_KERNEL);
+ new_policy = kmalloc_objs(*new_policy, helper->expect_class_max + 1);
if (!new_policy)
return -ENOMEM;
{
struct netlink_policy_dump_state *state;
- state = kzalloc_flex(*state, policies, INITIAL_POLICIES_ALLOC,
- GFP_KERNEL);
+ state = kzalloc_flex(*state, policies, INITIAL_POLICIES_ALLOC);
if (!state)
return ERR_PTR(-ENOMEM);
state->n_alloc = INITIAL_POLICIES_ALLOC;
{
int i, err;
- ovs_net->ct_limit_info = kmalloc_obj(*ovs_net->ct_limit_info,
- GFP_KERNEL);
+ ovs_net->ct_limit_info = kmalloc_obj(*ovs_net->ct_limit_info);
if (!ovs_net->ct_limit_info)
return -ENOMEM;
ovs_net->ct_limit_info->default_limit = OVS_CT_LIMIT_DEFAULT;
ovs_net->ct_limit_info->limits =
- kmalloc_objs(struct hlist_head, CT_LIMIT_HASH_BUCKETS,
- GFP_KERNEL);
+ kmalloc_objs(struct hlist_head, CT_LIMIT_HASH_BUCKETS);
if (!ovs_net->ct_limit_info->limits) {
kfree(ovs_net->ct_limit_info);
return -ENOMEM;
{
int i;
- dp->ports = kmalloc_objs(struct hlist_head, DP_VPORT_HASH_BUCKETS,
- GFP_KERNEL);
+ dp->ports = kmalloc_objs(struct hlist_head, DP_VPORT_HASH_BUCKETS);
if (!dp->ports)
return -ENOMEM;
*/
int ovs_vport_init(void)
{
- dev_table = kzalloc_objs(struct hlist_head, VPORT_HASH_BUCKETS,
- GFP_KERNEL);
+ dev_table = kzalloc_objs(struct hlist_head, VPORT_HASH_BUCKETS);
if (!dev_table)
return -ENOMEM;
/* legacy PACKET_FANOUT_MAX */
args->max_num_members = 256;
err = -ENOMEM;
- match = kvzalloc_flex(*match, arr, args->max_num_members,
- GFP_KERNEL);
+ match = kvzalloc_flex(*match, arr, args->max_num_members);
if (!match)
goto out;
write_pnet(&match->net, sock_net(sk));
rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom;
rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom;
- rds_ibdev->vector_load = kzalloc_objs(int, device->num_comp_vectors,
- GFP_KERNEL);
+ rds_ibdev->vector_load = kzalloc_objs(int, device->num_comp_vectors);
if (!rds_ibdev->vector_load) {
pr_err("RDS/IB: %s failed to allocate vector memory\n",
__func__);
* would have to use GFP_ATOMIC there, and don't want to deal
* with failed allocations.
*/
- rm->atomic.op_notifier = kmalloc_obj(*rm->atomic.op_notifier,
- GFP_KERNEL);
+ rm->atomic.op_notifier = kmalloc_obj(*rm->atomic.op_notifier);
if (!rm->atomic.op_notifier) {
ret = -ENOMEM;
goto err;
* This reference might be taken later from tcf_exts_get_net().
*/
exts->net = net;
- exts->actions = kzalloc_objs(struct tc_action *, TCA_ACT_MAX_PRIO,
- GFP_KERNEL);
+ exts->actions = kzalloc_objs(struct tc_action *, TCA_ACT_MAX_PRIO);
if (!exts->actions)
return -ENOMEM;
#endif
#ifdef CONFIG_NET_CLS_ACT
pr_info(" Actions configured\n");
#endif
- tc_u_common_hash = kvmalloc_objs(struct hlist_head, U32_HASH_SIZE,
- GFP_KERNEL);
+ tc_u_common_hash = kvmalloc_objs(struct hlist_head, U32_HASH_SIZE);
if (!tc_u_common_hash)
return -ENOMEM;
for (i = 1; i <= CAKE_QUEUES; i++)
quantum_div[i] = 65535 / i;
- qd->tins = kvzalloc_objs(struct cake_tin_data, CAKE_MAX_TINS,
- GFP_KERNEL);
+ qd->tins = kvzalloc_objs(struct cake_tin_data, CAKE_MAX_TINS);
if (!qd->tins)
return -ENOMEM;
goto init_failure;
if (!q->flows) {
- q->flows = kvzalloc_objs(struct fq_codel_flow, q->flows_cnt,
- GFP_KERNEL);
+ q->flows = kvzalloc_objs(struct fq_codel_flow, q->flows_cnt);
if (!q->flows) {
err = -ENOMEM;
goto init_failure;
if (!q->hh_flows) {
/* Initialize heavy-hitter flow table. */
- q->hh_flows = kvzalloc_objs(struct list_head, HH_FLOWS_CNT,
- GFP_KERNEL);
+ q->hh_flows = kvzalloc_objs(struct list_head, HH_FLOWS_CNT);
if (!q->hh_flows)
return -ENOMEM;
for (i = 0; i < HH_FLOWS_CNT; i++)
return -EOPNOTSUPP;
/* pre-allocate qdiscs, attachment can't fail */
- priv->qdiscs = kzalloc_objs(priv->qdiscs[0], dev->num_tx_queues,
- GFP_KERNEL);
+ priv->qdiscs = kzalloc_objs(priv->qdiscs[0], dev->num_tx_queues);
if (!priv->qdiscs)
return -ENOMEM;
}
/* pre-allocate qdisc, attachment can't fail */
- priv->qdiscs = kzalloc_objs(priv->qdiscs[0], dev->num_tx_queues,
- GFP_KERNEL);
+ priv->qdiscs = kzalloc_objs(priv->qdiscs[0], dev->num_tx_queues);
if (!priv->qdiscs)
return -ENOMEM;
{
struct __tc_taprio_qopt_offload *__offload;
- __offload = kzalloc_flex(*__offload, offload.entries, num_entries,
- GFP_KERNEL);
+ __offload = kzalloc_flex(*__offload, offload.entries, num_entries);
if (!__offload)
return NULL;
GFP_KERNEL);
if (!link->wr_rx_bufs)
goto no_mem_wr_tx_bufs;
- link->wr_tx_ibs = kzalloc_objs(link->wr_tx_ibs[0], link->max_send_wr,
- GFP_KERNEL);
+ link->wr_tx_ibs = kzalloc_objs(link->wr_tx_ibs[0], link->max_send_wr);
if (!link->wr_tx_ibs)
goto no_mem_wr_rx_bufs;
- link->wr_rx_ibs = kzalloc_objs(link->wr_rx_ibs[0], link->max_recv_wr,
- GFP_KERNEL);
+ link->wr_rx_ibs = kzalloc_objs(link->wr_rx_ibs[0], link->max_recv_wr);
if (!link->wr_rx_ibs)
goto no_mem_wr_tx_ibs;
- link->wr_tx_rdmas = kzalloc_objs(link->wr_tx_rdmas[0],
- link->max_send_wr, GFP_KERNEL);
+ link->wr_tx_rdmas = kzalloc_objs(link->wr_tx_rdmas[0], link->max_send_wr);
if (!link->wr_tx_rdma_sges)
goto no_mem_wr_tx_rdmas;
- link->wr_tx_sges = kzalloc_objs(link->wr_tx_sges[0], link->max_send_wr,
- GFP_KERNEL);
+ link->wr_tx_sges = kzalloc_objs(link->wr_tx_sges[0], link->max_send_wr);
if (!link->wr_tx_sges)
goto no_mem_wr_tx_rdma_sges;
link->wr_rx_sges = kcalloc(link->max_recv_wr,
link->wr_tx_v2_ib = kzalloc_obj(*link->wr_tx_v2_ib);
if (!link->wr_tx_v2_ib)
goto no_mem_tx_compl;
- link->wr_tx_v2_sge = kzalloc_obj(*link->wr_tx_v2_sge,
- GFP_KERNEL);
+ link->wr_tx_v2_sge = kzalloc_obj(*link->wr_tx_v2_sge);
if (!link->wr_tx_v2_sge)
goto no_mem_v2_ib;
- link->wr_tx_v2_pend = kzalloc_obj(*link->wr_tx_v2_pend,
- GFP_KERNEL);
+ link->wr_tx_v2_pend = kzalloc_obj(*link->wr_tx_v2_pend);
if (!link->wr_tx_v2_pend)
goto no_mem_v2_sge;
}
last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
rqstp->rq_enc_pages_num = last - first + 1 + 1;
rqstp->rq_enc_pages
- = kmalloc_objs(struct page *, rqstp->rq_enc_pages_num,
- GFP_KERNEL);
+ = kmalloc_objs(struct page *, rqstp->rq_enc_pages_num);
if (!rqstp->rq_enc_pages)
goto out;
for (i=0; i < rqstp->rq_enc_pages_num; i++) {
if (cd == NULL)
return ERR_PTR(-ENOMEM);
- cd->hash_table = kzalloc_objs(struct hlist_head, cd->hash_size,
- GFP_KERNEL);
+ cd->hash_table = kzalloc_objs(struct hlist_head, cd->hash_size);
if (cd->hash_table == NULL) {
kfree(cd);
return ERR_PTR(-ENOMEM);
return ERR_PTR(-ENOMEM);
if (sendpages) {
- svsk->sk_bvec = kzalloc_objs(*svsk->sk_bvec, sendpages,
- GFP_KERNEL);
+ svsk->sk_bvec = kzalloc_objs(*svsk->sk_bvec, sendpages);
if (!svsk->sk_bvec) {
kfree(svsk);
return ERR_PTR(-ENOMEM);
return -ENOMEM;
}
- attrbuf = kzalloc_objs(struct nlattr *, tipc_genl_family.maxattr + 1,
- GFP_KERNEL);
+ attrbuf = kzalloc_objs(struct nlattr *, tipc_genl_family.maxattr + 1);
if (!attrbuf) {
err = -ENOMEM;
goto err_out;
if (!trans_buf)
return -ENOMEM;
- attrbuf = kmalloc_objs(struct nlattr *, tipc_genl_family.maxattr + 1,
- GFP_KERNEL);
+ attrbuf = kmalloc_objs(struct nlattr *, tipc_genl_family.maxattr + 1);
if (!attrbuf) {
err = -ENOMEM;
goto trans_out;
goto err_sysctl;
#endif
- net->unx.table.locks = kvmalloc_objs(spinlock_t, UNIX_HASH_SIZE,
- GFP_KERNEL);
+ net->unx.table.locks = kvmalloc_objs(spinlock_t, UNIX_HASH_SIZE);
if (!net->unx.table.locks)
goto err_proc;
struct nlattr **attrbuf_free = NULL;
if (!attrbuf) {
- attrbuf = kzalloc_objs(*attrbuf, NUM_NL80211_ATTR,
- GFP_KERNEL);
+ attrbuf = kzalloc_objs(*attrbuf, NUM_NL80211_ATTR);
if (!attrbuf)
return -ENOMEM;
attrbuf_free = attrbuf;
if (n_patterns > coalesce->n_patterns)
return -EINVAL;
- new_rule->patterns = kzalloc_objs(new_rule->patterns[0], n_patterns,
- GFP_KERNEL);
+ new_rule->patterns = kzalloc_objs(new_rule->patterns[0], n_patterns);
if (!new_rule->patterns)
return -ENOMEM;
func->srf_num_macs = n_entries;
func->srf_macs =
- kzalloc_objs(*func->srf_macs, n_entries,
- GFP_KERNEL);
+ kzalloc_objs(*func->srf_macs, n_entries);
if (!func->srf_macs) {
err = -ENOMEM;
goto out;
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
- pool->tx_descs = kvzalloc_objs(*pool->tx_descs, xs->tx->nentries,
- GFP_KERNEL);
+ pool->tx_descs = kvzalloc_objs(*pool->tx_descs, xs->tx->nentries);
if (!pool->tx_descs)
return -ENOMEM;
if (!dma_map)
return NULL;
- dma_map->dma_pages = kvzalloc_objs(*dma_map->dma_pages, nr_pages,
- GFP_KERNEL);
+ dma_map->dma_pages = kvzalloc_objs(*dma_map->dma_pages, nr_pages);
if (!dma_map->dma_pages) {
kfree(dma_map);
return NULL;
if (!aa_unpack_array(e, NULL, &size))
goto fail;
- rules->secmark = kzalloc_objs(struct aa_secmark, size,
- GFP_KERNEL);
+ rules->secmark = kzalloc_objs(struct aa_secmark, size);
if (!rules->secmark)
goto fail;
for (rules = arch_rules; *rules != NULL; rules++)
arch_entries++;
- arch_policy_entry = kzalloc_objs(*arch_policy_entry, arch_entries + 1,
- GFP_KERNEL);
+ arch_policy_entry = kzalloc_objs(*arch_policy_entry, arch_entries + 1);
if (!arch_policy_entry)
return 0;
return rc;
newp->cond_list_len = 0;
- newp->cond_list = kzalloc_objs(*newp->cond_list, origp->cond_list_len,
- GFP_KERNEL);
+ newp->cond_list = kzalloc_objs(*newp->cond_list, origp->cond_list_len);
if (!newp->cond_list)
goto error;
return rc;
if (p->policyvers >=
POLICYDB_VERSION_CONSTRAINT_NAMES) {
- e->type_names = kzalloc_obj(*e->type_names,
- GFP_KERNEL);
+ e->type_names = kzalloc_obj(*e->type_names);
if (!e->type_names)
return -ENOMEM;
type_set_init(e->type_names);
/* if no inputs are present... */
if ((onyx->codec.connected & 0xC) == 0) {
if (!onyx->codec_info)
- onyx->codec_info = kmalloc_obj(struct codec_info,
- GFP_KERNEL);
+ onyx->codec_info = kmalloc_obj(struct codec_info);
if (!onyx->codec_info)
return -ENOMEM;
ci = onyx->codec_info;
/* if no outputs are present... */
if ((onyx->codec.connected & 3) == 0) {
if (!onyx->codec_info)
- onyx->codec_info = kmalloc_obj(struct codec_info,
- GFP_KERNEL);
+ onyx->codec_info = kmalloc_obj(struct codec_info);
if (!onyx->codec_info)
return -ENOMEM;
ci = onyx->codec_info;
channels = src_format->channels;
else
channels = dst_format->channels;
- plugin->buf_channels = kzalloc_objs(*plugin->buf_channels, channels,
- GFP_KERNEL);
+ plugin->buf_channels = kzalloc_objs(*plugin->buf_channels, channels);
if (plugin->buf_channels == NULL) {
snd_pcm_plugin_free(plugin);
return -ENOMEM;
if (snd_BUG_ON(!pool))
return -EINVAL;
- cellptr = kvmalloc_objs(struct snd_seq_event_cell, pool->size,
- GFP_KERNEL);
+ cellptr = kvmalloc_objs(struct snd_seq_event_cell, pool->size);
if (!cellptr)
return -ENOMEM;
bool input, output;
int err, num;
- ump->out_cvts = kzalloc_objs(*ump->out_cvts, SNDRV_UMP_MAX_GROUPS,
- GFP_KERNEL);
+ ump->out_cvts = kzalloc_objs(*ump->out_cvts, SNDRV_UMP_MAX_GROUPS);
if (!ump->out_cvts)
return -ENOMEM;
chip->audio_info = rmh.Stat[1];
/* allocate pipes */
- chip->playback_pipes = kzalloc_objs(struct vx_pipe *, chip->audio_outs,
- GFP_KERNEL);
+ chip->playback_pipes = kzalloc_objs(struct vx_pipe *, chip->audio_outs);
if (!chip->playback_pipes)
return -ENOMEM;
- chip->capture_pipes = kzalloc_objs(struct vx_pipe *, chip->audio_ins,
- GFP_KERNEL);
+ chip->capture_pipes = kzalloc_objs(struct vx_pipe *, chip->audio_ins);
if (!chip->capture_pipes) {
kfree(chip->playback_pipes);
return -ENOMEM;
*/
if (ca0132_use_pci_mmio(spec))
spec->desktop_init_verbs = ca0132_init_verbs1;
- spec->spec_init_verbs = kzalloc_objs(struct hda_verb, NUM_SPEC_VERBS,
- GFP_KERNEL);
+ spec->spec_init_verbs = kzalloc_objs(struct hda_verb, NUM_SPEC_VERBS);
if (!spec->spec_init_verbs)
return -ENOMEM;
if (wid_caps & AC_WCAP_CONN_LIST) {
conn_len = snd_hda_get_num_raw_conns(codec, nid);
if (conn_len > 0) {
- conn = kmalloc_objs(hda_nid_t, conn_len,
- GFP_KERNEL);
+ conn = kmalloc_objs(hda_nid_t, conn_len);
if (!conn)
return;
if (snd_hda_get_raw_connections(codec, nid, conn,
setup_op = snd_hdac_stream_setup;
for (i = 0; i < num_stream; i++) {
- struct hdac_ext_stream *hext_stream = kzalloc_obj(*hext_stream,
- GFP_KERNEL);
+ struct hdac_ext_stream *hext_stream = kzalloc_obj(*hext_stream);
if (!hext_stream)
return -ENOMEM;
tag = ++stream_tag;
if (!tree->root)
return -ENOMEM;
- tree->nodes = kzalloc_objs(*tree->nodes, codec->num_nodes + 1,
- GFP_KERNEL);
+ tree->nodes = kzalloc_objs(*tree->nodes, codec->num_nodes + 1);
if (!tree->nodes)
return -ENOMEM;
struct dsp_spos_instance *cs46xx_dsp_spos_create (struct snd_cs46xx * chip)
{
- struct dsp_spos_instance * ins = kzalloc_obj(struct dsp_spos_instance,
- GFP_KERNEL);
+ struct dsp_spos_instance * ins = kzalloc_obj(struct dsp_spos_instance);
if (ins == NULL)
return NULL;
vmalloc(array_size(DSP_MAX_SYMBOLS,
sizeof(struct dsp_symbol_entry)));
ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
- ins->modules = kmalloc_objs(struct dsp_module_desc, DSP_MAX_MODULES,
- GFP_KERNEL);
+ ins->modules = kmalloc_objs(struct dsp_module_desc, DSP_MAX_MODULES);
if (!ins->symbol_table.symbols || !ins->code.data || !ins->modules) {
cs46xx_dsp_spos_destroy(chip);
goto error;
if (!icode)
return err;
- icode->gpr_map = kzalloc_objs(u_int32_t, 512 + 256 + 256 + 2 * 1024,
- GFP_KERNEL);
+ icode->gpr_map = kzalloc_objs(u_int32_t, 512 + 256 + 256 + 2 * 1024);
if (!icode->gpr_map)
goto __err_gpr;
controls = kzalloc_objs(*controls, SND_EMU10K1_GPR_CONTROLS);
if (!icode)
return err;
- icode->gpr_map = kzalloc_objs(u_int32_t, 256 + 160 + 160 + 2 * 512,
- GFP_KERNEL);
+ icode->gpr_map = kzalloc_objs(u_int32_t, 256 + 160 + 160 + 2 * 512);
if (!icode->gpr_map)
goto __err_gpr;
return -ENOMEM;
}
if (! dev->idx_table) {
- dev->idx_table = kmalloc_objs(*dev->idx_table, VIA_TABLE_SIZE,
- GFP_KERNEL);
+ dev->idx_table = kmalloc_objs(*dev->idx_table, VIA_TABLE_SIZE);
if (! dev->idx_table)
return -ENOMEM;
}
return -ENOMEM;
}
if (! dev->idx_table) {
- dev->idx_table = kmalloc_objs(*dev->idx_table, VIA_TABLE_SIZE,
- GFP_KERNEL);
+ dev->idx_table = kmalloc_objs(*dev->idx_table, VIA_TABLE_SIZE);
if (! dev->idx_table)
return -ENOMEM;
}
pointer->start = (u32)(dma_start + (i * period_bytes));
pointer->relative_end = (u32) (((i+1) * period_bytes) - 0x1);
if (i < periods - 1) {
- pointer->next = kmalloc_obj(struct pcm_period,
- GFP_KERNEL);
+ pointer->next = kmalloc_obj(struct pcm_period);
if (!pointer->next) {
au1000_release_dma_link(stream);
return -ENOMEM;
{
int ret;
- struct aw_all_prof_info *all_prof_info __free(kfree) = kzalloc_obj(*all_prof_info,
- GFP_KERNEL);
+ struct aw_all_prof_info *all_prof_info __free(kfree) = kzalloc_obj(*all_prof_info);
if (!all_prof_info)
return -ENOMEM;
/* For DT platforms allocate onecell data for clock registration */
if (np) {
- clk_data = kzalloc_flex(*clk_data, hws, DA7219_DAI_NUM_CLKS,
- GFP_KERNEL);
+ clk_data = kzalloc_flex(*clk_data, hws, DA7219_DAI_NUM_CLKS);
if (!clk_data)
return -ENOMEM;
rx->rxn_reg_stride = 0x80;
rx->rxn_reg_stride2 = 0xc;
def_count = ARRAY_SIZE(rx_defaults) + ARRAY_SIZE(rx_pre_2_5_defaults);
- reg_defaults = kmalloc_objs(struct reg_default, def_count,
- GFP_KERNEL);
+ reg_defaults = kmalloc_objs(struct reg_default, def_count);
if (!reg_defaults)
return -ENOMEM;
memcpy(®_defaults[0], rx_defaults, sizeof(rx_defaults));
rx->rxn_reg_stride = 0xc0;
rx->rxn_reg_stride2 = 0x0;
def_count = ARRAY_SIZE(rx_defaults) + ARRAY_SIZE(rx_2_5_defaults);
- reg_defaults = kmalloc_objs(struct reg_default, def_count,
- GFP_KERNEL);
+ reg_defaults = kmalloc_objs(struct reg_default, def_count);
if (!reg_defaults)
return -ENOMEM;
memcpy(®_defaults[0], rx_defaults, sizeof(rx_defaults));
case LPASS_CODEC_VERSION_2_1:
wsa->reg_layout = &wsa_codec_v2_1;
def_count = ARRAY_SIZE(wsa_defaults) + ARRAY_SIZE(wsa_defaults_v2_1);
- reg_defaults = kmalloc_objs(*reg_defaults, def_count,
- GFP_KERNEL);
+ reg_defaults = kmalloc_objs(*reg_defaults, def_count);
if (!reg_defaults)
return -ENOMEM;
memcpy(®_defaults[0], wsa_defaults, sizeof(wsa_defaults));
case LPASS_CODEC_VERSION_2_9:
wsa->reg_layout = &wsa_codec_v2_5;
def_count = ARRAY_SIZE(wsa_defaults) + ARRAY_SIZE(wsa_defaults_v2_5);
- reg_defaults = kmalloc_objs(*reg_defaults, def_count,
- GFP_KERNEL);
+ reg_defaults = kmalloc_objs(*reg_defaults, def_count);
if (!reg_defaults)
return -ENOMEM;
memcpy(®_defaults[0], wsa_defaults, sizeof(wsa_defaults));
img_data->nr_blk = get_unaligned_be32(&data[offset]);
offset += 4;
- img_data->dev_blks = kzalloc_objs(struct tasdev_blk, img_data->nr_blk,
- GFP_KERNEL);
+ img_data->dev_blks = kzalloc_objs(struct tasdev_blk, img_data->nr_blk);
if (!img_data->dev_blks) {
offset = -ENOMEM;
goto out;
img_data->nr_blk = get_unaligned_be16(&data[offset]);
offset += 2;
- img_data->dev_blks = kzalloc_objs(struct tasdev_blk, img_data->nr_blk,
- GFP_KERNEL);
+ img_data->dev_blks = kzalloc_objs(struct tasdev_blk, img_data->nr_blk);
if (!img_data->dev_blks) {
offset = -ENOMEM;
goto out;
}
tas_fmw->programs =
- kzalloc_objs(struct tasdevice_prog, tas_fmw->nr_programs,
- GFP_KERNEL);
+ kzalloc_objs(struct tasdevice_prog, tas_fmw->nr_programs);
if (!tas_fmw->programs) {
offset = -ENOMEM;
goto out;
fmw.size = fw_entry->size;
fmw.data = fw_entry->data;
- tas_fmw = tasdev->cali_data_fmw = kzalloc_obj(struct tasdevice_fw,
- GFP_KERNEL);
+ tas_fmw = tasdev->cali_data_fmw = kzalloc_obj(struct tasdevice_fw);
if (!tasdev->cali_data_fmw) {
ret = -ENOMEM;
goto out;
u32 offset = 0;
int i, ret;
- buf->regions = kzalloc_objs(*buf->regions, caps->num_regions,
- GFP_KERNEL);
+ buf->regions = kzalloc_objs(*buf->regions, caps->num_regions);
if (!buf->regions)
return -ENOMEM;
* stuff that increases stack usage.
* So, we use kzalloc()/kfree() for params in this function.
*/
- struct snd_pcm_hw_params *params __free(kfree) = kzalloc_obj(*params,
- GFP_KERNEL);
+ struct snd_pcm_hw_params *params __free(kfree) = kzalloc_obj(*params);
if (!params)
return -ENOMEM;
continue;
/* Reserve memory for all hw configs, eventually freed by widget */
- config = kzalloc_objs(*config, slink->num_hw_configs,
- GFP_KERNEL);
+ config = kzalloc_objs(*config, slink->num_hw_configs);
if (!config) {
ret = -ENOMEM;
goto free_comp;
SOF_IPC4_NODE_INDEX_INTEL_DMIC(ipc4_copier->dai_index);
break;
default:
- ipc4_copier->gtw_attr = kzalloc_obj(*ipc4_copier->gtw_attr,
- GFP_KERNEL);
+ ipc4_copier->gtw_attr = kzalloc_obj(*ipc4_copier->gtw_attr);
if (!ipc4_copier->gtw_attr) {
ret = -ENOMEM;
goto free_available_fmt;
emu->card = card;
emu->name = kstrdup_const(name, GFP_KERNEL);
- emu->voices = kzalloc_objs(struct snd_emux_voice, emu->max_voices,
- GFP_KERNEL);
+ emu->voices = kzalloc_objs(struct snd_emux_voice, emu->max_voices);
if (emu->name == NULL || emu->voices == NULL)
return -ENOMEM;
if (!p)
return NULL;
- p->chset.channels = kzalloc_objs(*p->chset.channels, max_channels,
- GFP_KERNEL);
+ p->chset.channels = kzalloc_objs(*p->chset.channels, max_channels);
if (!p->chset.channels) {
kfree(p);
return NULL;
{
int i;
int ret;
- struct control_runtime *rt = kzalloc_obj(struct control_runtime,
- GFP_KERNEL);
+ struct control_runtime *rt = kzalloc_obj(struct control_runtime);
struct comm_runtime *comm_rt = chip->comm;
if (!rt)
struct usb_line6 *line6 = line6pcm->line6;
int i;
- line6pcm->in.urbs = kzalloc_objs(struct urb *, line6->iso_buffers,
- GFP_KERNEL);
+ line6pcm->in.urbs = kzalloc_objs(struct urb *, line6->iso_buffers);
if (line6pcm->in.urbs == NULL)
return -ENOMEM;
struct usb_line6 *line6 = line6pcm->line6;
int i;
- line6pcm->out.urbs = kzalloc_objs(struct urb *, line6->iso_buffers,
- GFP_KERNEL);
+ line6pcm->out.urbs = kzalloc_objs(struct urb *, line6->iso_buffers);
if (line6pcm->out.urbs == NULL)
return -ENOMEM;
return -ENOMEM;
mixer->chip = chip;
mixer->ignore_ctl_error = !!(chip->quirk_flags & QUIRK_FLAG_IGNORE_CTL_ERROR);
- mixer->id_elems = kzalloc_objs(*mixer->id_elems, MAX_ID_ELEMS,
- GFP_KERNEL);
+ mixer->id_elems = kzalloc_objs(*mixer->id_elems, MAX_ID_ELEMS);
if (!mixer->id_elems) {
kfree(mixer);
return -ENOMEM;
mixer->rc_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!mixer->rc_urb)
return -ENOMEM;
- mixer->rc_setup_packet = kmalloc_obj(*mixer->rc_setup_packet,
- GFP_KERNEL);
+ mixer->rc_setup_packet = kmalloc_obj(*mixer->rc_setup_packet);
if (!mixer->rc_setup_packet) {
usb_free_urb(mixer->rc_urb);
mixer->rc_urb = NULL;
for (i = playback_endpoint ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
i <= SNDRV_PCM_STREAM_CAPTURE; ++i) {
- usx2y_substream[i] = kzalloc_obj(struct snd_usx2y_substream,
- GFP_KERNEL);
+ usx2y_substream[i] = kzalloc_obj(struct snd_usx2y_substream);
if (!usx2y_substream[i])
return -ENOMEM;
stream->buffer_sz = buffer_sz;
stream->num_pages = DIV_ROUND_UP(stream->buffer_sz, PAGE_SIZE);
- stream->pages = kzalloc_objs(struct page *, stream->num_pages,
- GFP_KERNEL);
+ stream->pages = kzalloc_objs(struct page *, stream->num_pages);
if (!stream->pages)
return -ENOMEM;