/linux/fs/xfs/
xfs_mount.c
     65  struct xfs_mount *mp)  /* xfs_uuid_mount() argument */
     67  uuid_t *uuid = &mp->m_sb.sb_uuid;  /* in xfs_uuid_mount() */
     71  super_set_uuid(mp->m_super, uuid->b, sizeof(*uuid));  /* in xfs_uuid_mount() */
     73  if (xfs_has_nouuid(mp))  /* in xfs_uuid_mount() */
     77  xfs_warn(mp, "Filesystem has null UUID - can't mount");  /* in xfs_uuid_mount() */
    104  xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);  /* in xfs_uuid_mount() */
    110  struct xfs_mount *mp)  /* xfs_uuid_unmount() argument */
    112  uuid_t *uuid = &mp->m_sb.sb_uuid;  /* in xfs_uuid_unmount() */
    115  if (xfs_has_nouuid(mp))  /* in xfs_uuid_unmount() */
    160  struct xfs_mount *mp,  /* xfs_readsb() argument */
    [all …]
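What these xfs_mount.c hits have in common is the mount-time UUID policy: the superblock UUID is registered in a table of mounted filesystems, and a null or already-registered UUID aborts the mount unless the nouuid option skips the check. A minimal userspace sketch of that bookkeeping (the table size, names, and error handling here are illustrative, not the kernel's):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct { unsigned char b[16]; } uuid_t;

#define MAX_MOUNTS 64                   /* hypothetical fixed-size table */
static uuid_t mounted_uuids[MAX_MOUNTS];
static int nr_mounted;

static bool uuid_is_null(const uuid_t *u)
{
        static const uuid_t null_uuid;  /* all zeroes */

        return memcmp(u, &null_uuid, sizeof(*u)) == 0;
}

/* Returns 0 on success, -1 if the UUID is null or already mounted. */
static int uuid_mount(const uuid_t *uuid)
{
        if (uuid_is_null(uuid)) {
                fprintf(stderr, "Filesystem has null UUID - can't mount\n");
                return -1;
        }
        for (int i = 0; i < nr_mounted; i++) {
                if (memcmp(&mounted_uuids[i], uuid, sizeof(*uuid)) == 0) {
                        fprintf(stderr, "Filesystem has duplicate UUID - can't mount\n");
                        return -1;
                }
        }
        if (nr_mounted < MAX_MOUNTS)
                mounted_uuids[nr_mounted++] = *uuid;
        return 0;
}

/* Unmount would simply drop the entry again, as xfs_uuid_unmount() does. */
```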
xfs_super.c
     76  struct xfs_mount *mp,  /* xfs_mount_set_dax_mode() argument */
     81  mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);  /* in xfs_mount_set_dax_mode() */
     84  mp->m_features |= XFS_FEAT_DAX_ALWAYS;  /* in xfs_mount_set_dax_mode() */
     85  mp->m_features &= ~XFS_FEAT_DAX_NEVER;  /* in xfs_mount_set_dax_mode() */
     88  mp->m_features |= XFS_FEAT_DAX_NEVER;  /* in xfs_mount_set_dax_mode() */
     89  mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;  /* in xfs_mount_set_dax_mode() */
    202  struct xfs_mount *mp = XFS_M(root->d_sb);  /* xfs_fs_show_options() local */
    206  if (mp->m_features & xfs_infop->flag)  /* in xfs_fs_show_options() */
    210  seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);  /* in xfs_fs_show_options() */
    212  if (xfs_has_allocsize(mp))  /* in xfs_fs_show_options() */
    [all …]
xfs_fsops.c
     51  struct xfs_mount *mp = tp->t_mountp;  /* xfs_resizefs_init_new_ags() local */
     52  xfs_rfsblock_t nb = mp->m_sb.sb_dblocks + delta;  /* in xfs_resizefs_init_new_ags() */
     64  (xfs_rfsblock_t)mp->m_sb.sb_agblocks);  /* in xfs_resizefs_init_new_ags() */
     66  id->agsize = mp->m_sb.sb_agblocks;  /* in xfs_resizefs_init_new_ags() */
     68  error = xfs_ag_init_headers(mp, id);  /* in xfs_resizefs_init_new_ags() */
     91  struct xfs_mount *mp,  /* mount point for filesystem; xfs_growfs_data_private() argument */
     94  xfs_agnumber_t oagcount = mp->m_sb.sb_agcount;  /* in xfs_growfs_data_private() */
    107  error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);  /* in xfs_growfs_data_private() */
    111  if (nb > mp->m_sb.sb_dblocks) {  /* in xfs_growfs_data_private() */
    112  error = xfs_buf_read_uncached(mp->m_ddev_targp,  /* in xfs_growfs_data_private() */
    [all …]
xfs_qm_syscalls.c
     24  xfs_mount_t *mp,  /* xfs_qm_scall_quotaoff() argument */
     32  if ((mp->m_qflags & flags) == 0)  /* in xfs_qm_scall_quotaoff() */
     40  xfs_info(mp, "disabling of quota accounting not supported.");  /* in xfs_qm_scall_quotaoff() */
     42  mutex_lock(&mp->m_quotainfo->qi_quotaofflock);  /* in xfs_qm_scall_quotaoff() */
     43  mp->m_qflags &= ~(flags & XFS_ALL_QUOTA_ENFD);  /* in xfs_qm_scall_quotaoff() */
     44  spin_lock(&mp->m_sb_lock);  /* in xfs_qm_scall_quotaoff() */
     45  mp->m_sb.sb_qflags = mp->m_qflags;  /* in xfs_qm_scall_quotaoff() */
     46  spin_unlock(&mp->m_sb_lock);  /* in xfs_qm_scall_quotaoff() */
     47  mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);  /* in xfs_qm_scall_quotaoff() */
     50  return xfs_sync_sb(mp, false);  /* in xfs_qm_scall_quotaoff() */
    [all …]
xfs_qm.c
     40  STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
     41  STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
     55  struct xfs_mount *mp,  /* xfs_qm_dquot_walk() argument */
     60  struct xfs_quotainfo *qi = mp->m_quotainfo;  /* in xfs_qm_dquot_walk() */
    206  struct xfs_mount *mp)  /* xfs_qm_dqpurge_all() argument */
    208  xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);  /* in xfs_qm_dqpurge_all() */
    209  xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);  /* in xfs_qm_dqpurge_all() */
    210  xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);  /* in xfs_qm_dqpurge_all() */
    218  struct xfs_mount *mp)  /* xfs_qm_unmount() argument */
    220  if (mp->m_quotainfo) {  /* in xfs_qm_unmount() */
    [all …]
xfs_rtalloc.c
    108  for (log = oargs->mp->m_rsumlevels - 1; log >= 0; log--) {  /* in xfs_rtcopy_summary() */
    109  for (bbno = oargs->mp->m_sb.sb_rbmblocks - 1;  /* in xfs_rtcopy_summary() */
    141  struct xfs_mount *mp = args->mp;  /* xfs_rtallocate_range() local */
    171  xfs_rtx_to_rbmblock(mp, preblock), -1);  /* in xfs_rtallocate_range() */
    182  xfs_rtx_to_rbmblock(mp, preblock), 1);  /* in xfs_rtallocate_range() */
    194  xfs_rtx_to_rbmblock(mp, end + 1), 1);  /* in xfs_rtallocate_range() */
    250  struct xfs_mount *mp = args->mp;  /* xfs_rtallocate_extent_block() local */
    264  end = min(args->rtg->rtg_extents, xfs_rbmblock_to_rtx(mp, bbno + 1)) -  /* in xfs_rtallocate_extent_block() */
    266  for (i = xfs_rbmblock_to_rtx(mp, bbno); i <= end; i++) {  /* in xfs_rtallocate_extent_block() */
    411  struct xfs_mount *mp = args->mp;  /* xfs_rtallocate_extent_near() local */
    [all …]
xfs_mount.h
     63  struct xfs_mount *mp;  /* member */
    347  #define M_IGEO(mp) (&(mp)->m_ino_geo)
    404  static inline bool xfs_has_ ## name (const struct xfs_mount *mp) \
    406  return mp->m_features & XFS_FEAT_ ## NAME; \
    412  static inline void xfs_add_ ## name (struct xfs_mount *mp) \
    414  mp->m_features |= XFS_FEAT_ ## NAME; \
    415  xfs_sb_version_add ## name(&mp->m_sb); \
    442  static inline bool xfs_has_rtgroups(const struct xfs_mount *mp)  /* in __XFS_ADD_FEAT() */
    445  return xfs_has_metadir(mp);  /* in __XFS_ADD_FEAT() */
    448  static inline bool xfs_has_rtsb(const struct xfs_mount *mp)  /* xfs_has_rtsb() argument */
    [all …]
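The xfs_mount.h hits show the token-pasting macros that generate one xfs_has_<feature>() predicate and one xfs_add_<feature>() setter per bit in m_features (the real xfs_add_* also bumps the on-disk superblock version, which is omitted here). A stripped-down sketch of the same pattern, with made-up feature names:

```c
#include <stdbool.h>
#include <stdint.h>

struct mount {
        uint64_t features;
};

#define FEAT_CRC        (1ULL << 0)
#define FEAT_REFLINK    (1ULL << 1)

/* One invocation expands to a typed predicate and a setter, the same   */
/* token-pasting trick xfs_mount.h uses for xfs_has_*()/xfs_add_*().    */
#define DEFINE_FEAT(name, NAME) \
static inline bool has_ ## name(const struct mount *mp) \
{ \
        return (mp->features & FEAT_ ## NAME) != 0; \
} \
static inline void add_ ## name(struct mount *mp) \
{ \
        mp->features |= FEAT_ ## NAME; \
}

DEFINE_FEAT(crc, CRC)
DEFINE_FEAT(reflink, REFLINK)
```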
xfs_zone_alloc.c
     49  struct xfs_mount *mp,  /* xfs_zone_bucket() argument */
     53  mp->m_groups[XG_TYPE_RTG].blocks;  /* in xfs_zone_bucket() */
     82  struct xfs_mount *mp = rtg_mount(rtg);  /* xfs_zone_account_reclaimable() local */
     83  struct xfs_zone_info *zi = mp->m_zone_info;  /* in xfs_zone_account_reclaimable() */
     86  uint32_t from_bucket = xfs_zone_bucket(mp, used + freed);  /* in xfs_zone_account_reclaimable() */
     87  uint32_t to_bucket = xfs_zone_bucket(mp, used);  /* in xfs_zone_account_reclaimable() */
    131  if (zi->zi_gc_thread && xfs_zoned_need_gc(mp))  /* in xfs_zone_account_reclaimable() */
    150  struct xfs_mount *mp = rtg_mount(rtg);  /* xfs_open_zone_mark_full() local */
    151  struct xfs_zone_info *zi = mp->m_zone_info;  /* in xfs_open_zone_mark_full() */
    181  struct xfs_mount *mp = tp->t_mountp;  /* xfs_zone_record_blocks() local */
    [all …]
/linux/fs/xfs/libxfs/
xfs_trans_space.h
     14  #define XFS_MAX_CONTIG_BMAPS_PER_BLOCK(mp) \
     15  (((mp)->m_bmap_dmxr[0]) - ((mp)->m_bmap_dmnr[0]))
     18  #define XFS_MAX_CONTIG_RTRMAPS_PER_BLOCK(mp) \
     19  (((mp)->m_rtrmap_mxr[0]) - ((mp)->m_rtrmap_mnr[0]))
     22  #define XFS_RTRMAPADD_SPACE_RES(mp) ((mp)->m_rtrmap_maxlevels)
     25  #define XFS_NRTRMAPADD_SPACE_RES(mp, b) \
     26  ((((b) + XFS_MAX_CONTIG_RTRMAPS_PER_BLOCK(mp) - 1) / \
     27  XFS_MAX_CONTIG_RTRMAPS_PER_BLOCK(mp)) * \
     28  XFS_RTRMAPADD_SPACE_RES(mp))
     31  #define XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) \
    [all …]
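XFS_NRTRMAPADD_SPACE_RES() above is a ceiling division: the reservation for adding b records is the number of maximal contiguous groups that fit in a block, times the worst-case blocks per add. The same arithmetic as a plain, runnable function (generic names, not the XFS macros):

```c
#include <assert.h>

/*
 * Blocks to reserve when adding 'nrecs' records: split the records into
 * maximal contiguous groups of 'recs_per_block', and reserve the full
 * worst-case 'blocks_per_add' for each group (a ceiling division).
 */
static unsigned int nrecs_add_space_res(unsigned int nrecs,
                                        unsigned int recs_per_block,
                                        unsigned int blocks_per_add)
{
        unsigned int groups = (nrecs + recs_per_block - 1) / recs_per_block;

        return groups * blocks_per_add;
}

int main(void)
{
        /* 10 records, 4 per block -> 3 groups; 5 blocks per add -> 15. */
        assert(nrecs_add_space_res(10, 4, 5) == 15);
        return 0;
}
```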
xfs_trans_resv.c
     76  struct xfs_mount *mp,  /* xfs_allocfree_block_count() argument */
     81  blocks = num_ops * 2 * (2 * mp->m_alloc_maxlevels - 1);  /* in xfs_allocfree_block_count() */
     82  if (xfs_has_rmapbt(mp))  /* in xfs_allocfree_block_count() */
     83  blocks += num_ops * (2 * mp->m_rmap_maxlevels - 1);  /* in xfs_allocfree_block_count() */
     95  struct xfs_mount *mp,  /* xfs_refcountbt_block_count() argument */
     98  return num_ops * (2 * mp->m_refc_maxlevels - 1);  /* in xfs_refcountbt_block_count() */
    103  struct xfs_mount *mp,  /* xfs_rtrefcountbt_block_count() argument */
    106  return num_ops * (2 * mp->m_rtrefc_maxlevels - 1);  /* in xfs_rtrefcountbt_block_count() */
    140  struct xfs_mount *mp,  /* xfs_calc_inode_res() argument */
    146  mp->m_sb.sb_inodesize +  /* in xfs_calc_inode_res() */
    [all …]
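xfs_allocfree_block_count() sizes a transaction reservation for the worst case: each allocate/free may split both free-space btrees from root to leaf (the formula charges 2 * levels - 1 blocks per btree), plus the rmap btree when that feature is enabled. A hedged sketch of the same calculation with the geometry passed in explicitly:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Worst case for 'num_ops' allocations or frees: both free-space btrees
 * may be modified root to leaf, each charged (2 * levels - 1) blocks,
 * and the rmap btree is added only when the filesystem has one.
 */
static unsigned int allocfree_block_count(unsigned int num_ops,
                                          unsigned int alloc_maxlevels,
                                          unsigned int rmap_maxlevels,
                                          bool has_rmapbt)
{
        unsigned int blocks = num_ops * 2 * (2 * alloc_maxlevels - 1);

        if (has_rmapbt)
                blocks += num_ops * (2 * rmap_maxlevels - 1);
        return blocks;
}

int main(void)
{
        /* one op, 5-level free-space btrees, 5-level rmap btree */
        printf("%u blocks\n", allocfree_block_count(1, 5, 5, true));
        return 0;
}
```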
xfs_types.c
     25  struct xfs_mount *mp,  /* xfs_verify_agno_agbno() argument */
     31  eoag = xfs_ag_block_count(mp, agno);  /* in xfs_verify_agno_agbno() */
     34  if (agbno <= XFS_AGFL_BLOCK(mp))  /* in xfs_verify_agno_agbno() */
     45  struct xfs_mount *mp,  /* xfs_verify_fsbno() argument */
     48  xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, fsbno);  /* in xfs_verify_fsbno() */
     50  if (agno >= mp->m_sb.sb_agcount)  /* in xfs_verify_fsbno() */
     52  return xfs_verify_agno_agbno(mp, agno, XFS_FSB_TO_AGBNO(mp, fsbno));  /* in xfs_verify_fsbno() */
     61  struct xfs_mount *mp,  /* xfs_verify_fsbext() argument */
     68  if (!xfs_verify_fsbno(mp, fsbno))  /* in xfs_verify_fsbext() */
     71  if (!xfs_verify_fsbno(mp, fsbno + len - 1))  /* in xfs_verify_fsbext() */
    [all …]
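xfs_verify_fsbno() and xfs_verify_fsbext() split a filesystem block number into an AG index and an AG-relative block, range-check both halves, and accept an extent only if both ends are valid. Assuming the usual XFS encoding (AG number in the high bits, selected by the per-AG block-count log), a simplified sketch:

```c
#include <stdbool.h>
#include <stdint.h>

struct geom {
        uint32_t agcount;       /* number of allocation groups       */
        uint32_t agblocks;      /* blocks per allocation group       */
        uint32_t agblklog;      /* log2 of blocks-per-AG, rounded up */
};

static inline uint32_t fsb_to_agno(const struct geom *g, uint64_t fsbno)
{
        return (uint32_t)(fsbno >> g->agblklog);
}

static inline uint32_t fsb_to_agbno(const struct geom *g, uint64_t fsbno)
{
        return (uint32_t)(fsbno & ((1ULL << g->agblklog) - 1));
}

static bool verify_fsbno(const struct geom *g, uint64_t fsbno)
{
        uint32_t agno  = fsb_to_agno(g, fsbno);
        uint32_t agbno = fsb_to_agbno(g, fsbno);

        if (agno >= g->agcount)
                return false;
        return agbno < g->agblocks;     /* real code also rejects AG header blocks */
}

/* An extent is valid if both ends are valid and land in the same AG. */
static bool verify_fsbext(const struct geom *g, uint64_t fsbno, uint64_t len)
{
        if (len == 0)
                return false;
        if (!verify_fsbno(g, fsbno) || !verify_fsbno(g, fsbno + len - 1))
                return false;
        return fsb_to_agno(g, fsbno) == fsb_to_agno(g, fsbno + len - 1);
}
```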
xfs_metafile.c
     84  struct xfs_mount *mp,  /* xfs_metafile_resv_can_cover() argument */
     93  if (mp->m_metafile_resv_avail >= rhs)  /* in xfs_metafile_resv_can_cover() */
    100  return xfs_compare_freecounter(mp, XC_FREE_BLOCKS,  /* in xfs_metafile_resv_can_cover() */
    101  rhs - mp->m_metafile_resv_avail, 2048) >= 0;  /* in xfs_metafile_resv_can_cover() */
    111  struct xfs_mount *mp)  /* xfs_metafile_resv_critical() argument */
    113  ASSERT(xfs_has_metadir(mp));  /* in xfs_metafile_resv_critical() */
    115  trace_xfs_metafile_resv_critical(mp, 0);  /* in xfs_metafile_resv_critical() */
    117  if (!xfs_metafile_resv_can_cover(mp, mp->m_rtbtree_maxlevels))  /* in xfs_metafile_resv_critical() */
    120  if (!xfs_metafile_resv_can_cover(mp,  /* in xfs_metafile_resv_critical() */
    121  div_u64(mp->m_metafile_resv_target, 10)))  /* in xfs_metafile_resv_critical() */
    [all …]
xfs_rtbitmap.h
     13  struct xfs_mount *mp;  /* member */
     28  struct xfs_mount *mp = rtg_mount(rtg);  /* xfs_rtx_to_rtb() local */
     31  if (mp->m_rtxblklog >= 0)  /* in xfs_rtx_to_rtb() */
     32  return start + (rtx << mp->m_rtxblklog);  /* in xfs_rtx_to_rtb() */
     33  return start + (rtx * mp->m_sb.sb_rextsize);  /* in xfs_rtx_to_rtb() */
     39  struct xfs_mount *mp,  /* xfs_rgbno_to_rtx() argument */
     42  if (likely(mp->m_rtxblklog >= 0))  /* in xfs_rgbno_to_rtx() */
     43  return rgbno >> mp->m_rtxblklog;  /* in xfs_rgbno_to_rtx() */
     44  return rgbno / mp->m_sb.sb_rextsize;  /* in xfs_rgbno_to_rtx() */
     49  struct xfs_mount *mp,  /* xfs_rtbxlen_to_blen() argument */
    [all …]
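The realtime conversion helpers above avoid a 64-bit multiply or divide whenever the extent size is a power of two: m_rtxblklog caches log2(sb_rextsize) and is negative otherwise. A small sketch of that shift-versus-multiply pattern:

```c
#include <stdint.h>

struct rt_geom {
        uint32_t rextsize;      /* blocks per realtime extent                  */
        int      rtxblklog;     /* log2(rextsize), or -1 if not a power of two */
};

static inline uint64_t rtx_to_blocks(const struct rt_geom *g, uint64_t rtx)
{
        if (g->rtxblklog >= 0)
                return rtx << g->rtxblklog;     /* cheap shift            */
        return rtx * g->rextsize;               /* general multiply       */
}

static inline uint64_t blocks_to_rtx(const struct rt_geom *g, uint64_t bno)
{
        if (g->rtxblklog >= 0)
                return bno >> g->rtxblklog;     /* cheap shift            */
        return bno / g->rextsize;               /* general divide         */
}
```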
xfs_ag.c
     45  struct xfs_mount *mp,  /* xfs_initialize_perag_data() argument */
     50  struct xfs_sb *sbp = &mp->m_sb;  /* in xfs_initialize_perag_data() */
     64  pag = xfs_perag_get(mp, index);  /* in xfs_initialize_perag_data() */
     89  xfs_alert(mp, "AGF corruption. Please run xfs_repair.");  /* in xfs_initialize_perag_data() */
     90  xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);  /* in xfs_initialize_perag_data() */
     96  spin_lock(&mp->m_sb_lock);  /* in xfs_initialize_perag_data() */
    100  spin_unlock(&mp->m_sb_lock);  /* in xfs_initialize_perag_data() */
    102  xfs_reinit_percpu_counters(mp);  /* in xfs_initialize_perag_data() */
    104  xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);  /* in xfs_initialize_perag_data() */
    125  struct xfs_mount *mp,  /* xfs_free_perag_range() argument */
    [all …]
xfs_trans_space.c
     21  struct xfs_mount *mp,  /* xfs_parent_calc_space_res() argument */
     28  return XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK) +  /* in xfs_parent_calc_space_res() */
     29  XFS_NEXTENTADD_SPACE_RES(mp, namelen, XFS_ATTR_FORK);  /* in xfs_parent_calc_space_res() */
     34  struct xfs_mount *mp,  /* xfs_create_space_res() argument */
     39  ret = XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp, namelen);  /* in xfs_create_space_res() */
     40  if (xfs_has_parent(mp))  /* in xfs_create_space_res() */
     41  ret += xfs_parent_calc_space_res(mp, namelen);  /* in xfs_create_space_res() */
     48  struct xfs_mount *mp,  /* xfs_mkdir_space_res() argument */
     51  return xfs_create_space_res(mp, namelen);  /* in xfs_mkdir_space_res() */
     56  struct xfs_mount *mp,  /* xfs_link_space_res() argument */
    [all …]
xfs_sb.c
    195  struct xfs_mount *mp,  /* xfs_validate_sb_read() argument */
    206  xfs_warn(mp,  /* in xfs_validate_sb_read() */
    209  xfs_warn(mp,  /* in xfs_validate_sb_read() */
    214  xfs_alert(mp,  /* in xfs_validate_sb_read() */
    218  if (!xfs_is_readonly(mp)) {  /* in xfs_validate_sb_read() */
    219  xfs_warn(mp,  /* in xfs_validate_sb_read() */
    221  xfs_warn(mp,  /* in xfs_validate_sb_read() */
    228  xfs_warn(mp,  /* in xfs_validate_sb_read() */
    232  xfs_warn(mp,  /* in xfs_validate_sb_read() */
    310  struct xfs_mount *mp,  /* xfs_validate_sb_write() argument */
    [all …]
xfs_log_rlimit.c
     53  struct xfs_mount *mp)  /* xfs_log_calc_max_attrsetm_res() argument */
     58  size = xfs_attr_leaf_entsize_local_max(mp->m_attr_geo->blksize) -  /* in xfs_log_calc_max_attrsetm_res() */
     60  nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);  /* in xfs_log_calc_max_attrsetm_res() */
     61  nblks += XFS_B_TO_FSB(mp, size);  /* in xfs_log_calc_max_attrsetm_res() */
     68  if (xfs_want_minlogsize_fixes(&mp->m_sb))  /* in xfs_log_calc_max_attrsetm_res() */
     69  size = XFS_B_TO_FSB(mp, size);  /* in xfs_log_calc_max_attrsetm_res() */
     71  nblks += XFS_NEXTENTADD_SPACE_RES(mp, size, XFS_ATTR_FORK);  /* in xfs_log_calc_max_attrsetm_res() */
     73  return M_RES(mp)->tr_attrsetm.tr_logres +  /* in xfs_log_calc_max_attrsetm_res() */
     74  M_RES(mp)->tr_attrsetrt.tr_logres * nblks;  /* in xfs_log_calc_max_attrsetm_res() */
     83  struct xfs_mount *mp,  /* xfs_log_calc_trans_resv_for_minlogblocks() argument */
    [all …]
/linux/drivers/media/usb/pvrusb2/
pvrusb2-context.c
     30  static void pvr2_context_set_notify(struct pvr2_context *mp, int fl)  /* pvr2_context_set_notify() argument */
     35  if (!mp->notify_flag) {  /* in pvr2_context_set_notify() */
     37  mp->notify_prev = pvr2_context_notify_last;  /* in pvr2_context_set_notify() */
     38  mp->notify_next = NULL;  /* in pvr2_context_set_notify() */
     39  pvr2_context_notify_last = mp;  /* in pvr2_context_set_notify() */
     40  if (mp->notify_prev) {  /* in pvr2_context_set_notify() */
     41  mp->notify_prev->notify_next = mp;  /* in pvr2_context_set_notify() */
     43  pvr2_context_notify_first = mp;  /* in pvr2_context_set_notify() */
     45  mp->notify_flag = !0;  /* in pvr2_context_set_notify() */
     48  if (mp->notify_flag) {  /* in pvr2_context_set_notify() */
    [all …]
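The pvrusb2 hits are a hand-rolled doubly linked notify list: a context is appended at the tail and the global first/last pointers are patched up, with notify_flag recording membership. A minimal sketch of that insert/remove pattern (field names mirror the driver; the lock the driver holds around this is omitted):

```c
#include <stddef.h>

struct ctx {
        struct ctx *notify_prev;
        struct ctx *notify_next;
        int notify_flag;
};

static struct ctx *notify_first;
static struct ctx *notify_last;

static void notify_add_tail(struct ctx *mp)
{
        if (mp->notify_flag)
                return;                         /* already queued */
        mp->notify_prev = notify_last;
        mp->notify_next = NULL;
        notify_last = mp;
        if (mp->notify_prev)
                mp->notify_prev->notify_next = mp;
        else
                notify_first = mp;
        mp->notify_flag = 1;
}

static void notify_remove(struct ctx *mp)
{
        if (!mp->notify_flag)
                return;
        if (mp->notify_prev)
                mp->notify_prev->notify_next = mp->notify_next;
        else
                notify_first = mp->notify_next;
        if (mp->notify_next)
                mp->notify_next->notify_prev = mp->notify_prev;
        else
                notify_last = mp->notify_prev;
        mp->notify_flag = 0;
}
```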
/linux/fs/jfs/
jfs_metapage.c
     34  #define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
     35  #define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)
     37  static inline void unlock_metapage(struct metapage *mp)  /* unlock_metapage() argument */
     39  clear_bit_unlock(META_locked, &mp->flag);  /* in unlock_metapage() */
     40  wake_up(&mp->wait);  /* in unlock_metapage() */
     43  static inline void __lock_metapage(struct metapage *mp)  /* __lock_metapage() argument */
     47  add_wait_queue_exclusive(&mp->wait, &wait);  /* in __lock_metapage() */
     50  if (metapage_locked(mp)) {  /* in __lock_metapage() */
     51  folio_unlock(mp->folio);  /* in __lock_metapage() */
     53  folio_lock(mp->folio);  /* in __lock_metapage() */
    [all …]
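jfs_metapage.c serializes access to a metapage with a single bit in its flag word: trylock_metapage() is an atomic test-and-set, unlock_metapage() clears the bit and wakes waiters, and the contended path sleeps on mp->wait. A userspace sketch of just the atomic-bit part using C11 atomics (the wait-queue sleep is replaced by a spin here purely for brevity, and a bit mask stands in for the kernel's bit number):

```c
#include <stdatomic.h>
#include <stdbool.h>

#define META_LOCKED (1u << 0)

struct metapage {
        atomic_uint flag;
};

/* Returns true if we took the lock, false if someone else already holds it. */
static bool trylock_metapage(struct metapage *mp)
{
        unsigned int old = atomic_fetch_or_explicit(&mp->flag, META_LOCKED,
                                                    memory_order_acquire);
        return (old & META_LOCKED) == 0;
}

static void unlock_metapage(struct metapage *mp)
{
        atomic_fetch_and_explicit(&mp->flag, ~META_LOCKED,
                                  memory_order_release);
        /* the kernel version also does wake_up(&mp->wait) here */
}

static void lock_metapage(struct metapage *mp)
{
        while (!trylock_metapage(mp))
                ;       /* kernel code sleeps on mp->wait instead of spinning */
}
```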
/linux/drivers/net/ethernet/apple/
mace.c
     91  static inline void mace_clean_rings(struct mace_data *mp);
    112  struct mace_data *mp;  /* mace_probe() local */
    155  mp = netdev_priv(dev);  /* in mace_probe() */
    156  mp->mdev = mdev;  /* in mace_probe() */
    160  mp->mace = ioremap(dev->base_addr, 0x1000);  /* in mace_probe() */
    161  if (mp->mace == NULL) {  /* in mace_probe() */
    173  mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |  /* in mace_probe() */
    174  in_8(&mp->mace->chipid_lo);  /* in mace_probe() */
    177  mp = netdev_priv(dev);  /* in mace_probe() */
    178  mp->maccc = ENXMT | ENRCV;  /* in mace_probe() */
    [all …]
macmace.c
    103  struct mace_data *mp = netdev_priv(dev);  /* mace_load_rxdma_base() local */
    106  psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);  /* in mace_load_rxdma_base() */
    109  mp->rx_tail = 0;  /* in mace_load_rxdma_base() */
    118  struct mace_data *mp = netdev_priv(dev);  /* mace_rxdma_reset() local */
    119  volatile struct mace *mace = mp->mace;  /* in mace_rxdma_reset() */
    133  mp->rx_slot = 0;  /* in mace_rxdma_reset() */
    145  struct mace_data *mp = netdev_priv(dev);  /* mace_txdma_reset() local */
    146  volatile struct mace *mace = mp->mace;  /* in mace_txdma_reset() */
    154  mp->tx_slot = mp->tx_sloti = 0;  /* in mace_txdma_reset() */
    155  mp->tx_count = N_TX_RING;  /* in mace_txdma_reset() */
    [all …]
/linux/drivers/net/ethernet/marvell/
mv643xx_eth.c
    419  static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)  /* rdl() argument */
    421  return readl(mp->shared->base + offset);  /* in rdl() */
    424  static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)  /* rdlp() argument */
    426  return readl(mp->base + offset);  /* in rdlp() */
    429  static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)  /* wrl() argument */
    431  writel(data, mp->shared->base + offset);  /* in wrl() */
    434  static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)  /* wrlp() argument */
    436  writel(data, mp->base + offset);  /* in wrlp() */
    453  struct mv643xx_eth_private *mp = rxq_to_mp(rxq);  /* rxq_enable() local */
    454  wrlp(mp, RXQ_COMMAND, 1 << rxq->index);  /* in rxq_enable() */
    [all …]
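mv643xx_eth.c funnels every register access through four tiny helpers (rdl/wrl for controller-wide registers, rdlp/wrlp for per-port ones), so the base + offset arithmetic lives in one place. The kernel versions use readl()/writel(), which also handle ordering and byte order; the sketch below only illustrates the base-plus-offset accessor idea with plain volatile pointers and hypothetical names:

```c
#include <stdint.h>

struct eth_port {
        volatile uint8_t *shared_base;  /* controller-wide register window */
        volatile uint8_t *port_base;    /* per-port register window        */
};

static inline uint32_t rd_shared(struct eth_port *mp, int offset)
{
        return *(volatile uint32_t *)(mp->shared_base + offset);
}

static inline void wr_shared(struct eth_port *mp, int offset, uint32_t data)
{
        *(volatile uint32_t *)(mp->shared_base + offset) = data;
}

static inline uint32_t rd_port(struct eth_port *mp, int offset)
{
        return *(volatile uint32_t *)(mp->port_base + offset);
}

static inline void wr_port(struct eth_port *mp, int offset, uint32_t data)
{
        *(volatile uint32_t *)(mp->port_base + offset) = data;
}
```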
/linux/drivers/scsi/sym53c8xx_2/
sym_malloc.c
     47  static void *___sym_malloc(m_pool_p mp, int size)  /* ___sym_malloc() argument */
     53  m_link_p h = mp->h;  /* in ___sym_malloc() */
     93  static void ___sym_mfree(m_pool_p mp, void *ptr, int size)  /* ___sym_mfree() argument */
     99  m_link_p h = mp->h;  /* in ___sym_mfree() */
    145  static void *__sym_calloc2(m_pool_p mp, int size, char *name, int uflags)  /* __sym_calloc2() argument */
    149  p = ___sym_malloc(mp, size);  /* in __sym_calloc2() */
    161  #define __sym_calloc(mp, s, n) __sym_calloc2(mp, s, n, SYM_MEM_WARN)
    166  static void __sym_mfree(m_pool_p mp, void *ptr, int size, char *name)  /* __sym_mfree() argument */
    171  ___sym_mfree(mp, ptr, size);  /* in __sym_mfree() */
    180  static void *___mp0_get_mem_cluster(m_pool_p mp)  /* ___mp0_get_mem_cluster() argument */
    [all …]
/linux/drivers/pci/controller/dwc/
pci-meson.c
     75  static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,  /* meson_pcie_get_reset() argument */
     79  struct device *dev = mp->pci.dev;  /* in meson_pcie_get_reset() */
     90  static int meson_pcie_get_resets(struct meson_pcie *mp)  /* meson_pcie_get_resets() argument */
     92  struct meson_pcie_rc_reset *mrst = &mp->mrst;  /* in meson_pcie_get_resets() */
     94  mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);  /* in meson_pcie_get_resets() */
     99  mrst->apb = meson_pcie_get_reset(mp, "apb", PCIE_SHARED_RESET);  /* in meson_pcie_get_resets() */
    108  struct meson_pcie *mp)  /* meson_pcie_get_mems() argument */
    110  struct dw_pcie *pci = &mp->pci;  /* in meson_pcie_get_mems() */
    116  mp->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");  /* in meson_pcie_get_mems() */
    117  if (IS_ERR(mp->cfg_base))  /* in meson_pcie_get_mems() */
    [all …]
/linux/drivers/isdn/capi/
capi.c
    147  static int capiminor_add_ack(struct capiminor *mp, u16 datahandle)  /* capiminor_add_ack() argument */
    158  spin_lock_bh(&mp->ackqlock);  /* in capiminor_add_ack() */
    159  list_add_tail(&n->list, &mp->ackqueue);  /* in capiminor_add_ack() */
    160  mp->nack++;  /* in capiminor_add_ack() */
    161  spin_unlock_bh(&mp->ackqlock);  /* in capiminor_add_ack() */
    165  static int capiminor_del_ack(struct capiminor *mp, u16 datahandle)  /* capiminor_del_ack() argument */
    169  spin_lock_bh(&mp->ackqlock);  /* in capiminor_del_ack() */
    170  list_for_each_entry_safe(p, tmp, &mp->ackqueue, list) {  /* in capiminor_del_ack() */
    173  mp->nack--;  /* in capiminor_del_ack() */
    174  spin_unlock_bh(&mp->ackqlock);  /* in capiminor_del_ack() */
    [all …]
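capi.c tracks the data handles of not-yet-acknowledged frames on a per-minor queue protected by ackqlock: capiminor_add_ack() appends with list_add_tail() and bumps nack, while capiminor_del_ack() walks the queue and drops the matching entry. A single-threaded sketch of the same bookkeeping with a plain singly linked list (ordering does not matter for the lookup), locking omitted:

```c
#include <stdint.h>
#include <stdlib.h>

struct ackq_entry {
        uint16_t datahandle;
        struct ackq_entry *next;
};

struct capiminor {
        struct ackq_entry *ackqueue;    /* outstanding data handles   */
        int nack;                       /* number of outstanding acks */
};

static int capiminor_add_ack(struct capiminor *mp, uint16_t datahandle)
{
        struct ackq_entry *n = malloc(sizeof(*n));

        if (!n)
                return -1;
        n->datahandle = datahandle;
        n->next = mp->ackqueue;
        mp->ackqueue = n;
        mp->nack++;
        return 0;
}

/* Returns 0 if the handle was found and removed, -1 otherwise. */
static int capiminor_del_ack(struct capiminor *mp, uint16_t datahandle)
{
        struct ackq_entry **pp = &mp->ackqueue;

        for (; *pp; pp = &(*pp)->next) {
                if ((*pp)->datahandle == datahandle) {
                        struct ackq_entry *victim = *pp;

                        *pp = victim->next;
                        free(victim);
                        mp->nack--;
                        return 0;
                }
        }
        return -1;
}
```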