/linux/fs/btrfs/
ordered-data.c
   336: void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered)
   338: if (!test_and_set_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
   339: mapping_set_error(ordered->inode->vfs_inode.i_mapping, -EIO);
   350: static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
   354: struct btrfs_inode *inode = ordered->inode;
   376: if (WARN_ON_ONCE(len > ordered->bytes_left)) {
   380: ordered->file_offset, ordered->num_bytes,
   381: len, ordered->bytes_left);
   382: ordered->bytes_left = 0;
   384: ordered->bytes_left -= len;
   [all …]
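
The hits above point at two patterns in the ordered-extent completion path: a one-time error flag claimed with test_and_set_bit() before mapping_set_error(), and a bytes_left counter that is decremented as ranges complete, clamped with a one-shot warning on underflow. Below is a minimal, self-contained C sketch of that counter bookkeeping; the struct and function names (ordered_range, range_complete_bytes) are illustrative stand-ins, not the btrfs definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for an ordered-extent-like record. */
struct ordered_range {
    uint64_t file_offset;   /* start of the range in the file */
    uint64_t num_bytes;     /* total length of the range */
    uint64_t bytes_left;    /* bytes not yet completed */
};

/*
 * Account 'len' completed bytes against the range.  Returns true once the
 * whole range has completed; warns and clamps if a caller tries to complete
 * more bytes than remain, mirroring the WARN_ON_ONCE() + clamp shape in the
 * hits above.
 */
static bool range_complete_bytes(struct ordered_range *r, uint64_t len)
{
    if (len > r->bytes_left) {
        fprintf(stderr, "range %llu+%llu: completing %llu with only %llu left\n",
                (unsigned long long)r->file_offset,
                (unsigned long long)r->num_bytes,
                (unsigned long long)len,
                (unsigned long long)r->bytes_left);
        r->bytes_left = 0;
    } else {
        r->bytes_left -= len;
    }
    return r->bytes_left == 0;
}

int main(void)
{
    struct ordered_range r = { .file_offset = 0, .num_bytes = 8192, .bytes_left = 8192 };

    range_complete_bytes(&r, 4096);     /* half of the range done */
    if (range_complete_bytes(&r, 4096)) /* remainder done */
        puts("range finished");
    return 0;
}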
|
direct-io.c
    17: struct btrfs_ordered_extent *ordered;
    40: struct btrfs_ordered_extent *ordered;
    66: ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
    76: if (!ordered &&
    83: if (ordered) {
    85: btrfs_put_ordered_extent(ordered);
   105: test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
   106: btrfs_start_ordered_extent(ordered);
   109: btrfs_put_ordered_extent(ordered);
   145: struct btrfs_ordered_extent *ordered;
   [all …]
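
The lock_extent_direct() hits trace a common shape: look up an in-flight ordered range that overlaps the region being locked, and if one exists, wait for it, drop the reference the lookup returned, and retry. The toy program below reproduces that loop with made-up names (inflight_range, lookup_inflight_range and friends); it is a sketch of the retry-and-put shape, not the btrfs locking logic.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for an in-flight ordered range; not a btrfs structure. */
struct inflight_range {
    uint64_t start;
    uint64_t len;
    int refs;
};

static struct inflight_range *pending;  /* at most one in-flight range here */

static void put_inflight_range(struct inflight_range *r)
{
    if (--r->refs == 0)
        free(r);
}

/* Return an overlapping in-flight range with an extra reference, or NULL. */
static struct inflight_range *lookup_inflight_range(uint64_t start, uint64_t len)
{
    struct inflight_range *r = pending;

    if (r && start < r->start + r->len && r->start < start + len) {
        r->refs++;
        return r;
    }
    return NULL;
}

/* Stand-in for waiting on completion: the range finishes and is unpublished. */
static void wait_for_inflight_range(struct inflight_range *r)
{
    printf("waiting for range %llu+%llu\n",
           (unsigned long long)r->start, (unsigned long long)r->len);
    pending = NULL;
    put_inflight_range(r);  /* drop the reference the "tree" held */
}

/*
 * Lock a file range for direct I/O: while an in-flight range overlaps, wait
 * for it, drop the lookup reference, and retry -- the same loop shape the
 * lock_extent_direct() hits above suggest.
 */
static void lock_range_for_dio(uint64_t start, uint64_t len)
{
    struct inflight_range *busy;

    while ((busy = lookup_inflight_range(start, len)) != NULL) {
        wait_for_inflight_range(busy);
        put_inflight_range(busy);   /* release the lookup reference */
    }
}

int main(void)
{
    pending = calloc(1, sizeof(*pending));
    if (!pending)
        return 1;
    pending->start = 0;
    pending->len = 4096;
    pending->refs = 1;

    lock_range_for_dio(2048, 1024);
    puts("range locked");
    return 0;
}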
|
ordered-data.h
   166: void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
   223: struct btrfs_ordered_extent *ordered, u64 len);
   224: void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered);
|
inode.c
  1099: struct btrfs_ordered_extent *ordered;
  1167: ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
  1169: if (IS_ERR(ordered)) {
  1171: ret = PTR_ERR(ordered);
  1180: btrfs_submit_compressed_write(ordered,
  1350: struct btrfs_ordered_extent *ordered;
  1414: ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
  1416: if (IS_ERR(ordered)) {
  1419: ret = PTR_ERR(ordered);
  1424: ret = btrfs_reloc_clone_csums(ordered);
  [all …]
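
These call sites use the kernel's ERR_PTR convention: btrfs_alloc_ordered_extent() returns either a valid pointer or an errno encoded into the pointer, and callers test it with IS_ERR() and recover the code with PTR_ERR(). A small kernel-style sketch of that convention, using a made-up record type rather than the btrfs structures:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct record {
    int payload;
};

/* Allocation helper that returns either a valid pointer or an encoded errno. */
static struct record *alloc_record(int payload)
{
    struct record *rec = kzalloc(sizeof(*rec), GFP_KERNEL);

    if (!rec)
        return ERR_PTR(-ENOMEM);
    rec->payload = payload;
    return rec;
}

/* Caller side: the same IS_ERR()/PTR_ERR() shape as the hits above. */
static int use_record(void)
{
    struct record *rec = alloc_record(42);

    if (IS_ERR(rec))
        return PTR_ERR(rec);

    /* ... use rec ... */
    kfree(rec);
    return 0;
}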
|
compression.c
   329: btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
   384: void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
   390: struct btrfs_inode *inode = ordered->inode;
   394: ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
   395: ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));
   397: cb = alloc_compressed_bio(inode, ordered->file_offset,
   400: cb->start = ordered->file_offset;
   401: cb->len = ordered->num_bytes;
   403: cb->compressed_len = ordered->disk_num_bytes;
   407: cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
   [all …]
|
extent_io.c
   543: btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,
   754: struct btrfs_ordered_extent *ordered;
   756: ordered = btrfs_lookup_ordered_extent(inode, file_offset);
   757: if (ordered) {
   759: ordered->file_offset +
   760: ordered->disk_num_bytes - file_offset);
   761: bbio->ordered = ordered;
  1158: struct btrfs_ordered_extent *ordered,
  1205: ordered->file_offset + ordered->num_bytes) - cur;
  1213: *fileoff = ordered->file_offset + ordered->num_bytes;
  [all …]
|
zoned.c
  1858: static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
  1861: struct extent_map_tree *em_tree = &ordered->inode->extent_tree;
  1864: ordered->disk_bytenr = logical;
  1867: em = btrfs_search_extent_mapping(em_tree, ordered->file_offset,
  1868: ordered->num_bytes);
  1876: static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
  1881: if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
  1882: btrfs_split_extent_map(ordered->inode, ordered->file_offset,
  1883: ordered->num_bytes, len, logical))
  1886: new = btrfs_split_ordered_extent(ordered, len);
  [all …]
|
bio.c
    93: refcount_inc(&orig_bbio->ordered->refs);
    94: bbio->ordered = orig_bbio->ordered;
   109: btrfs_put_ordered_extent(bbio->ordered);
   128: struct btrfs_ordered_extent *ordered = bbio->ordered;
   131: btrfs_put_ordered_extent(ordered);
   730: list_add_tail(&bioc->rst_ordered_entry, &bbio->ordered->bioc_list);
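
The bio.c hits show the reference-counting side of ordered extents: when a bio is split, each piece takes its own reference on the shared ordered extent (refcount_inc() on ->refs), and each completion path drops one with btrfs_put_ordered_extent(), so the object survives until the last in-flight piece finishes. A generic kernel-style sketch of that share/put pattern, with invented names:

#include <linux/refcount.h>
#include <linux/slab.h>

/* Illustrative object shared by several independently completing I/Os. */
struct shared_record {
    refcount_t refs;
    /* ... payload ... */
};

/* Each piece that will complete on its own takes a reference. */
static void share_record(struct shared_record *rec)
{
    refcount_inc(&rec->refs);
}

/* Completion path: the last put frees the object. */
static void put_record(struct shared_record *rec)
{
    if (refcount_dec_and_test(&rec->refs))
        kfree(rec);
}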
|
/linux/Documentation/devicetree/bindings/scsi/
hisilicon-sas.txt
    22: sources; the interrupts are ordered in 3 groups, as follows:
    30: The phy interrupts are ordered into groups of 3 per phy
    34: The interrupts are ordered in increasing order.
    35: Fatal interrupts : the fatal interrupts are ordered as follows:
    39: the interrupts are ordered in 3 groups, as follows:
    47: interrupt. The interrupts are ordered in increasing
    50: interrupt source. The interrupts are ordered in
|
/linux/include/trace/events/
btrfs.h
   501: const struct btrfs_ordered_extent *ordered),
   503: TP_ARGS(inode, ordered),
   521: __entry->file_offset = ordered->file_offset;
   522: __entry->start = ordered->disk_bytenr;
   523: __entry->len = ordered->num_bytes;
   524: __entry->disk_len = ordered->disk_num_bytes;
   525: __entry->bytes_left = ordered->bytes_left;
   526: __entry->flags = ordered->flags;
   527: __entry->compress_type = ordered->compress_type;
   528: __entry->refs = refcount_read(&ordered->refs);
   [all …]
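
These btrfs.h hits come from a tracepoint class whose TP_fast_assign() block copies fields out of the ordered extent into the trace ring-buffer entry. For orientation, here is the general shape of such a TRACE_EVENT with invented event and field names; the TRACE_SYSTEM/define_trace boilerplate a real trace header needs is omitted.

#include <linux/tracepoint.h>
#include <linux/types.h>

TRACE_EVENT(sample_range_event,

    TP_PROTO(u64 file_offset, u64 num_bytes),

    TP_ARGS(file_offset, num_bytes),

    TP_STRUCT__entry(
        __field(u64, file_offset)
        __field(u64, num_bytes)
    ),

    /* Runs at trace time: copy the arguments into the ring-buffer entry. */
    TP_fast_assign(
        __entry->file_offset = file_offset;
        __entry->num_bytes = num_bytes;
    ),

    /* Runs at read time: format the stored fields. */
    TP_printk("file_offset=%llu num_bytes=%llu",
              __entry->file_offset, __entry->num_bytes)
);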
|
/linux/tools/lib/subcmd/
parse-options.c
   811: struct option *opt, *ordered = NULL, *group;
   823: group = realloc(ordered, len);
   826: ordered = group;
   827: memcpy(&ordered[nr_parent], p, sizeof(*o) * (nr_opts - nr_parent));
   832: memcpy(&ordered[nr_opts], o, sizeof(*o));
   835: for (opt = group = ordered; opt->type != OPTION_END; opt++) {
   847: return ordered;
   885: struct option *ordered;
   910: ordered = options__order(opts);
   911: if (ordered)
   [all …]
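
options__order() in these hits builds a flattened, ordered copy of parent and child option tables by growing a single buffer with realloc() and appending with memcpy() before usage text is printed. A self-contained sketch of that grow-and-append shape follows, using plain ints instead of struct option; the helper name append_items is made up.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Append n items to a growable array.  On allocation failure the original
 * buffer is left untouched and still owned by the caller, the same way
 * options__order() keeps the old pointer until realloc() succeeds.
 */
static bool append_items(int **dst, size_t *nr, const int *src, size_t n)
{
    int *grown = realloc(*dst, (*nr + n) * sizeof(**dst));

    if (!grown)
        return false;
    memcpy(grown + *nr, src, n * sizeof(*src));
    *dst = grown;
    *nr += n;
    return true;
}

int main(void)
{
    static const int parent[] = { 1, 2, 3 };
    static const int child[] = { 4, 5 };
    int *ordered = NULL;
    size_t nr = 0;

    if (append_items(&ordered, &nr, parent, 3) &&
        append_items(&ordered, &nr, child, 2)) {
        for (size_t i = 0; i < nr; i++)
            printf("%d ", ordered[i]);   /* prints 1 2 3 4 5 */
        putchar('\n');
    }
    free(ordered);
    return 0;
}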
|
/linux/Documentation/core-api/
refcount-vs-atomic.rst
    67: then further stores are ordered against this operation.
   135: * fully ordered --> control dependency on success for stores
   151: * fully ordered --> ACQUIRE ordering on success
   164: * fully ordered --> RELEASE ordering + ACQUIRE ordering on success
   177: * fully ordered --> RELEASE ordering + control dependency
   192: * fully ordered --> RELEASE ordering + control dependency + hold
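
These refcount-vs-atomic.rst lines contrast the fully ordered atomic_*() primitives with their refcount_*() counterparts, which provide only the ordering a reference count actually needs, for example ACQUIRE ordering on a successful refcount_inc_not_zero(). Below is a kernel-style sketch of the lookup pattern where that guarantee is the one that matters; the cached_obj type and the_obj pointer are invented for illustration.

#include <linux/rcupdate.h>
#include <linux/refcount.h>

struct cached_obj {
    refcount_t refs;
    int payload;
};

static struct cached_obj __rcu *the_obj;

/*
 * Lookup side: a successful refcount_inc_not_zero() provides ACQUIRE
 * ordering, which orders the caller's later reads of ->payload against the
 * publisher of the object.  atomic_inc_not_zero() would be fully ordered,
 * i.e. stronger than this lookup needs.
 */
static struct cached_obj *get_cached_obj(void)
{
    struct cached_obj *obj;

    rcu_read_lock();
    obj = rcu_dereference(the_obj);
    if (obj && !refcount_inc_not_zero(&obj->refs))
        obj = NULL;
    rcu_read_unlock();
    return obj;
}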
|
/linux/Documentation/devicetree/bindings/sound/
sirf-audio-port.txt
     6: - dmas: List of DMA controller phandle and DMA request line ordered pairs.
     8: These strings correspond 1:1 with the ordered pairs in dmas.
|
/linux/Documentation/
atomic_t.txt
   156: atomic variable) can be fully ordered and no intermediate state is lost or
   169: - RMW operations that have a return value are fully ordered;
   183: Fully ordered primitives are ordered against everything prior and everything
   184: subsequent. Therefore a fully ordered primitive is like having an smp_mb()
   198: ordered, so it is advisable to place the barrier right next to the RMW atomic
   203: provide full ordered atomics and these barriers are no-ops.
   205: NOTE: when the atomic RmW ops are fully ordered, they should also imply a
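
atomic_t.txt draws the line quoted above: RMW operations that return a value are fully ordered, while void RMW operations are unordered and need explicit smp_mb__before_atomic()/smp_mb__after_atomic() barriers on weakly ordered architectures (on architectures whose atomics are already fully ordered, those barriers are no-ops). A short kernel-style sketch of the two variants; nr_pending and the publish_* helpers are invented names.

#include <linux/atomic.h>
#include <linux/compiler.h>

static atomic_t nr_pending = ATOMIC_INIT(0);

/* Void RMW: unordered, so pair it with an explicit barrier when needed. */
static void publish_work(int *flag)
{
    atomic_inc(&nr_pending);
    smp_mb__after_atomic();  /* order the increment before the store below */
    WRITE_ONCE(*flag, 1);
}

/* Value-returning RMW: fully ordered, no extra barrier required. */
static int publish_work_counted(int *flag)
{
    int nr = atomic_inc_return(&nr_pending);

    WRITE_ONCE(*flag, 1);
    return nr;
}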
|
atomic_bitops.txt
    59: - RMW operations that have a return value are fully ordered.
    61: - RMW operations that are conditional are fully ordered.
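
atomic_bitops.txt makes the same distinction for bit operations: test_and_set_bit() and friends return a value and are therefore fully ordered, which is what lets the btrfs_mark_ordered_extent_error() hit earlier in this listing use test_and_set_bit() as a one-time claim with no extra barriers. A minimal kernel-style sketch; STATE_ERROR, state_flags and record_error_once are invented.

#include <linux/bitops.h>
#include <linux/printk.h>

#define STATE_ERROR    0

static unsigned long state_flags;

/*
 * test_and_set_bit() is a value-returning RMW and hence fully ordered: only
 * the first caller sees the bit clear, and its one-time work is ordered
 * against surrounding accesses without additional barriers.
 */
static void record_error_once(void)
{
    if (!test_and_set_bit(STATE_ERROR, &state_flags))
        pr_err("entered error state\n");
}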
|
/linux/Documentation/devicetree/bindings/
dts-coding-style.rst
    51: ordered by unit address in ascending order.
    56: 2. Nodes without unit addresses shall be ordered alpha-numerically by the node
    57: name. For a few node types, they can be ordered by the main property, e.g.
    58: pin configuration states ordered by value of "pins" property.
    61: ordered either alpha-numerically or by keeping the order from DTSI, where
|
/linux/Documentation/arch/riscv/
uabi.rst
    26: ordered first by category, in canonical order, as listed above, then
    31: extensions are listed, they will be ordered alphabetically.
    35: extensions are listed, they will be ordered alphabetically.
    39: ordered alphabetically.
|
/linux/Documentation/litmus-tests/
README
    18: the RMW are ordered before the subsequential memory accesses.
    24: cmpxchg-fail-ordered-1.litmus
    28: cmpxchg-fail-ordered-2.litmus
|
/linux/virt/kvm/
Kconfig
    22: # Only strongly ordered architectures can select this, as it doesn't
    30: # Weakly ordered architectures can only select this, advertising
|
/linux/Documentation/admin-guide/perf/
nvidia-pmu.rst
    32: strongly-ordered (SO) PCIE write traffic to local/remote memory. Please see
    62: In this config, the PMU captures read and relaxed ordered (RO) writes from
   155: to local memory. For PCIE traffic, this PMU captures read and relaxed ordered
   281: PCIE1 traffic represents strongly ordered (SO) writes.
   282: PCIE2 traffic represents reads and relaxed ordered (RO) writes.
   332: PCIE1 traffic represents strongly ordered (SO) writes.
   333: PCIE2 traffic represents reads and relaxed ordered (RO) writes.
|
/linux/tools/memory-model/litmus-tests/
CoWW+poonceonce.litmus
     7: * writes to the same variable are ordered.
|
README
     7: successive reads from the same variable are ordered.
    12: are ordered.
    17: are ordered.
    21: successive writes to the same variable are ordered.
   109: This is the fully ordered (via smp_mb()) version of one of
   117: This is the fully ordered (again, via smp_mb() version of store
|
CoRR+poonceonce+Once.litmus
     7: * reads from the same variable are ordered.
|
/linux/tools/perf/Documentation/
perf.txt
    60: ordered-events - ordered events object debug messages
|
/linux/Documentation/ABI/testing/
sysfs-driver-input-cros-ec-keyb
     5: ordered by the physical positions of the keys, from left
|