
Full-text search for "cs" (results 1–25 of 2011), sorted by relevance.

/linux/drivers/gpu/drm/i915/gt/
gen8_engine_cs.c
16 u32 *cs, flags = 0; in gen8_emit_flush_rcs() local
58 cs = intel_ring_begin(rq, len); in gen8_emit_flush_rcs()
59 if (IS_ERR(cs)) in gen8_emit_flush_rcs()
60 return PTR_ERR(cs); in gen8_emit_flush_rcs()
63 cs = gen8_emit_pipe_control(cs, 0, 0); in gen8_emit_flush_rcs()
66 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE, in gen8_emit_flush_rcs()
69 cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); in gen8_emit_flush_rcs()
72 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0); in gen8_emit_flush_rcs()
74 intel_ring_advance(rq, cs); in gen8_emit_flush_rcs()
81 u32 cmd, *cs; in gen8_emit_flush_xcs() local
[all …]
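
The snippets above all follow the same emission idiom: intel_ring_begin() reserves space on the request's ring and returns a u32 *cs cursor (or an ERR_PTR on failure), the caller writes command dwords through *cs++ or helpers such as gen8_emit_pipe_control(), and intel_ring_advance() commits the cursor. Below is a minimal standalone C sketch of that cursor pattern; ring_begin()/ring_advance() and the opcode values are illustrative stand-ins, not the real i915 API.

#include <stdint.h>
#include <stdio.h>

/* Toy ring buffer standing in for the hardware command stream. */
struct ring {
	uint32_t buf[64];
	unsigned int tail;	/* index of the next free dword */
};

/* Reserve @ndwords and return a write cursor, or NULL if the ring is full
 * (the kernel code returns an ERR_PTR instead). */
static uint32_t *ring_begin(struct ring *r, unsigned int ndwords)
{
	if (r->tail + ndwords > sizeof(r->buf) / sizeof(r->buf[0]))
		return NULL;
	return &r->buf[r->tail];
}

/* Commit everything written through the cursor. */
static void ring_advance(struct ring *r, const uint32_t *cs)
{
	r->tail = (unsigned int)(cs - r->buf);
}

int main(void)
{
	struct ring r = { .tail = 0 };
	uint32_t *cs = ring_begin(&r, 4);

	if (!cs)
		return 1;

	*cs++ = 0x7a000004;	/* illustrative "pipe control"-style opcode */
	*cs++ = 0x00100000;	/* illustrative flags */
	*cs++ = 0;		/* address low */
	*cs++ = 0;		/* address high */
	ring_advance(&r, cs);

	printf("emitted %u dwords\n", r.tail);
	return 0;
}
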
gen6_engine_cs.c
32 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
60 u32 *cs; in gen6_emit_post_sync_nonzero_flush() local
62 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
63 if (IS_ERR(cs)) in gen6_emit_post_sync_nonzero_flush()
64 return PTR_ERR(cs); in gen6_emit_post_sync_nonzero_flush()
66 *cs++ = GFX_OP_PIPE_CONTROL(5); in gen6_emit_post_sync_nonzero_flush()
67 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; in gen6_emit_post_sync_nonzero_flush()
68 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; in gen6_emit_post_sync_nonzero_flush()
69 *cs++ = 0; /* low dword */ in gen6_emit_post_sync_nonzero_flush()
70 *cs++ = 0; /* high dword */ in gen6_emit_post_sync_nonzero_flush()
[all …]
gen7_renderclear.c
12 #define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS)) argument
102 static u32 batch_offset(const struct batch_chunk *bc, u32 *cs) in batch_offset() argument
104 return (cs - bc->start) * sizeof(*bc->start) + bc->offset; in batch_offset()
148 u32 *cs = batch_alloc_items(state, 32, 8); in gen7_fill_surface_state() local
149 u32 offset = batch_offset(state, cs); in gen7_fill_surface_state()
155 *cs++ = SURFACE_2D << 29 | in gen7_fill_surface_state()
159 *cs++ = batch_addr(state) + dst_offset; in gen7_fill_surface_state()
161 *cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1); in gen7_fill_surface_state()
162 *cs++ = surface_w; in gen7_fill_surface_state()
163 *cs++ = 0; in gen7_fill_surface_state()
[all …]
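
batch_offset() above converts the dword cursor back into a byte offset: the pointer difference (cs - bc->start) counts u32 elements, so it is scaled by sizeof(*bc->start) and added to the chunk's base offset. A tiny standalone illustration of that pointer arithmetic (the array and base offset are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t batch[32];
	uint32_t *start = batch, *cs = &batch[5];
	uint32_t base_offset = 0x100;	/* illustrative chunk offset */

	/* (cs - start) is 5 dwords; scale to bytes and add the base:
	 * 5 * 4 + 0x100 = 0x114. */
	printf("offset = 0x%x\n",
	       (unsigned int)((cs - start) * sizeof(*start) + base_offset));
	return 0;
}
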
gen8_engine_cs.h
43 u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
44 u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
46 u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
47 u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
48 u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
50 u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs);
79 __gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1) in __gen8_emit_write_rcs() argument
81 *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0; in __gen8_emit_write_rcs()
82 *cs++ = flags1 | PIPE_CONTROL_QW_WRITE; in __gen8_emit_write_rcs()
83 *cs++ = offset; in __gen8_emit_write_rcs()
[all …]
intel_migrate.c
336 u32 *cs; in emit_no_arbitration() local
338 cs = intel_ring_begin(rq, 2); in emit_no_arbitration()
339 if (IS_ERR(cs)) in emit_no_arbitration()
340 return PTR_ERR(cs); in emit_no_arbitration()
343 *cs++ = MI_ARB_ON_OFF; in emit_no_arbitration()
344 *cs++ = MI_NOOP; in emit_no_arbitration()
345 intel_ring_advance(rq, cs); in emit_no_arbitration()
376 u32 *hdr, *cs; in emit_pte() local
403 cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS); in emit_pte()
404 if (IS_ERR(cs)) in emit_pte()
[all …]
selftest_engine_pm.c
34 static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value) in emit_wait() argument
36 *cs++ = MI_SEMAPHORE_WAIT | in emit_wait()
40 *cs++ = value; in emit_wait()
41 *cs++ = offset; in emit_wait()
42 *cs++ = 0; in emit_wait()
44 return cs; in emit_wait()
47 static u32 *emit_store(u32 *cs, u32 offset, u32 value) in emit_store() argument
49 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in emit_store()
50 *cs++ = offset; in emit_store()
51 *cs++ = 0; in emit_store()
[all …]
selftest_lrc.c
85 u32 *cs; in emit_semaphore_signal() local
91 cs = intel_ring_begin(rq, 4); in emit_semaphore_signal()
92 if (IS_ERR(cs)) { in emit_semaphore_signal()
94 return PTR_ERR(cs); in emit_semaphore_signal()
97 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in emit_semaphore_signal()
98 *cs++ = offset; in emit_semaphore_signal()
99 *cs++ = 0; in emit_semaphore_signal()
100 *cs++ = 1; in emit_semaphore_signal()
102 intel_ring_advance(rq, cs); in emit_semaphore_signal()
415 u32 *cs; in __live_lrc_state() local
[all …]
/linux/kernel/time/
clocksource.c
23 static void clocksource_enqueue(struct clocksource *cs);
25 static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end) in cycles_to_nsec_safe() argument
27 u64 delta = clocksource_delta(end, start, cs->mask, cs->max_raw_delta); in cycles_to_nsec_safe()
29 if (likely(delta < cs->max_cycles)) in cycles_to_nsec_safe()
30 return clocksource_cyc2ns(delta, cs->mult, cs->shift); in cycles_to_nsec_safe()
32 return mul_u64_u32_shr(delta, cs->mult, cs->shift); in cycles_to_nsec_safe()
125 * a lower bound for cs->uncertainty_margin values when registering clocks.
144 * Default for maximum permissible skew when cs->uncertainty_margin is
145 * not specified, and the lower bound even when cs->uncertainty_margin
147 * clocks with unspecified cs->uncertainty_margin, so this macro is used
[all …]
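
cycles_to_nsec_safe() above is the standard clocksource conversion: mask the cycle delta, then scale it as ns = (delta * mult) >> shift, switching to a wide multiply (mul_u64_u32_shr()) once the delta exceeds cs->max_cycles so the intermediate product cannot overflow 64 bits. A small userspace sketch of that arithmetic with made-up mult/shift values for an imaginary 24 MHz counter (needs a compiler providing unsigned __int128, e.g. GCC or Clang):

#include <stdint.h>
#include <stdio.h>

/* ns = (cycles * mult) >> shift, computed in 128 bits to stay overflow-safe. */
static uint64_t cyc_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (uint64_t)(((unsigned __int128)cycles * mult) >> shift);
}

int main(void)
{
	/* Illustrative scaling for a 24 MHz counter: mult / 2^shift is roughly
	 * 1e9 / 24e6, i.e. about 41.67 ns per cycle. */
	uint32_t mult = 2796202667u;	/* ~41.67 * 2^26 */
	uint32_t shift = 26;
	uint64_t delta = 24000;		/* 1 ms worth of cycles at 24 MHz */

	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)delta,
	       (unsigned long long)cyc_to_ns(delta, mult, shift));
	return 0;
}
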
/linux/drivers/scsi/
myrs.c
104 static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk) in myrs_qcmd() argument
106 void __iomem *base = cs->io_base; in myrs_qcmd()
108 union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox; in myrs_qcmd()
110 cs->write_cmd_mbox(next_mbox, mbox); in myrs_qcmd()
112 if (cs->prev_cmd_mbox1->words[0] == 0 || in myrs_qcmd()
113 cs->prev_cmd_mbox2->words[0] == 0) in myrs_qcmd()
114 cs->get_cmd_mbox(base); in myrs_qcmd()
116 cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1; in myrs_qcmd()
117 cs->prev_cmd_mbox1 = next_mbox; in myrs_qcmd()
119 if (++next_mbox > cs->last_cmd_mbox) in myrs_qcmd()
[all …]
/linux/drivers/gpu/drm/i915/pxp/
intel_pxp_cmd.c
23 static u32 *pxp_emit_session_selection(u32 *cs, u32 idx) in pxp_emit_session_selection() argument
25 *cs++ = MFX_WAIT_PXP; in pxp_emit_session_selection()
28 *cs++ = MI_FLUSH_DW; in pxp_emit_session_selection()
29 *cs++ = 0; in pxp_emit_session_selection()
30 *cs++ = 0; in pxp_emit_session_selection()
33 *cs++ = MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx); in pxp_emit_session_selection()
35 *cs++ = MFX_WAIT_PXP; in pxp_emit_session_selection()
38 *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_PROTECTED_MEM_EN | in pxp_emit_session_selection()
40 *cs++ = I915_GEM_HWS_PXP_ADDR | MI_FLUSH_DW_USE_GTT; in pxp_emit_session_selection()
41 *cs++ = 0; in pxp_emit_session_selection()
[all …]
/linux/drivers/net/ovpn/
crypto.c
40 void ovpn_crypto_state_release(struct ovpn_crypto_state *cs) in ovpn_crypto_state_release() argument
44 ks = rcu_access_pointer(cs->slots[0]); in ovpn_crypto_state_release()
46 RCU_INIT_POINTER(cs->slots[0], NULL); in ovpn_crypto_state_release()
50 ks = rcu_access_pointer(cs->slots[1]); in ovpn_crypto_state_release()
52 RCU_INIT_POINTER(cs->slots[1], NULL); in ovpn_crypto_state_release()
58 bool ovpn_crypto_kill_key(struct ovpn_crypto_state *cs, u8 key_id) in ovpn_crypto_kill_key() argument
62 spin_lock_bh(&cs->lock); in ovpn_crypto_kill_key()
63 if (rcu_access_pointer(cs->slots[0])->key_id == key_id) { in ovpn_crypto_kill_key()
64 ks = rcu_replace_pointer(cs->slots[0], NULL, in ovpn_crypto_kill_key()
65 lockdep_is_held(&cs->lock)); in ovpn_crypto_kill_key()
[all …]
crypto.h
65 static inline void ovpn_crypto_state_init(struct ovpn_crypto_state *cs) in ovpn_crypto_state_init() argument
67 RCU_INIT_POINTER(cs->slots[0], NULL); in ovpn_crypto_state_init()
68 RCU_INIT_POINTER(cs->slots[1], NULL); in ovpn_crypto_state_init()
69 cs->primary_idx = 0; in ovpn_crypto_state_init()
70 spin_lock_init(&cs->lock); in ovpn_crypto_state_init()
74 ovpn_crypto_key_id_to_slot(const struct ovpn_crypto_state *cs, u8 key_id) in ovpn_crypto_key_id_to_slot() argument
79 if (unlikely(!cs)) in ovpn_crypto_key_id_to_slot()
83 idx = READ_ONCE(cs->primary_idx); in ovpn_crypto_key_id_to_slot()
84 ks = rcu_dereference(cs->slots[idx]); in ovpn_crypto_key_id_to_slot()
91 ks = rcu_dereference(cs->slots[!idx]); in ovpn_crypto_key_id_to_slot()
[all …]
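
ovpn_crypto_key_id_to_slot() above keeps two key slots plus a primary_idx, checks the primary slot's key_id first and then falls back to the other slot, all under RCU. A simplified sketch of that two-slot lookup using plain pointers; the RCU/locking details are omitted and the struct names are illustrative, not the driver's types.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the ovpn key-slot structures. */
struct keyslot {
	uint8_t key_id;
};

struct crypto_state {
	struct keyslot *slots[2];
	unsigned int primary_idx;
};

/* Return the slot holding @key_id: primary slot first, then the secondary. */
static struct keyslot *key_id_to_slot(const struct crypto_state *cs, uint8_t key_id)
{
	unsigned int idx = cs->primary_idx;
	struct keyslot *ks = cs->slots[idx];

	if (ks && ks->key_id == key_id)
		return ks;

	ks = cs->slots[!idx];
	if (ks && ks->key_id == key_id)
		return ks;

	return NULL;	/* unknown key id */
}

int main(void)
{
	struct keyslot primary = { .key_id = 3 }, secondary = { .key_id = 4 };
	struct crypto_state cs = {
		.slots = { &primary, &secondary },
		.primary_idx = 0,
	};

	printf("key 4 found in secondary slot: %s\n",
	       key_id_to_slot(&cs, 4) == &secondary ? "yes" : "no");
	return 0;
}
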
/linux/kernel/cgroup/
cpuset.c
150 struct cpuset *cs = task_cs(p); in inc_dl_tasks_cs() local
152 cs->nr_deadline_tasks++; in inc_dl_tasks_cs()
157 struct cpuset *cs = task_cs(p); in dec_dl_tasks_cs() local
159 cs->nr_deadline_tasks--; in dec_dl_tasks_cs()
162 static inline int is_partition_valid(const struct cpuset *cs) in is_partition_valid() argument
164 return cs->partition_root_state > 0; in is_partition_valid()
167 static inline int is_partition_invalid(const struct cpuset *cs) in is_partition_invalid() argument
169 return cs->partition_root_state < 0; in is_partition_invalid()
175 static inline void make_partition_invalid(struct cpuset *cs) in make_partition_invalid() argument
177 if (cs->partition_root_state > 0) in make_partition_invalid()
[all …]
cpuset-v1.c
11 struct cpuset *cs; member
148 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
155 if (val != cs->relax_domain_level) { in update_relax_domain_level()
156 cs->relax_domain_level = val; in update_relax_domain_level()
157 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
158 is_sched_load_balance(cs)) in update_relax_domain_level()
168 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
174 if (!is_cpuset_online(cs)) in cpuset_write_s64()
180 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
194 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
[all …]
/linux/drivers/accel/habanalabs/common/
command_submission.c
23 * enum hl_cs_wait_status - cs wait status
24 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
25 * @CS_WAIT_STATUS_COMPLETED: cs completed
26 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
65 * CS outcome store supports the following operations: in hl_push_cs_outcome()
66 * push outcome - store a recent CS outcome in the store in hl_push_cs_outcome()
67 * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store in hl_push_cs_outcome()
70 * a single CS outcome. in hl_push_cs_outcome()
90 dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq); in hl_push_cs_outcome()
250 void cs_get(struct hl_cs *cs) in cs_get() argument
[all …]
hw_queue.c
41 void hl_hw_queue_update_ci(struct hl_cs *cs) in hl_hw_queue_update_ci() argument
43 struct hl_device *hdev = cs->ctx->hdev; in hl_hw_queue_update_ci()
58 * 1. All queues of a non completion CS will never get a completion. in hl_hw_queue_update_ci()
62 if (!cs_needs_completion(cs) || q->queue_type == QUEUE_TYPE_INT) in hl_hw_queue_update_ci()
63 atomic_add(cs->jobs_in_queue_cnt[i], &q->ci); in hl_hw_queue_update_ci()
208 * more than once per CS for the same queue
281 struct hl_device *hdev = job->cs->ctx->hdev; in ext_queue_schedule_job()
301 /* Skip completion flow in case this is a non completion CS */ in ext_queue_schedule_job()
302 if (!cs_needs_completion(job->cs)) in ext_queue_schedule_job()
346 struct hl_device *hdev = job->cs->ctx->hdev; in int_queue_schedule_job()
[all …]
/linux/sound/core/
pcm_iec958.c
14 * @cs: channel status buffer, at least four bytes
17 * Create the consumer format channel status data in @cs of maximum size
29 int snd_pcm_create_iec958_consumer_default(u8 *cs, size_t len) in snd_pcm_create_iec958_consumer_default() argument
34 memset(cs, 0, len); in snd_pcm_create_iec958_consumer_default()
36 cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE; in snd_pcm_create_iec958_consumer_default()
37 cs[1] = IEC958_AES1_CON_GENERAL; in snd_pcm_create_iec958_consumer_default()
38 cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC | IEC958_AES2_CON_CHANNEL_UNSPEC; in snd_pcm_create_iec958_consumer_default()
39 cs[3] = IEC958_AES3_CON_CLOCK_1000PPM | IEC958_AES3_CON_FS_NOTID; in snd_pcm_create_iec958_consumer_default()
42 cs[4] = IEC958_AES4_CON_WORDLEN_NOTID; in snd_pcm_create_iec958_consumer_default()
49 u8 *cs, size_t len) in fill_iec958_consumer() argument
[all …]
/linux/drivers/mfd/
atmel-smc.c
22 * atmel_smc_cs_conf_init - initialize a SMC CS conf
23 * @conf: the SMC CS conf to initialize
86 * atmel_smc_cs_conf_set_timing - set the SMC CS conf Txx parameter to a
88 * @conf: SMC CS conf descriptor
128 * atmel_smc_cs_conf_set_setup - set the SMC CS conf xx_SETUP parameter to a
130 * @conf: SMC CS conf descriptor
167 * atmel_smc_cs_conf_set_pulse - set the SMC CS conf xx_PULSE parameter to a
169 * @conf: SMC CS conf descriptor
206 * atmel_smc_cs_conf_set_cycle - set the SMC CS conf xx_CYCLE parameter to a
208 * @conf: SMC CS conf descriptor
[all …]
/linux/Documentation/devicetree/bindings/memory-controllers/
ti-aemif.txt
34 - CS-specific partition/range. If continuous, must be
38 - control partition which is common for all CS
56 Child chip-select (cs) nodes contain the memory devices nodes connected to
60 Required child cs node properties:
73 - ti,cs-chipselect: number of chipselect. Indicates on the aemif driver
79 Optional child cs node properties:
81 - ti,cs-bus-width: width of the asynchronous device's data bus
84 - ti,cs-select-strobe-mode: enable/disable select strobe mode
89 - ti,cs-extended-wait-mode: enable/disable extended wait mode
95 - ti,cs-min-turnaround-ns: minimum turn around time, ns
[all …]
/linux/arch/mips/bcm63xx/
cs.c
24 static int is_valid_cs(unsigned int cs) in is_valid_cs() argument
26 if (cs > 6) in is_valid_cs()
35 int bcm63xx_set_cs_base(unsigned int cs, u32 base, unsigned int size) in bcm63xx_set_cs_base() argument
40 if (!is_valid_cs(cs)) in bcm63xx_set_cs_base()
55 bcm_mpi_writel(val, MPI_CSBASE_REG(cs)); in bcm63xx_set_cs_base()
66 int bcm63xx_set_cs_timing(unsigned int cs, unsigned int wait, in bcm63xx_set_cs_timing() argument
72 if (!is_valid_cs(cs)) in bcm63xx_set_cs_timing()
76 val = bcm_mpi_readl(MPI_CSCTL_REG(cs)); in bcm63xx_set_cs_timing()
83 bcm_mpi_writel(val, MPI_CSCTL_REG(cs)); in bcm63xx_set_cs_timing()
94 int bcm63xx_set_cs_param(unsigned int cs, u32 params) in bcm63xx_set_cs_param() argument
[all …]
/linux/arch/m68k/include/asm/
m5307sim.h
51 #define MCFSIM_CSAR0 (MCF_MBAR + 0x80) /* CS 0 Address reg */
52 #define MCFSIM_CSMR0 (MCF_MBAR + 0x84) /* CS 0 Mask reg */
53 #define MCFSIM_CSCR0 (MCF_MBAR + 0x8a) /* CS 0 Control reg */
54 #define MCFSIM_CSAR1 (MCF_MBAR + 0x8c) /* CS 1 Address reg */
55 #define MCFSIM_CSMR1 (MCF_MBAR + 0x90) /* CS 1 Mask reg */
56 #define MCFSIM_CSCR1 (MCF_MBAR + 0x96) /* CS 1 Control reg */
59 #define MCFSIM_CSBAR (MCF_MBAR + 0x98) /* CS Base Address */
60 #define MCFSIM_CSBAMR (MCF_MBAR + 0x9c) /* CS Base Mask */
61 #define MCFSIM_CSMR2 (MCF_MBAR + 0x9e) /* CS 2 Mask reg */
62 #define MCFSIM_CSCR2 (MCF_MBAR + 0xa2) /* CS 2 Control reg */
[all …]
/linux/include/linux/mfd/syscon/
atmel-smc.h
20 #define ATMEL_SMC_SETUP(cs) (((cs) * 0x10)) argument
21 #define ATMEL_HSMC_SETUP(layout, cs) \ argument
22 ((layout)->timing_regs_offset + ((cs) * 0x14))
23 #define ATMEL_SMC_PULSE(cs) (((cs) * 0x10) + 0x4) argument
24 #define ATMEL_HSMC_PULSE(layout, cs) \ argument
25 ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x4)
26 #define ATMEL_SMC_CYCLE(cs) (((cs) * 0x10) + 0x8) argument
27 #define ATMEL_HSMC_CYCLE(layout, cs) \ argument
28 ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x8)
34 #define ATMEL_SMC_MODE(cs) (((cs) * 0x10) + 0xc) argument
[all …]
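
The macros above spell out the register layout: the classic SMC packs the SETUP, PULSE, CYCLE and MODE registers for each chip select into a 0x10-byte block at cs * 0x10 (offsets +0x0, +0x4, +0x8, +0xc), while the HSMC variant uses 0x14-byte blocks starting at the layout's timing_regs_offset. A quick standalone check of that arithmetic for chip select 3, using local copies of the SMC offset macros so the snippet compiles on its own:

#include <stdio.h>

/* Local copies of the classic SMC per-CS offsets shown above. */
#define SMC_SETUP(cs)	((cs) * 0x10)
#define SMC_PULSE(cs)	(((cs) * 0x10) + 0x4)
#define SMC_CYCLE(cs)	(((cs) * 0x10) + 0x8)
#define SMC_MODE(cs)	(((cs) * 0x10) + 0xc)

int main(void)
{
	int cs = 3;

	/* For cs = 3: SETUP 0x30, PULSE 0x34, CYCLE 0x38, MODE 0x3c. */
	printf("SETUP=0x%02x PULSE=0x%02x CYCLE=0x%02x MODE=0x%02x\n",
	       SMC_SETUP(cs), SMC_PULSE(cs), SMC_CYCLE(cs), SMC_MODE(cs));
	return 0;
}
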
/linux/drivers/memory/
omap-gpmc.c
207 /* Structure to save gpmc cs context */
278 void gpmc_cs_write_reg(int cs, int idx, u32 val) in gpmc_cs_write_reg() argument
282 reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx; in gpmc_cs_write_reg()
286 static u32 gpmc_cs_read_reg(int cs, int idx) in gpmc_cs_read_reg() argument
290 reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx; in gpmc_cs_read_reg()
307 * @cs: Chip Select Region.
310 * GPMC_CS_CONFIG1 GPMCFCLKDIVIDER for cs has to be setup
313 static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd) in gpmc_get_clk_period() argument
322 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); in gpmc_get_clk_period()
335 static unsigned int gpmc_ns_to_clk_ticks(unsigned int time_ns, int cs, in gpmc_ns_to_clk_ticks() argument
[all …]
/linux/net/ceph/
string_table.c
13 struct ceph_string *cs, *exist; in ceph_find_or_create_string() local
40 cs = kmalloc(sizeof(*cs) + len + 1, GFP_NOFS); in ceph_find_or_create_string()
41 if (!cs) in ceph_find_or_create_string()
44 kref_init(&cs->kref); in ceph_find_or_create_string()
45 cs->len = len; in ceph_find_or_create_string()
46 memcpy(cs->str, str, len); in ceph_find_or_create_string()
47 cs->str[len] = 0; in ceph_find_or_create_string()
68 rb_link_node(&cs->node, parent, p); in ceph_find_or_create_string()
69 rb_insert_color(&cs->node, &string_tree); in ceph_find_or_create_string()
80 kfree(cs); in ceph_find_or_create_string()
[all …]
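
ceph_find_or_create_string() above allocates the tree node and its string in a single kmalloc(sizeof(*cs) + len + 1), copies the bytes and NUL-terminates them, i.e. the classic flexible-array-member pattern. A standalone sketch of just that allocation step, with the kref and rbtree handling left out:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* String node with the text stored inline after the header. */
struct str_node {
	size_t len;
	char str[];	/* flexible array member */
};

static struct str_node *str_node_create(const char *s, size_t len)
{
	struct str_node *n = malloc(sizeof(*n) + len + 1);

	if (!n)
		return NULL;
	n->len = len;
	memcpy(n->str, s, len);
	n->str[len] = '\0';
	return n;
}

int main(void)
{
	const char *s = "rbd_data.abc123";
	struct str_node *n = str_node_create(s, strlen(s));

	if (!n)
		return 1;
	printf("len=%zu str=%s\n", n->len, n->str);
	free(n);
	return 0;
}
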
/linux/fs/fuse/
dev.c
820 void fuse_copy_init(struct fuse_copy_state *cs, bool write, in fuse_copy_init() argument
823 memset(cs, 0, sizeof(*cs)); in fuse_copy_init()
824 cs->write = write; in fuse_copy_init()
825 cs->iter = iter; in fuse_copy_init()
829 static void fuse_copy_finish(struct fuse_copy_state *cs) in fuse_copy_finish() argument
831 if (cs->currbuf) { in fuse_copy_finish()
832 struct pipe_buffer *buf = cs->currbuf; in fuse_copy_finish()
834 if (cs->write) in fuse_copy_finish()
835 buf->len = PAGE_SIZE - cs->len; in fuse_copy_finish()
836 cs->currbuf = NULL; in fuse_copy_finish()
[all …]
