Lines matching refs: sch (EADM subchannel driver, apparently drivers/s390/cio/eadm_sch.c). Each entry is the source line number followed by the matching line.
61 static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
63 union orb *orb = &get_eadm_private(sch)->orb;
68 orb->eadm.intparm = (u32)virt_to_phys(sch);
72 EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));
74 cc = ssch(sch->schid, orb);
77 sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
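Lines 61-77 are the start path. A minimal sketch reconstructed from the matched lines follows; the ORB setup between lines 68 and 72 and the condition-code mapping after line 74 are not among the matches, so those parts are assumptions based on the usual ssch() convention:

static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
{
	union orb *orb = &get_eadm_private(sch)->orb;
	int cc;

	/* The interrupt parameter carries the subchannel address so the
	 * IRQ handler can recover it (line 68). */
	orb->eadm.intparm = (u32)virt_to_phys(sch);

	EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));

	cc = ssch(sch->schid, orb);		/* start subchannel (line 74) */
	switch (cc) {
	case 0:					/* accepted: mark start pending */
		sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
		return 0;
	case 1:					/* status pending (assumed) */
	case 2:					/* busy (assumed) */
		return -EBUSY;
	default:				/* not operational (assumed) */
		return -ENODEV;
	}
}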
88 static int eadm_subchannel_clear(struct subchannel *sch)
92 cc = csch(sch->schid);
96 sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
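Lines 88-96: the clear path is short enough that the matches nearly determine it. A sketch, with only the condition-code check assumed:

static int eadm_subchannel_clear(struct subchannel *sch)
{
	int cc;

	cc = csch(sch->schid);			/* clear subchannel (line 92) */
	if (cc)
		return -ENODEV;			/* assumed error mapping */

	sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
	return 0;
}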
103 struct subchannel *sch = private->sch;
105 spin_lock_irq(&sch->lock);
107 EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
108 if (eadm_subchannel_clear(sch))
110 spin_unlock_irq(&sch->lock);
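Lines 103-110 read like the body of a timer callback: under the subchannel lock, a timed-out request is cancelled with a clear. A sketch assuming the modern timer_list API; the function name, the from_timer() lookup, and the EADM_LOG error message are assumptions:

static void eadm_subchannel_timeout(struct timer_list *t)
{
	struct eadm_private *private = from_timer(private, t, timer);
	struct subchannel *sch = private->sch;		/* line 103 */

	spin_lock_irq(&sch->lock);
	EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
	if (eadm_subchannel_clear(sch))
		EADM_LOG(0, "clear failed");		/* assumed error log */
	spin_unlock_irq(&sch->lock);
}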
113 static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
115 struct eadm_private *private = get_eadm_private(sch);
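Lines 113-115 only show the signature and the private lookup; the timer handling below is the conventional pattern and therefore an assumption:

static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
{
	struct eadm_private *private = get_eadm_private(sch);

	/* expires == 0 disarms the timer, anything else (re)arms it. */
	if (expires == 0)
		del_timer(&private->timer);
	else
		mod_timer(&private->timer, jiffies + expires);
}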
123 static void eadm_subchannel_irq(struct subchannel *sch)
125 struct eadm_private *private = get_eadm_private(sch);
126 struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
142 eadm_subchannel_set_timeout(sch, 0);
148 css_sched_sch_todo(sch, SCH_TODO_EVAL);
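Lines 123-148 are the interrupt handler. The matches show the shape: fetch the EADM SCSW, disarm the watchdog, and hand an unexpectedly-interrupting subchannel to the css layer for re-evaluation. A condensed sketch; the state names and the completion step are assumptions:

static void eadm_subchannel_irq(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);
	struct eadm_scsw *scsw = &sch->schib.scsw.eadm;

	/* ... derive an error/timeout status from scsw (elided) ... */

	eadm_subchannel_set_timeout(sch, 0);	/* request finished: disarm (line 142) */

	if (private->state != EADM_BUSY) {	/* unsolicited interrupt (assumed) */
		private->state = EADM_NOT_OPER;
		css_sched_sch_todo(sch, SCH_TODO_EVAL);	/* line 148 */
		return;
	}
	/* ... complete the AOB and mark the subchannel idle (elided) ... */
}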
161 struct subchannel *sch;
166 sch = private->sch;
167 spin_lock(&sch->lock);
171 spin_unlock(&sch->lock);
174 return sch;
176 spin_unlock(&sch->lock);
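Lines 161-176 walk a driver-private list looking for an idle subchannel, taking each sch->lock nested inside an (unmatched) list lock. A sketch; the list and lock identifiers (eadm_list, list_lock) and the state flip are assumptions:

static struct subchannel *eadm_get_idle_sch(void)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_for_each_entry(private, &eadm_list, head) {
		sch = private->sch;			/* line 166 */
		spin_lock(&sch->lock);			/* nests inside list_lock */
		if (private->state == EADM_IDLE) {
			private->state = EADM_BUSY;	/* claim it (assumed) */
			spin_unlock(&sch->lock);
			spin_unlock_irqrestore(&list_lock, flags);
			return sch;			/* line 174 */
		}
		spin_unlock(&sch->lock);	/* line 176: not idle, keep looking */
	}
	spin_unlock_irqrestore(&list_lock, flags);
	return NULL;
}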
186 struct subchannel *sch;
190 sch = eadm_get_idle_sch();
191 if (!sch)
194 spin_lock_irqsave(&sch->lock, flags);
195 eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
196 ret = eadm_subchannel_start(sch, aob);
201 eadm_subchannel_set_timeout(sch, 0);
202 private = get_eadm_private(sch);
204 css_sched_sch_todo(sch, SCH_TODO_EVAL);
207 spin_unlock_irqrestore(&sch->lock, flags);
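Lines 186-207 form the request entry point: claim an idle subchannel, arm the watchdog, issue the start, and on failure disarm it and let the css layer re-evaluate. A sketch; the function name (eadm_start_aob), the -EBUSY return, and the error-path structure are assumptions:

int eadm_start_aob(struct aob *aob)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;
	int ret;

	sch = eadm_get_idle_sch();		/* line 190 */
	if (!sch)
		return -EBUSY;			/* assumed: all subchannels busy */

	spin_lock_irqsave(&sch->lock, flags);
	eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
	ret = eadm_subchannel_start(sch, aob);
	if (ret) {
		/* Start failed: disarm the watchdog and have the css layer
		 * re-evaluate the subchannel (lines 201-204). */
		eadm_subchannel_set_timeout(sch, 0);
		private = get_eadm_private(sch);
		private->state = EADM_NOT_OPER;	/* assumed */
		css_sched_sch_todo(sch, SCH_TODO_EVAL);
	}
	spin_unlock_irqrestore(&sch->lock, flags);
	return ret;
}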
213 static int eadm_subchannel_probe(struct subchannel *sch)
225 spin_lock_irq(&sch->lock);
226 set_eadm_private(sch, private);
228 private->sch = sch;
229 sch->isc = EADM_SCH_ISC;
230 ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
232 set_eadm_private(sch, NULL);
233 spin_unlock_irq(&sch->lock);
237 spin_unlock_irq(&sch->lock);
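Lines 213-237 are the probe path: the private structure (allocation not among the matches) is published under the subchannel lock, the interrupt subclass is set, and the subchannel is enabled with its own address as interrupt parameter; on failure the private pointer is detached again. A sketch with the allocation and cleanup assumed:

static int eadm_subchannel_probe(struct subchannel *sch)
{
	struct eadm_private *private;
	int ret;

	private = kzalloc(sizeof(*private), GFP_KERNEL);	/* assumed */
	if (!private)
		return -ENOMEM;

	spin_lock_irq(&sch->lock);
	set_eadm_private(sch, private);		/* line 226 */
	private->sch = sch;
	sch->isc = EADM_SCH_ISC;	/* take IRQs on the EADM subclass */
	ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
	if (ret) {
		set_eadm_private(sch, NULL);	/* line 232: unwind */
		spin_unlock_irq(&sch->lock);
		kfree(private);			/* assumed */
		return ret;
	}
	spin_unlock_irq(&sch->lock);		/* line 237 */
	return 0;
}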
246 static void eadm_quiesce(struct subchannel *sch)
248 struct eadm_private *private = get_eadm_private(sch);
252 spin_lock_irq(&sch->lock);
256 if (eadm_subchannel_clear(sch))
260 spin_unlock_irq(&sch->lock);
264 spin_lock_irq(&sch->lock);
268 eadm_subchannel_set_timeout(sch, 0);
270 ret = cio_disable_subchannel(sch);
273 spin_unlock_irq(&sch->lock);
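Lines 246-273 quiesce a subchannel in two locked sections: first clear any in-flight request, then, after an unmatched gap (presumably a wait for the clear to complete), disarm the timer and disable the subchannel. A sketch; the goto structure, the wait, and the retry loop are assumptions:

static void eadm_quiesce(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);
	int ret;

	spin_lock_irq(&sch->lock);
	if (private->state != EADM_BUSY)	/* assumed guard */
		goto disable;

	if (eadm_subchannel_clear(sch))		/* line 256 */
		goto disable;

	spin_unlock_irq(&sch->lock);		/* line 260 */

	/* ... wait for the cleared request to complete (assumed) ... */

	spin_lock_irq(&sch->lock);		/* line 264 */
disable:
	eadm_subchannel_set_timeout(sch, 0);
	do {
		ret = cio_disable_subchannel(sch);	/* line 270 */
	} while (ret == -EBUSY);	/* retry loop is an assumption */
	spin_unlock_irq(&sch->lock);		/* line 273 */
}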
276 static void eadm_subchannel_remove(struct subchannel *sch)
278 struct eadm_private *private = get_eadm_private(sch);
284 eadm_quiesce(sch);
286 spin_lock_irq(&sch->lock);
287 set_eadm_private(sch, NULL);
288 spin_unlock_irq(&sch->lock);
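Lines 276-288: removal quiesces outstanding work, then detaches the private data under the subchannel lock. Only the freeing is assumed:

static void eadm_subchannel_remove(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);

	eadm_quiesce(sch);			/* line 284 */

	spin_lock_irq(&sch->lock);
	set_eadm_private(sch, NULL);		/* line 287 */
	spin_unlock_irq(&sch->lock);

	kfree(private);				/* assumed */
}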
293 static void eadm_subchannel_shutdown(struct subchannel *sch)
295 eadm_quiesce(sch);
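Line 295 is the entire body: shutdown simply reuses the quiesce helper:

static void eadm_subchannel_shutdown(struct subchannel *sch)
{
	eadm_quiesce(sch);
}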
300 * @sch: subchannel
308 static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
313 spin_lock_irqsave(&sch->lock, flags);
314 if (!device_is_registered(&sch->dev))
317 if (work_pending(&sch->todo_work))
320 if (cio_update_schib(sch)) {
321 css_sched_sch_todo(sch, SCH_TODO_UNREG);
324 private = get_eadm_private(sch);
329 spin_unlock_irqrestore(&sch->lock, flags);
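Lines 308-329 are the subchannel-event callback: bail out if the device is no longer registered or already has todo work queued, schedule unregistration when the schib can no longer be read, and otherwise touch the private state. A sketch; the final state fixup and the return value are assumptions:

static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
{
	struct eadm_private *private;
	unsigned long flags;

	spin_lock_irqsave(&sch->lock, flags);
	if (!device_is_registered(&sch->dev))	/* line 314 */
		goto out_unlock;

	if (work_pending(&sch->todo_work))	/* evaluation already queued */
		goto out_unlock;

	if (cio_update_schib(sch)) {		/* device gone: unregister */
		css_sched_sch_todo(sch, SCH_TODO_UNREG);
		goto out_unlock;
	}
	private = get_eadm_private(sch);
	if (private->state == EADM_NOT_OPER)	/* assumed fixup */
		private->state = EADM_IDLE;
out_unlock:
	spin_unlock_irqrestore(&sch->lock, flags);
	return 0;				/* assumed */
}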