Lines matching refs: sch

Cross-reference listing for the identifier sch (a struct subchannel pointer). Each entry below is prefixed with its line number in the source file, apparently drivers/s390/cio/device.c from the s390 common I/O layer; the code fragments are quoted verbatim from that file.

274 struct subchannel *sch;
288 sch = to_subchannel(cdev->dev.parent);
309 io_subchannel_quiesce(sch);
544 struct subchannel *sch;
554 sch = to_subchannel(dev->parent);
555 if (!sch->lpm)
569 struct subchannel *sch = to_subchannel(dev);
572 rc = chsc_siosl(sch->schid);
575 sch->schid.ssid, sch->schid.sch_no, rc);
579 sch->schid.ssid, sch->schid.sch_no);
586 struct subchannel *sch = to_subchannel(dev);
588 return sysfs_emit(buf, "%02x\n", sch->vpm);
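The fragments at lines 544-588 come from sysfs attribute callbacks: one triggers a stateless-logging request via chsc_siosl() and reports the outcome per subchannel id, another exposes the verified path mask (vpm). A minimal sketch reassembled from the listed lines; the function names, the attribute wiring, and the exact message wording outside the quoted fragments are assumptions:

    /* Sketch of the two sysfs callbacks implied by lines 569-588.
     * Names and messages beyond the listed fragments are assumed. */
    static ssize_t initiate_logging(struct device *dev,
    				struct device_attribute *attr,
    				const char *buf, size_t count)
    {
    	struct subchannel *sch = to_subchannel(dev);
    	int rc;
    
    	rc = chsc_siosl(sch->schid);	/* ask the channel subsystem to log */
    	if (rc < 0) {
    		pr_warn("Logging for subchannel 0.%x.%04x failed, rc=%d\n",
    			sch->schid.ssid, sch->schid.sch_no, rc);
    		return rc;
    	}
    	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
    		  sch->schid.ssid, sch->schid.sch_no);
    	return count;
    }
    
    static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
    			char *buf)
    {
    	struct subchannel *sch = to_subchannel(dev);
    
    	return sysfs_emit(buf, "%02x\n", sch->vpm);	/* verified path mask */
    }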
684 static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
702 cdev->dev.dma_mask = sch->dev.dma_mask;
703 ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
733 static int io_subchannel_initialize_dev(struct subchannel *sch,
742 priv->dev_id.devno = sch->schib.pmcw.dev;
743 priv->dev_id.ssid = sch->schid.ssid;
752 cdev->ccwlock = &sch->lock;
753 cdev->dev.parent = &sch->dev;
763 if (!get_device(&sch->dev)) {
768 spin_lock_irq(&sch->lock);
769 sch_set_cdev(sch, cdev);
770 spin_unlock_irq(&sch->lock);
779 static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
784 cdev = io_subchannel_allocate_dev(sch);
786 ret = io_subchannel_initialize_dev(sch, cdev);
795 static void sch_create_and_recog_new_device(struct subchannel *sch)
800 cdev = io_subchannel_create_ccwdev(sch);
803 css_sch_device_unregister(sch);
807 io_subchannel_recog(cdev, sch);
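Lines 684-807 cover device creation: io_subchannel_allocate_dev() inherits the subchannel's DMA masks (lines 702-703), io_subchannel_initialize_dev() wires the new ccw_device to its parent subchannel (dev_id, ccwlock, dev.parent) and publishes it under the subchannel lock, and sch_create_and_recog_new_device() drives the whole sequence. A condensed sketch of that top-level flow, reassembled from the listed calls with error handling simplified:

    /* Condensed from lines 779-807: create a ccw_device for a freshly
     * discovered I/O subchannel and start device recognition; on
     * failure the subchannel itself is unregistered. */
    static void sch_create_and_recog_new_device(struct subchannel *sch)
    {
    	struct ccw_device *cdev;
    
    	cdev = io_subchannel_create_ccwdev(sch);	/* allocate + initialize */
    	if (IS_ERR(cdev)) {
    		/* Allocation or init failure: give up on the subchannel. */
    		css_sch_device_unregister(sch);
    		return;
    	}
    	/* Start sensing the attached device (type/model/availability). */
    	io_subchannel_recog(cdev, sch);
    }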
815 struct subchannel *sch;
819 sch = to_subchannel(cdev->dev.parent);
826 if (!device_is_registered(&sch->dev))
828 css_update_ssd_info(sch);
855 spin_lock_irqsave(&sch->lock, flags);
856 sch_set_cdev(sch, NULL);
857 spin_unlock_irqrestore(&sch->lock, flags);
902 static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
908 spin_lock_irq(&sch->lock);
910 spin_unlock_irq(&sch->lock);
914 struct subchannel *sch)
921 if (!get_device(&sch->dev))
933 put_device(&sch->dev);
938 mutex_lock(&sch->reg_mutex);
939 rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
940 mutex_unlock(&sch->reg_mutex);
944 cdev->private->dev_id.devno, sch->schid.ssid,
945 sch->schib.pmcw.dev, rc);
953 put_device(&sch->dev);
966 spin_lock_irq(&sch->lock);
967 cdev->ccwlock = &sch->lock;
968 if (!sch_is_pseudo_sch(sch))
969 sch_set_cdev(sch, cdev);
970 spin_unlock_irq(&sch->lock);
971 if (!sch_is_pseudo_sch(sch))
972 css_update_ssd_info(sch);
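Lines 914-972 belong to the path that moves an existing ccw_device to another subchannel: take a reference on the target with get_device() (line 921), perform the device_move() under the css reg_mutex (lines 938-940), then complete the handover under the new subchannel's lock. A sketch of just that final step, condensed from lines 966-972 and not the verbatim function:

    /* Final handover when re-parenting a ccw_device to a new
     * subchannel: the lock pointer must be switched before the
     * device becomes reachable through the new sch. */
    spin_lock_irq(&sch->lock);
    cdev->ccwlock = &sch->lock;		/* device now serialized by new sch */
    if (!sch_is_pseudo_sch(sch))
    	sch_set_cdev(sch, cdev);	/* make cdev findable via sch */
    spin_unlock_irq(&sch->lock);
    if (!sch_is_pseudo_sch(sch))
    	css_update_ssd_info(sch);	/* refresh channel-path description */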
978 struct subchannel *sch = to_subchannel(cdev->dev.parent);
979 struct channel_subsystem *css = to_css(sch->dev.parent);
984 static void io_subchannel_irq(struct subchannel *sch)
988 cdev = sch_get_cdev(sch);
991 CIO_TRACE_EVENT(6, dev_name(&sch->dev));
998 void io_subchannel_init_config(struct subchannel *sch)
1000 memset(&sch->config, 0, sizeof(sch->config));
1001 sch->config.csense = 1;
1004 static void io_subchannel_init_fields(struct subchannel *sch)
1006 if (cio_is_console(sch->schid))
1007 sch->opm = 0xff;
1009 sch->opm = chp_get_sch_opm(sch);
1010 sch->lpm = sch->schib.pmcw.pam & sch->opm;
1011 sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
1015 sch->schib.pmcw.dev, sch->schid.ssid,
1016 sch->schid.sch_no, sch->schib.pmcw.pim,
1017 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
1019 io_subchannel_init_config(sch);
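Lines 998-1019 initialize a subchannel for I/O: csense requests concurrent sense in the subchannel configuration, opm is either forced to all paths for the console or derived from the channel-path state, lpm is the intersection of the hardware path-available mask (pam) and opm, and isc selects the interruption subclass. A reassembled sketch; the else branch and the elided debug trace are inferred from the listed fragments:

    void io_subchannel_init_config(struct subchannel *sch)
    {
    	memset(&sch->config, 0, sizeof(sch->config));
    	sch->config.csense = 1;		/* enable concurrent sense */
    }
    
    static void io_subchannel_init_fields(struct subchannel *sch)
    {
    	if (cio_is_console(sch->schid))
    		sch->opm = 0xff;			/* console: try all paths */
    	else
    		sch->opm = chp_get_sch_opm(sch);	/* paths usable per chp state */
    	sch->lpm = sch->schib.pmcw.pam & sch->opm;	/* logical path mask */
    	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
    	/* ... debug trace of devno, schid and the PIM/PAM/POM masks ... */
    	io_subchannel_init_config(sch);
    }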
1026 static int io_subchannel_probe(struct subchannel *sch)
1032 if (cio_is_console(sch->schid)) {
1033 rc = sysfs_create_group(&sch->dev.kobj,
1039 sch->schid.ssid, sch->schid.sch_no, rc);
1044 cdev = sch_get_cdev(sch);
1055 io_subchannel_init_fields(sch);
1056 rc = cio_commit_config(sch);
1059 rc = sysfs_create_group(&sch->dev.kobj,
1068 io_priv->dma_area = dma_alloc_coherent(&sch->dev,
1076 set_io_private(sch, io_priv);
1077 css_schedule_eval(sch->schid);
1081 spin_lock_irq(&sch->lock);
1082 css_sched_sch_todo(sch, SCH_TODO_UNREG);
1083 spin_unlock_irq(&sch->lock);
1087 static void io_subchannel_remove(struct subchannel *sch)
1089 struct io_subchannel_private *io_priv = to_io_private(sch);
1092 cdev = sch_get_cdev(sch);
1097 spin_lock_irq(&sch->lock);
1098 sch_set_cdev(sch, NULL);
1099 set_io_private(sch, NULL);
1100 spin_unlock_irq(&sch->lock);
1102 dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1105 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
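Lines 1026-1105 show that probe and remove are symmetric around a per-subchannel private area: probe allocates io_priv and its DMA-coherent scratch area, attaches it with set_io_private() and schedules an evaluation (lines 1068-1077); remove detaches both the ccw_device pointer and the private area under the subchannel lock before freeing the DMA memory. A sketch of the teardown ordering, condensed from lines 1087-1105; field names beyond the listed fragments (dma_area_dma) are assumptions:

    static void io_subchannel_remove(struct subchannel *sch)
    {
    	struct io_subchannel_private *io_priv = to_io_private(sch);
    	struct ccw_device *cdev;
    
    	cdev = sch_get_cdev(sch);
    	/* ... unregister cdev if present (elided) ... */
    
    	/* Detach under the lock so concurrent users of sch can see
    	 * neither the cdev nor the private area anymore ... */
    	spin_lock_irq(&sch->lock);
    	sch_set_cdev(sch, NULL);
    	set_io_private(sch, NULL);
    	spin_unlock_irq(&sch->lock);
    	/* ... and only then release the DMA memory. */
    	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
    			  io_priv->dma_area, io_priv->dma_area_dma);
    	kfree(io_priv);
    	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
    }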
1108 static void io_subchannel_verify(struct subchannel *sch)
1112 cdev = sch_get_cdev(sch);
1116 css_schedule_eval(sch->schid);
1119 static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1123 cdev = sch_get_cdev(sch);
1126 if (cio_update_schib(sch))
1129 if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
1135 if (cio_clear(sch))
1146 static int io_subchannel_chp_event(struct subchannel *sch,
1149 struct ccw_device *cdev = sch_get_cdev(sch);
1153 mask = chp_ssd_get_mask(&sch->ssd_info, link);
1158 sch->opm &= ~mask;
1159 sch->lpm &= ~mask;
1162 io_subchannel_terminate_path(sch, mask);
1165 sch->opm |= mask;
1166 sch->lpm |= mask;
1169 io_subchannel_verify(sch);
1172 if (cio_update_schib(sch))
1176 io_subchannel_terminate_path(sch, mask);
1179 if (cio_update_schib(sch))
1181 sch->lpm |= mask & sch->opm;
1184 io_subchannel_verify(sch);
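Lines 1146-1184 handle channel-path events: the path mask for the affected link is computed from the subchannel's ssd_info, then, depending on the event, the path is removed from the operational (opm) and logical (lpm) masks with any I/O on it terminated, or added back and re-verified; the offline/online flavor re-reads the schib first. A hedged sketch of that dispatch; the CHP_* event names and the struct chp_link parameter follow the usual cio naming but are assumptions here:

    /* Sketch condensed from lines 1146-1184; CHP_* names assumed. */
    static int io_subchannel_chp_event(struct subchannel *sch,
    				   struct chp_link *link, int event)
    {
    	int mask;
    
    	mask = chp_ssd_get_mask(&sch->ssd_info, link);
    	if (!mask)
    		return 0;		/* event does not affect this subchannel */
    	switch (event) {
    	case CHP_VARY_OFF:
    		sch->opm &= ~mask;	/* path administratively removed */
    		sch->lpm &= ~mask;
    		io_subchannel_terminate_path(sch, mask);	/* abort I/O on it */
    		break;
    	case CHP_VARY_ON:
    		sch->opm |= mask;
    		sch->lpm |= mask;
    		io_subchannel_verify(sch);	/* trigger path verification */
    		break;
    	case CHP_OFFLINE:
    		if (cio_update_schib(sch))
    			return -ENODEV;	/* subchannel gone with the path */
    		io_subchannel_terminate_path(sch, mask);
    		break;
    	case CHP_ONLINE:
    		if (cio_update_schib(sch))
    			return -ENODEV;
    		sch->lpm |= mask & sch->opm;	/* only operational paths */
    		io_subchannel_verify(sch);
    		break;
    	}
    	return 0;
    }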
1202 static void io_subchannel_quiesce(struct subchannel *sch)
1207 spin_lock_irq(&sch->lock);
1208 cdev = sch_get_cdev(sch);
1209 if (cio_is_console(sch->schid))
1211 if (!sch->schib.pmcw.ena)
1213 ret = cio_disable_subchannel(sch);
1224 spin_unlock_irq(&sch->lock);
1227 spin_lock_irq(&sch->lock);
1229 ret = cio_disable_subchannel(sch);
1232 spin_unlock_irq(&sch->lock);
1235 static void io_subchannel_shutdown(struct subchannel *sch)
1237 io_subchannel_quiesce(sch);
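Lines 1202-1237 quiesce a subchannel before removal or shutdown: under the lock, the console is left alone and a subchannel that is not enabled needs no work; otherwise cio_disable_subchannel() is attempted, and while it reports busy the lock is dropped so outstanding I/O can complete before retrying (lines 1224-1232). io_subchannel_shutdown() is just a wrapper around this. A condensed sketch; the cancellation of in-flight I/O via the cdev is elided, and the -EBUSY return convention is an assumption:

    static void io_subchannel_quiesce(struct subchannel *sch)
    {
    	int ret;
    
    	spin_lock_irq(&sch->lock);
    	if (cio_is_console(sch->schid))
    		goto out_unlock;	/* the console stays up */
    	if (!sch->schib.pmcw.ena)
    		goto out_unlock;	/* not enabled, nothing to do */
    	ret = cio_disable_subchannel(sch);
    	while (ret == -EBUSY) {
    		/* I/O still in flight: drop the lock so the final
    		 * interrupt can be delivered, then retry. */
    		spin_unlock_irq(&sch->lock);
    		/* ... cancel I/O and wait for it to settle (elided) ... */
    		spin_lock_irq(&sch->lock);
    		ret = cio_disable_subchannel(sch);
    	}
    out_unlock:
    	spin_unlock_irq(&sch->lock);
    }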
1251 struct subchannel *sch;
1257 sch = to_subchannel(cdev->dev.parent);
1258 if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
1323 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1332 css_sched_sch_todo(sch, SCH_TODO_UNREG);
1368 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1371 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
1373 cio_disable_subchannel(sch);
1391 static enum io_sch_action sch_get_action(struct subchannel *sch)
1396 cdev = sch_get_cdev(sch);
1397 rc = cio_update_schib(sch);
1420 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
1425 if ((sch->schib.pmcw.pam & sch->opm) == 0) {
1441 * @sch: subchannel
1449 static int io_subchannel_sch_event(struct subchannel *sch, int process)
1457 spin_lock_irqsave(&sch->lock, flags);
1458 if (!device_is_registered(&sch->dev))
1460 if (work_pending(&sch->todo_work))
1462 cdev = sch_get_cdev(sch);
1465 action = sch_get_action(sch);
1466 CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
1467 sch->schid.ssid, sch->schid.sch_no, process,
1478 io_subchannel_verify(sch);
1511 spin_unlock_irqrestore(&sch->lock, flags);
1527 spin_lock_irqsave(&sch->lock, flags);
1528 sch_set_cdev(sch, NULL);
1529 spin_unlock_irqrestore(&sch->lock, flags);
1540 css_sch_device_unregister(sch);
1545 dev_id.ssid = sch->schid.ssid;
1546 dev_id.devno = sch->schib.pmcw.dev;
1549 sch_create_and_recog_new_device(sch);
1552 rc = ccw_device_move_to_sch(cdev, sch);
1558 spin_lock_irqsave(&sch->lock, flags);
1560 spin_unlock_irqrestore(&sch->lock, flags);
1570 spin_unlock_irqrestore(&sch->lock, flags);
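Lines 1391-1570 implement the central subchannel event handler: sch_get_action() re-reads the schib and classifies what changed (device number mismatch at line 1420, no remaining operational path at line 1425), and io_subchannel_sch_event() then, under the lock, verifies paths, detaches and unregisters, or creates and recognizes a new device, including moving an existing cdev with ccw_device_move_to_sch(). A hedged sketch of the classification step only; the enum io_sch_action values follow cio naming but are assumed here:

    /* Sketch of sch_get_action() condensed from lines 1391-1425;
     * the IO_SCH_* enum values are assumed names. */
    static enum io_sch_action sch_get_action(struct subchannel *sch)
    {
    	struct ccw_device *cdev = sch_get_cdev(sch);
    
    	if (cio_update_schib(sch))
    		return IO_SCH_UNREG;		/* subchannel no longer there */
    	if (cdev && sch->schib.pmcw.dev != cdev->private->dev_id.devno)
    		return IO_SCH_UNREG_ATTACH;	/* a different device answered */
    	if ((sch->schib.pmcw.pam & sch->opm) == 0)
    		return IO_SCH_DISC;		/* no operational path left */
    	return IO_SCH_VERIFY;			/* something changed: re-verify */
    }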
1590 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1596 io_subchannel_init_fields(sch);
1597 rc = cio_commit_config(sch);
1600 sch->driver = &io_subchannel_driver;
1601 io_subchannel_recog(cdev, sch);
1631 struct subchannel *sch;
1633 sch = cio_probe_console();
1634 if (IS_ERR(sch))
1635 return ERR_CAST(sch);
1640 io_priv->dma_area = dma_alloc_coherent(&sch->dev,
1645 set_io_private(sch, io_priv);
1646 cdev = io_subchannel_create_ccwdev(sch);
1648 dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1650 set_io_private(sch, NULL);
1651 put_device(&sch->dev);
1662 put_device(&sch->dev);
1668 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1669 struct io_subchannel_private *io_priv = to_io_private(sch);
1671 set_io_private(sch, NULL);
1672 dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1674 put_device(&sch->dev);
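Lines 1631-1674 show the console-specific variant of the same pattern: cio_probe_console() yields the console subchannel, the DMA-backed private area is allocated and attached before the ccw_device is created, and the error and cleanup paths release everything in reverse order, ending with put_device() on the subchannel. A sketch of the cleanup ordering condensed from lines 1668-1674; field names beyond the listed fragments are assumptions:

    /* Console teardown: detach the private area, free its DMA memory,
     * then drop the subchannel reference taken at probe time. */
    struct subchannel *sch = to_subchannel(cdev->dev.parent);
    struct io_subchannel_private *io_priv = to_io_private(sch);
    
    set_io_private(sch, NULL);
    dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
    		  io_priv->dma_area, io_priv->dma_area_dma);
    put_device(&sch->dev);
    kfree(io_priv);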
1689 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1692 cio_tsch(sch);
1693 if (sch->schib.scsw.cmd.actl == 0)
1754 struct subchannel *sch;
1780 sch = to_subchannel(cdev->dev.parent);
1782 io_subchannel_quiesce(sch);
1837 struct subchannel *sch;
1842 sch = to_subchannel(cdev->dev.parent);
1862 if (!sch_is_pseudo_sch(sch))
1863 css_schedule_eval(sch->schid);
1866 spin_lock_irq(&sch->lock);
1867 sch_set_cdev(sch, NULL);
1868 spin_unlock_irq(&sch->lock);
1913 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1915 return chsc_siosl(sch->schid);