// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 */

#define pr_fmt(fmt) "dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
#include <asm/rtas-work-area.h>
#include <asm/prom.h>

static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
        struct work_struct work;
        struct pseries_hp_errorlog *errlog;
};

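/*
 * Header of the work area passed to the ibm,configure-connector RTAS call.
 * name_offset and prop_offset are byte offsets from the start of the work
 * area at which the current node name and property value are placed.
 */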
struct cc_workarea {
        __be32 drc_index;
        __be32 zero;
        __be32 name_offset;
        __be32 prop_length;
        __be32 prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
        kfree(prop->name);
        kfree(prop->value);
        kfree(prop);
}

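/*
 * Build a struct property from the current work-area contents. The name and
 * value are duplicated because the work area is overwritten by the next
 * configure-connector call.
 */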
static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
        struct property *prop;
        char *name;
        char *value;

        prop = kzalloc(sizeof(*prop), GFP_KERNEL);
        if (!prop)
                return NULL;

        name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
        prop->name = kstrdup(name, GFP_KERNEL);
        if (!prop->name) {
                dlpar_free_cc_property(prop);
                return NULL;
        }

        prop->length = be32_to_cpu(ccwa->prop_length);
        value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
        prop->value = kmemdup(value, prop->length, GFP_KERNEL);
        if (!prop->value) {
                dlpar_free_cc_property(prop);
                return NULL;
        }

        return prop;
}

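/*
 * Allocate a bare device node named from the work area. The node is marked
 * OF_DYNAMIC since it is created at runtime rather than unflattened from
 * the firmware device tree.
 */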
static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
{
        struct device_node *dn;
        const char *name;

        dn = kzalloc(sizeof(*dn), GFP_KERNEL);
        if (!dn)
                return NULL;

        name = (const char *)ccwa + be32_to_cpu(ccwa->name_offset);
        dn->full_name = kstrdup(name, GFP_KERNEL);
        if (!dn->full_name) {
                kfree(dn);
                return NULL;
        }

        of_node_set_flag(dn, OF_DYNAMIC);
        of_node_init(dn);

        return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
        struct property *prop;

        while (dn->properties) {
                prop = dn->properties;
                dn->properties = prop->next;
                dlpar_free_cc_property(prop);
        }

        kfree(dn->full_name);
        kfree(dn);
}

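/*
 * Free an entire subtree returned by dlpar_configure_connector(), including
 * all siblings, children and attached properties.
 */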
void dlpar_free_cc_nodes(struct device_node *dn)
{
        if (dn->child)
                dlpar_free_cc_nodes(dn->child);

        if (dn->sibling)
                dlpar_free_cc_nodes(dn->sibling);

        dlpar_free_one_cc_node(dn);
}

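/* Status values returned by the ibm,configure-connector RTAS call. */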
#define COMPLETE	0
#define NEXT_SIBLING	1
#define NEXT_CHILD	2
#define NEXT_PROPERTY	3
#define PREV_PARENT	4
#define MORE_MEMORY	5
#define ERR_CFG_USE	-9003

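/*
 * Repeatedly call ibm,configure-connector for @drc_index and assemble the
 * nodes and properties it reports into a device_node subtree parented under
 * @parent. Returns the first new node, or NULL on failure; on success the
 * caller owns the subtree and must either attach it or release it with
 * dlpar_free_cc_nodes().
 */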
struct device_node *dlpar_configure_connector(__be32 drc_index,
                                              struct device_node *parent)
{
        struct device_node *dn;
        struct device_node *first_dn = NULL;
        struct device_node *last_dn = NULL;
        struct property *property;
        struct property *last_property = NULL;
        struct cc_workarea *ccwa;
        struct rtas_work_area *work_area;
        char *data_buf;
        int cc_token;
        int rc = -1;

        cc_token = rtas_function_token(RTAS_FN_IBM_CONFIGURE_CONNECTOR);
        if (cc_token == RTAS_UNKNOWN_SERVICE)
                return NULL;

        work_area = rtas_work_area_alloc(SZ_4K);
        data_buf = rtas_work_area_raw_buf(work_area);

        ccwa = (struct cc_workarea *)&data_buf[0];
        ccwa->drc_index = drc_index;
        ccwa->zero = 0;

        do {
                do {
                        rc = rtas_call(cc_token, 2, 1, NULL,
                                       rtas_work_area_phys(work_area), NULL);
                } while (rtas_busy_delay(rc));

                switch (rc) {
                case COMPLETE:
                        break;

                case NEXT_SIBLING:
                        dn = dlpar_parse_cc_node(ccwa);
                        if (!dn)
                                goto cc_error;

                        dn->parent = last_dn->parent;
                        last_dn->sibling = dn;
                        last_dn = dn;
                        break;

                case NEXT_CHILD:
                        dn = dlpar_parse_cc_node(ccwa);
                        if (!dn)
                                goto cc_error;

                        if (!first_dn) {
                                dn->parent = parent;
                                first_dn = dn;
                        } else {
                                dn->parent = last_dn;
                                if (last_dn)
                                        last_dn->child = dn;
                        }

                        last_dn = dn;
                        break;

                case NEXT_PROPERTY:
                        property = dlpar_parse_cc_property(ccwa);
                        if (!property)
                                goto cc_error;

                        if (!last_dn->properties)
                                last_dn->properties = property;
                        else
                                last_property->next = property;

                        last_property = property;
                        break;

                case PREV_PARENT:
                        last_dn = last_dn->parent;
                        break;

                case MORE_MEMORY:
                case ERR_CFG_USE:
                default:
                        printk(KERN_ERR "Unexpected Error (%d) "
                               "returned from configure-connector\n", rc);
                        goto cc_error;
                }
        } while (rc);

cc_error:
        rtas_work_area_free(work_area);

        if (rc) {
                if (first_dn)
                        dlpar_free_cc_nodes(first_dn);

                return NULL;
        }

        return first_dn;
}

int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
        int rc;

        dn->parent = parent;

        rc = of_attach_node(dn);
        if (rc) {
                printk(KERN_ERR "Failed to add device node %pOF\n", dn);
                return rc;
        }

        return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
        struct device_node *child;
        int rc;

        for_each_child_of_node(dn, child)
                dlpar_detach_node(child);

        rc = of_detach_node(dn);
        if (rc)
                return rc;

        of_node_put(dn);

        return 0;
}

static int dlpar_changeset_attach_cc_nodes(struct of_changeset *ocs,
                                           struct device_node *dn)
{
        int rc;

        rc = of_changeset_attach_node(ocs, dn);

        if (!rc && dn->child)
                rc = dlpar_changeset_attach_cc_nodes(ocs, dn->child);
        if (!rc && dn->sibling)
                rc = dlpar_changeset_attach_cc_nodes(ocs, dn->sibling);

        return rc;
}

#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

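/*
 * Claim a dynamic reconfiguration connector: the DR entity must currently
 * be unusable; it is then marked usable and unisolated. The allocation
 * state is rolled back if unisolation fails.
 */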
int dlpar_acquire_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
        if (rc || dr_status != DR_ENTITY_UNUSABLE)
                return -1;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
        if (rc) {
                rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
                return rc;
        }

        return 0;
}

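/*
 * Return a connector to firmware: isolate the DR entity first, then mark it
 * unusable, undoing the isolation if the allocation change fails.
 */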
int dlpar_release_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
        if (rc || dr_status != DR_ENTITY_PRESENT)
                return -1;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
        if (rc) {
                rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
                return rc;
        }

        return 0;
}

int dlpar_unisolate_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
        if (rc || dr_status != DR_ENTITY_PRESENT)
                return -1;

        rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);

        return 0;
}

static struct device_node *
get_device_node_with_drc_index(u32 index)
{
        struct device_node *np = NULL;
        u32 node_index;
        int rc;

        for_each_node_with_property(np, "ibm,my-drc-index") {
                rc = of_property_read_u32(np, "ibm,my-drc-index",
                                          &node_index);
                if (rc) {
                        pr_err("%s: %pOF: of_property_read_u32 %s: %d\n",
                               __func__, np, "ibm,my-drc-index", rc);
                        of_node_put(np);
                        return NULL;
                }

                if (index == node_index)
                        break;
        }

        return np;
}

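/*
 * Search nodes carrying the ibm,drc-info property. Each entry describes a
 * run of sequential DRC indexes (start, count, increment), so the ranges
 * are expanded and compared against the requested index.
 */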
static struct device_node *
get_device_node_with_drc_info(u32 index)
{
        struct device_node *np = NULL;
        struct of_drc_info drc;
        struct property *info;
        const __be32 *value;
        u32 node_index;
        int i, j, count;

        for_each_node_with_property(np, "ibm,drc-info") {
                info = of_find_property(np, "ibm,drc-info", NULL);
                if (info == NULL) {
                        /* XXX can this happen? */
                        of_node_put(np);
                        return NULL;
                }
                value = of_prop_next_u32(info, NULL, &count);
                if (value == NULL)
                        continue;
                value++;
                for (i = 0; i < count; i++) {
                        if (of_read_drc_info_cell(&info, &value, &drc))
                                break;
                        if (index > drc.last_drc_index)
                                continue;
                        node_index = drc.drc_index_start;
                        for (j = 0; j < drc.num_sequential_elems; j++) {
                                if (index == node_index)
                                        return np;
                                node_index += drc.sequential_inc;
                        }
                }
        }

        return NULL;
}

static struct device_node *
get_device_node_with_drc_indexes(u32 drc_index)
{
        struct device_node *np = NULL;
        u32 nr_indexes, index;
        int i, rc;

        for_each_node_with_property(np, "ibm,drc-indexes") {
                /*
                 * First element in the array is the total number of
                 * DRC indexes returned.
                 */
                rc = of_property_read_u32_index(np, "ibm,drc-indexes",
                                                0, &nr_indexes);
                if (rc)
                        goto out_put_np;

                /*
                 * Retrieve DRC index from the list and return the
                 * device node if matched with the specified index.
                 */
                for (i = 0; i < nr_indexes; i++) {
                        rc = of_property_read_u32_index(np, "ibm,drc-indexes",
                                                        i+1, &index);
                        if (rc)
                                goto out_put_np;

                        if (drc_index == index)
                                return np;
                }
        }

        return NULL;

out_put_np:
        of_node_put(np);
        return NULL;
}

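/*
 * Handle a "dt add" request: reject indexes that are already present in the
 * device tree, locate the parent node advertising the DRC index (via
 * ibm,drc-info or, failing that, ibm,drc-indexes), run configure-connector
 * and attach the resulting subtree through an OF changeset.
 */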
static int dlpar_hp_dt_add(u32 index)
{
        struct device_node *np, *nodes;
        struct of_changeset ocs;
        int rc;

        /*
         * Do not add device node(s) if already exists in the
         * device tree.
         */
        np = get_device_node_with_drc_index(index);
        if (np) {
                pr_err("%s: Adding device node for index (%d), but "
                       "already exists in the device tree\n",
                       __func__, index);
                rc = -EINVAL;
                goto out;
        }

        /*
         * Recent FW provides ibm,drc-info property. So search
         * for the user specified DRC index from ibm,drc-info
         * property. If this property is not available, search
         * in the indexes array from ibm,drc-indexes property.
         */
        np = get_device_node_with_drc_info(index);

        if (!np) {
                np = get_device_node_with_drc_indexes(index);
                if (!np)
                        return -EIO;
        }

        /* Next, configure the connector. */
        nodes = dlpar_configure_connector(cpu_to_be32(index), np);
        if (!nodes) {
                rc = -EIO;
                goto out;
        }

        /*
         * Add the new nodes from dlpar_configure_connector() onto
         * the device-tree.
         */
        of_changeset_init(&ocs);
        rc = dlpar_changeset_attach_cc_nodes(&ocs, nodes);

        if (!rc)
                rc = of_changeset_apply(&ocs);
        else
                dlpar_free_cc_nodes(nodes);

        of_changeset_destroy(&ocs);

out:
        of_node_put(np);
        return rc;
}

static int changeset_detach_node_recursive(struct of_changeset *ocs,
                                           struct device_node *node)
{
        struct device_node *child;
        int rc;

        for_each_child_of_node(node, child) {
                rc = changeset_detach_node_recursive(ocs, child);
                if (rc) {
                        of_node_put(child);
                        return rc;
                }
        }

        return of_changeset_detach_node(ocs, node);
}

static int dlpar_hp_dt_remove(u32 drc_index)
{
        struct device_node *np;
        struct of_changeset ocs;
        u32 index;
        int rc = 0;

        /*
         * Prune all nodes with a matching index.
         */
        of_changeset_init(&ocs);

        for_each_node_with_property(np, "ibm,my-drc-index") {
                rc = of_property_read_u32(np, "ibm,my-drc-index", &index);
                if (rc) {
                        pr_err("%s: %pOF: of_property_read_u32 %s: %d\n",
                               __func__, np, "ibm,my-drc-index", rc);
                        of_node_put(np);
                        goto out;
                }

                if (index == drc_index) {
                        rc = changeset_detach_node_recursive(&ocs, np);
                        if (rc) {
                                of_node_put(np);
                                goto out;
                        }
                }
        }

        rc = of_changeset_apply(&ocs);

out:
        of_changeset_destroy(&ocs);
        return rc;
}

static int dlpar_hp_dt(struct pseries_hp_errorlog *phpe)
{
        u32 drc_index;
        int rc;

        if (phpe->id_type != PSERIES_HP_ELOG_ID_DRC_INDEX)
                return -EINVAL;

        drc_index = be32_to_cpu(phpe->_drc_u.drc_index);

        lock_device_hotplug();

        switch (phpe->action) {
        case PSERIES_HP_ELOG_ACTION_ADD:
                rc = dlpar_hp_dt_add(drc_index);
                break;
        case PSERIES_HP_ELOG_ACTION_REMOVE:
                rc = dlpar_hp_dt_remove(drc_index);
                break;
        default:
                pr_err("Invalid action (%d) specified\n", phpe->action);
                rc = -EINVAL;
                break;
        }

        unlock_device_hotplug();

        return rc;
}

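/*
 * Dispatch a hotplug error log to the handler for its resource type
 * (memory, CPU, persistent memory or device tree).
 */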
int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
        int rc;

        switch (hp_elog->resource) {
        case PSERIES_HP_ELOG_RESOURCE_MEM:
                rc = dlpar_memory(hp_elog);
                break;
        case PSERIES_HP_ELOG_RESOURCE_CPU:
                rc = dlpar_cpu(hp_elog);
                break;
        case PSERIES_HP_ELOG_RESOURCE_PMEM:
                rc = dlpar_hp_pmem(hp_elog);
                break;
        case PSERIES_HP_ELOG_RESOURCE_DT:
                rc = dlpar_hp_dt(hp_elog);
                break;

        default:
                pr_warn_ratelimited("Invalid resource (%d) specified\n",
                                    hp_elog->resource);
                rc = -EINVAL;
        }

        return rc;
}

static void pseries_hp_work_fn(struct work_struct *work)
{
        struct pseries_hp_work *hp_work =
                        container_of(work, struct pseries_hp_work, work);

        handle_dlpar_errorlog(hp_work->errlog);

        kfree(hp_work->errlog);
        kfree(work);
}

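/*
 * Queue a hotplug event for deferred handling on the ordered DLPAR
 * workqueue. The error log is copied with GFP_ATOMIC since callers may be
 * running in atomic context.
 */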
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
{
        struct pseries_hp_work *work;
        struct pseries_hp_errorlog *hp_errlog_copy;

        hp_errlog_copy = kmemdup(hp_errlog, sizeof(*hp_errlog), GFP_ATOMIC);
        if (!hp_errlog_copy)
                return;

        work = kmalloc(sizeof(struct pseries_hp_work), GFP_ATOMIC);
        if (work) {
                INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
                work->errlog = hp_errlog_copy;
                queue_work(pseries_hp_wq, (struct work_struct *)work);
        } else {
                kfree(hp_errlog_copy);
        }
}

static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
        char *arg;

        arg = strsep(cmd, " ");
        if (!arg)
                return -EINVAL;

        if (sysfs_streq(arg, "memory")) {
                hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
        } else if (sysfs_streq(arg, "cpu")) {
                hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
        } else if (sysfs_streq(arg, "dt")) {
                hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_DT;
        } else {
                pr_err("Invalid resource specified.\n");
                return -EINVAL;
        }

        return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
        char *arg;

        arg = strsep(cmd, " ");
        if (!arg)
                return -EINVAL;

        if (sysfs_streq(arg, "add")) {
                hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
        } else if (sysfs_streq(arg, "remove")) {
                hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
        } else {
                pr_err("Invalid action specified.\n");
                return -EINVAL;
        }

        return 0;
}

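/*
 * Parse the <id_type> <id> portion of a request. Three forms are accepted:
 * "indexed-count <count> <index>", "index <index>" and "count <count>".
 */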
static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
        char *arg;
        u32 count, index;

        arg = strsep(cmd, " ");
        if (!arg)
                return -EINVAL;

        if (sysfs_streq(arg, "indexed-count")) {
                hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC count specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &count)) {
                        pr_err("Invalid DRC count specified.\n");
                        return -EINVAL;
                }

                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC Index specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &index)) {
                        pr_err("Invalid DRC Index specified.\n");
                        return -EINVAL;
                }

                hp_elog->_drc_u.ic.count = cpu_to_be32(count);
                hp_elog->_drc_u.ic.index = cpu_to_be32(index);
        } else if (sysfs_streq(arg, "index")) {
                hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC Index specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &index)) {
                        pr_err("Invalid DRC Index specified.\n");
                        return -EINVAL;
                }

                hp_elog->_drc_u.drc_index = cpu_to_be32(index);
        } else if (sysfs_streq(arg, "count")) {
                hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC count specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &count)) {
                        pr_err("Invalid DRC count specified.\n");
                        return -EINVAL;
                }

                hp_elog->_drc_u.drc_count = cpu_to_be32(count);
        } else {
                pr_err("Invalid id_type specified.\n");
                return -EINVAL;
        }

        return 0;
}

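/*
 * sysfs store handler for /sys/kernel/dlpar. Requests take the form
 * "<resource> <action> <id_type> <id>", for example (illustrative only,
 * the DRC index below is an arbitrary placeholder):
 *
 *	echo "dt add index 0x9001" > /sys/kernel/dlpar
 */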
static ssize_t dlpar_store(const struct class *class, const struct class_attribute *attr,
                           const char *buf, size_t count)
{
        struct pseries_hp_errorlog hp_elog;
        char *argbuf;
        char *args;
        int rc;

        args = argbuf = kstrdup(buf, GFP_KERNEL);
        if (!argbuf)
                return -ENOMEM;

        /*
         * Parse out the request from the user, this will be in the form:
         *  <resource> <action> <id_type> <id>
         */
        rc = dlpar_parse_resource(&args, &hp_elog);
        if (rc)
                goto dlpar_store_out;

        rc = dlpar_parse_action(&args, &hp_elog);
        if (rc)
                goto dlpar_store_out;

        rc = dlpar_parse_id_type(&args, &hp_elog);
        if (rc)
                goto dlpar_store_out;

        rc = handle_dlpar_errorlog(&hp_elog);

dlpar_store_out:
        kfree(argbuf);

        if (rc)
                pr_err("Could not handle DLPAR request \"%s\"\n", buf);

        return rc ? rc : count;
}

static ssize_t dlpar_show(const struct class *class, const struct class_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%s\n", "memory,cpu,dt");
}

static CLASS_ATTR_RW(dlpar);

int __init dlpar_workqueue_init(void)
{
        if (pseries_hp_wq)
                return 0;

        pseries_hp_wq = alloc_ordered_workqueue("pseries hotplug workqueue", 0);

        return pseries_hp_wq ? 0 : -ENOMEM;
}

static int __init dlpar_sysfs_init(void)
{
        int rc;

        rc = dlpar_workqueue_init();
        if (rc)
                return rc;

        return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, dlpar_sysfs_init);