/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef __CXL_CORE_H__
#define __CXL_CORE_H__

#include <cxl/mailbox.h>
#include <linux/rwsem.h>

extern const struct device_type cxl_nvdimm_bridge_type;
extern const struct device_type cxl_nvdimm_type;
extern const struct device_type cxl_pmu_type;

extern struct attribute_group cxl_base_attribute_group;

enum cxl_detach_mode {
	DETACH_ONLY,
	DETACH_INVALIDATE,
};

#ifdef CONFIG_CXL_REGION
extern struct device_attribute dev_attr_create_pmem_region;
extern struct device_attribute dev_attr_create_ram_region;
extern struct device_attribute dev_attr_delete_region;
extern struct device_attribute dev_attr_region;
extern const struct device_type cxl_pmem_region_type;
extern const struct device_type cxl_dax_region_type;
extern const struct device_type cxl_region_type;

int cxl_decoder_detach(struct cxl_region *cxlr,
		       struct cxl_endpoint_decoder *cxled, int pos,
		       enum cxl_detach_mode mode);

#define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
#define CXL_REGION_TYPE(x) (&cxl_region_type)
#define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
#define CXL_PMEM_REGION_TYPE(x) (&cxl_pmem_region_type)
#define CXL_DAX_REGION_TYPE(x) (&cxl_dax_region_type)
int cxl_region_init(void);
void cxl_region_exit(void);
int cxl_get_poison_by_endpoint(struct cxl_port *port);
struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
		   u64 dpa);
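
/*
 * Illustrative sketch only (not a declaration in this header): a caller that
 * needs to translate a device physical address (DPA) to a host physical
 * address (HPA) typically looks up the mapping region first, e.g.:
 *
 *	struct cxl_region *cxlr = cxl_dpa_to_region(cxlmd, dpa);
 *	u64 hpa = cxlr ? cxl_dpa_to_hpa(cxlr, cxlmd, dpa) : ULLONG_MAX;
 */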

#else
static inline u64 cxl_dpa_to_hpa(struct cxl_region *cxlr,
				 const struct cxl_memdev *cxlmd, u64 dpa)
{
	return ULLONG_MAX;
}
static inline
struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
{
	return NULL;
}
static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
	return 0;
}
static inline int cxl_decoder_detach(struct cxl_region *cxlr,
				     struct cxl_endpoint_decoder *cxled,
				     int pos, enum cxl_detach_mode mode)
{
	return 0;
}
static inline int cxl_region_init(void)
{
	return 0;
}
static inline void cxl_region_exit(void)
{
}
#define CXL_REGION_ATTR(x) NULL
#define CXL_REGION_TYPE(x) NULL
#define SET_CXL_REGION_ATTR(x)
#define CXL_PMEM_REGION_TYPE(x) NULL
#define CXL_DAX_REGION_TYPE(x) NULL
#endif
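
/*
 * Illustrative sketch only (hypothetical attribute array, not declared here):
 * SET_CXL_REGION_ATTR() expands to an attribute pointer entry when
 * CONFIG_CXL_REGION=y and to nothing otherwise, so region-specific sysfs
 * attributes simply drop out of an attribute list, e.g.:
 *
 *	static struct attribute *example_decoder_attrs[] = {
 *		SET_CXL_REGION_ATTR(create_pmem_region)
 *		SET_CXL_REGION_ATTR(delete_region)
 *		NULL,
 *	};
 */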

struct cxl_send_command;
struct cxl_mem_query_commands;
int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
		  struct cxl_mem_query_commands __user *q);
int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s);
void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
				   resource_size_t length);

struct dentry *cxl_debugfs_create_dir(const char *dir);
int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
		     enum cxl_partition_mode mode);
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size);
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr);

enum cxl_rcrb {
	CXL_RCRB_DOWNSTREAM,
	CXL_RCRB_UPSTREAM,
};
struct cxl_rcrb_info;
resource_size_t __rcrb_to_component(struct device *dev,
				    struct cxl_rcrb_info *ri,
				    enum cxl_rcrb which);
u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);

#define PCI_RCRB_CAP_LIST_ID_MASK	GENMASK(7, 0)
#define PCI_RCRB_CAP_HDR_ID_MASK	GENMASK(7, 0)
#define PCI_RCRB_CAP_HDR_NEXT_MASK	GENMASK(15, 8)
#define PCI_CAP_EXP_SIZEOF		0x3c

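/*
 * Illustrative sketch only, assuming a capability header value @cap_hdr read
 * from the RCRB-mapped register block: the masks above lend themselves to
 * FIELD_GET() extraction while walking the capability list, e.g.:
 *
 *	u8 cap_id = FIELD_GET(PCI_RCRB_CAP_HDR_ID_MASK, cap_hdr);
 *	u8 next = FIELD_GET(PCI_RCRB_CAP_HDR_NEXT_MASK, cap_hdr);
 */
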
struct cxl_rwsem {
	/*
	 * All changes to HPA (interleave configuration) occur with this
	 * lock held for write.
	 */
	struct rw_semaphore region;
	/*
	 * All changes to a device DPA space occur with this lock held
	 * for write.
	 */
	struct rw_semaphore dpa;
};

extern struct cxl_rwsem cxl_rwsem;
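
/*
 * Illustrative sketch only (not a helper defined in this header): per the
 * comments in struct cxl_rwsem, a path that changes the interleave (HPA)
 * configuration holds cxl_rwsem.region for write, while readers of the
 * current configuration take it for read, e.g.:
 *
 *	down_write(&cxl_rwsem.region);
 *	... modify region / interleave state ...
 *	up_write(&cxl_rwsem.region);
 */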

int cxl_memdev_init(void);
void cxl_memdev_exit(void);
void cxl_mbox_init(void);

enum cxl_poison_trace_type {
	CXL_POISON_TRACE_LIST,
	CXL_POISON_TRACE_INJECT,
	CXL_POISON_TRACE_CLEAR,
};

long cxl_pci_get_latency(struct pci_dev *pdev);
int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c);
int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
				       enum access_coordinate_class access);
bool cxl_need_node_perf_attrs_update(int nid);
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
					struct access_coordinate *c);

int cxl_ras_init(void);
void cxl_ras_exit(void);
int cxl_gpf_port_setup(struct cxl_dport *dport);

#ifdef CONFIG_CXL_FEATURES
struct cxl_feat_entry *
cxl_feature_info(struct cxl_features_state *cxlfs, const uuid_t *uuid);
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
		       enum cxl_get_feat_selection selection,
		       void *feat_out, size_t feat_out_size, u16 offset,
		       u16 *return_code);
int cxl_set_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
		    u8 feat_version, const void *feat_data,
		    size_t feat_data_size, u32 feat_flag, u16 offset,
		    u16 *return_code);
#endif

#endif /* __CXL_CORE_H__ */