// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Bootlin
 *
 */
#include "common.h"
#include "netlink.h"

#include <linux/phy.h>
#include <linux/phy_link_topology.h>
#include <linux/sfp.h>
#include <net/netdev_lock.h>

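/* Parsed request info: the common ethnl request header plus the PHY
 * device node resolved from the netdev's link topology.
 */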
struct phy_req_info {
	struct ethnl_req_info	base;
	struct phy_device_node	*pdn;
};

#define PHY_REQINFO(__req_base) \
	container_of(__req_base, struct phy_req_info, base)

const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1] = {
	[ETHTOOL_A_PHY_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
};

/* Compute the space needed for a PHY_GET reply. Caller holds rtnl. */
static ssize_t
ethnl_phy_reply_size(const struct ethnl_req_info *req_base,
		     struct netlink_ext_ack *extack)
{
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device_node *pdn = req_info->pdn;
	struct phy_device *phydev = pdn->phy;
	size_t size = 0;

	ASSERT_RTNL();

	/* ETHTOOL_A_PHY_INDEX */
	size += nla_total_size(sizeof(u32));

	/* ETHTOOL_A_PHY_DRVNAME */
	if (phydev->drv)
		size += nla_total_size(strlen(phydev->drv->name) + 1);

	/* ETHTOOL_A_PHY_NAME */
	size += nla_total_size(strlen(dev_name(&phydev->mdio.dev)) + 1);

	/* ETHTOOL_A_PHY_UPSTREAM_TYPE */
	size += nla_total_size(sizeof(u32));

	if (phy_on_sfp(phydev)) {
		const char *upstream_sfp_name = sfp_get_name(pdn->parent_sfp_bus);

		/* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */
		if (upstream_sfp_name)
			size += nla_total_size(strlen(upstream_sfp_name) + 1);

		/* ETHTOOL_A_PHY_UPSTREAM_INDEX */
		size += nla_total_size(sizeof(u32));
	}

	/* ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME */
	if (phydev->sfp_bus) {
		const char *sfp_name = sfp_get_name(phydev->sfp_bus);

		if (sfp_name)
			size += nla_total_size(strlen(sfp_name) + 1);
	}

	return size;
}

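/* Fill a single PHY_GET reply from the PHY node stored in the request
 * info. The reply header must already have been written by the caller.
 */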
static int
ethnl_phy_fill_reply(const struct ethnl_req_info *req_base, struct sk_buff *skb)
{
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device_node *pdn = req_info->pdn;
	struct phy_device *phydev = pdn->phy;
	enum phy_upstream ptype;

	ptype = pdn->upstream_type;

	if (nla_put_u32(skb, ETHTOOL_A_PHY_INDEX, phydev->phyindex) ||
	    nla_put_string(skb, ETHTOOL_A_PHY_NAME, dev_name(&phydev->mdio.dev)) ||
	    nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, ptype))
		return -EMSGSIZE;

	if (phydev->drv &&
	    nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, phydev->drv->name))
		return -EMSGSIZE;

	if (ptype == PHY_UPSTREAM_PHY) {
		struct phy_device *upstream = pdn->upstream.phydev;
		const char *sfp_upstream_name;

		/* Parent index */
		if (nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX, upstream->phyindex))
			return -EMSGSIZE;

		if (pdn->parent_sfp_bus) {
			sfp_upstream_name = sfp_get_name(pdn->parent_sfp_bus);
			if (sfp_upstream_name &&
			    nla_put_string(skb, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME,
					   sfp_upstream_name))
				return -EMSGSIZE;
		}
	}

	if (phydev->sfp_bus) {
		const char *sfp_name = sfp_get_name(phydev->sfp_bus);

		if (sfp_name &&
		    nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME,
				   sfp_name))
			return -EMSGSIZE;
	}

	return 0;
}

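/* Resolve the PHY targeted by the request header into a
 * phy_device_node. req_info->pdn is left NULL when the netdev has no
 * matching PHY or no link topology; callers return early in that case.
 */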
static int ethnl_phy_parse_request(struct ethnl_req_info *req_base,
				   struct nlattr **tb,
				   struct netlink_ext_ack *extack)
{
	struct phy_link_topology *topo = req_base->dev->link_topo;
	struct phy_req_info *req_info = PHY_REQINFO(req_base);
	struct phy_device *phydev;

	phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PHY_HEADER,
				      extack);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (!topo)
		return 0;

	req_info->pdn = xa_load(&topo->phys, phydev->phyindex);

	return 0;
}

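/* PHY_GET request handler: resolve the target netdev and PHY under
 * rtnl and the netdev instance lock, then build a single reply.
 */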
int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct phy_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	ret = ethnl_parse_header_dev_get(&req_info.base,
					 tb[ETHTOOL_A_PHY_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;

	rtnl_lock();
	netdev_lock_ops(req_info.base.dev);

	ret = ethnl_phy_parse_request(&req_info.base, tb, info->extack);
	if (ret < 0)
		goto err_unlock;

	/* No PHY, return early */
	if (!req_info.pdn)
		goto err_unlock;

	ret = ethnl_phy_reply_size(&req_info.base, info->extack);
	if (ret < 0)
		goto err_unlock;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.base.dev,
				ETHTOOL_MSG_PHY_GET_REPLY,
				ETHTOOL_A_PHY_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = ethnl_phy_fill_reply(&req_info.base, rskb);
	if (ret)
		goto err_free_msg;

	netdev_unlock_ops(req_info.base.dev);
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info.base);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock:
	netdev_unlock_ops(req_info.base.dev);
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info.base);
	return ret;
}

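/* Dump iteration state, stored in cb->ctx: the request info plus the
 * netdev and PHY xarray cursors used to resume an interrupted dump.
 */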
struct ethnl_phy_dump_ctx {
	struct phy_req_info	*phy_req_info;
	unsigned long		ifindex;
	unsigned long		phy_index;
};

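/* Dump start: allocate the request info and parse the request header.
 * A header naming no device means "dump every netdev".
 */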
int ethnl_phy_start(struct netlink_callback *cb)
{
	const struct genl_info *info = genl_info_dump(cb);
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	int ret;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	ctx->phy_req_info = kzalloc(sizeof(*ctx->phy_req_info), GFP_KERNEL);
	if (!ctx->phy_req_info)
		return -ENOMEM;

	ret = ethnl_parse_header_dev_get(&ctx->phy_req_info->base,
					 info->attrs[ETHTOOL_A_PHY_HEADER],
					 sock_net(cb->skb->sk), cb->extack,
					 false);
	ctx->ifindex = 0;
	ctx->phy_index = 0;

	if (ret)
		kfree(ctx->phy_req_info);

	return ret;
}

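/* Dump teardown: release the netdev reference taken in
 * ethnl_phy_start(), if any, and free the request info.
 */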
int ethnl_phy_done(struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;

	if (ctx->phy_req_info->base.dev)
		ethnl_parse_header_dev_put(&ctx->phy_req_info->base);

	kfree(ctx->phy_req_info);

	return 0;
}

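/* Emit one PHY_GET message per PHY in dev's link topology, resuming
 * from ctx->phy_index so that a filled skb can be continued later.
 */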
static int ethnl_phy_dump_one_dev(struct sk_buff *skb, struct net_device *dev,
				  struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	struct phy_req_info *pri = ctx->phy_req_info;
	struct phy_device_node *pdn;
	int ret = 0;
	void *ehdr;

	if (!dev->link_topo)
		return 0;

	xa_for_each_start(&dev->link_topo->phys, ctx->phy_index, pdn, ctx->phy_index) {
		ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_PHY_GET_REPLY);
		if (!ehdr) {
			ret = -EMSGSIZE;
			break;
		}

		ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_PHY_HEADER);
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			break;
		}

		pri->pdn = pdn;
		ret = ethnl_phy_fill_reply(&pri->base, skb);
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			break;
		}

		genlmsg_end(skb, ehdr);
	}

	return ret;
}

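/* Dump handler: walk either the single netdev named in the request
 * header or every netdev in the namespace, resetting the PHY cursor
 * between devices.
 */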
int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();

	if (ctx->phy_req_info->base.dev) {
		dev = ctx->phy_req_info->base.dev;
		netdev_lock_ops(dev);
		ret = ethnl_phy_dump_one_dev(skb, dev, cb);
		netdev_unlock_ops(dev);
	} else {
		for_each_netdev_dump(net, dev, ctx->ifindex) {
			netdev_lock_ops(dev);
			ret = ethnl_phy_dump_one_dev(skb, dev, cb);
			netdev_unlock_ops(dev);
			if (ret)
				break;

			ctx->phy_index = 0;
		}
	}
	rtnl_unlock();

	return ret;
}