// SPDX-License-Identifier: GPL-2.0-only

#include <linux/dcache.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/sock_diag.h>
#include <linux/types.h>
#include <linux/user_namespace.h>
#include <net/af_unix.h>
#include <net/netlink.h>
#include <net/tcp_states.h>
#include <uapi/linux/unix_diag.h>

#include "af_unix.h"

/* UNIX_DIAG_NAME: the address the socket is bound to, if any. */
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	/* might or might not have a hash table lock */
	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);

	if (!addr)
		return 0;

	return nla_put(nlskb, UNIX_DIAG_NAME,
		       addr->len - offsetof(struct sockaddr_un, sun_path),
		       addr->name->sun_path);
}

/* UNIX_DIAG_VFS: device and inode of the filesystem object the socket
 * is bound to, if any.
 */
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;

	if (dentry) {
		struct unix_diag_vfs uv = {
			.udiag_vfs_ino = d_backing_inode(dentry)->i_ino,
			.udiag_vfs_dev = dentry->d_sb->s_dev,
		};

		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
	}

	return 0;
}

/* UNIX_DIAG_PEER: inode number of the connected peer socket, if any. */
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		ino = sock_i_ino(peer);
		sock_put(peer);

		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
}

/* UNIX_DIAG_ICONS: for a listener, the inode numbers of the sockets
 * queued for accept().  The receive queue lock keeps qlen stable
 * between the reservation and the walk that fills it.
 */
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);

		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb)
			buf[i++] = sock_i_ino(unix_peer(skb->sk));

		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}

/* UNIX_DIAG_RQLEN: for a listener, the pending connection count and the
 * backlog limit; otherwise, the bytes queued for reading and writing.
 */
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen rql;

	if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
		rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
		rql.udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql.udiag_rqueue = (u32) unix_inq_len(sk);
		rql.udiag_wqueue = (u32) unix_outq_len(sk);
	}

	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}

/* UNIX_DIAG_UID: owner UID, mapped into the requester's user namespace. */
static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
			    struct user_namespace *user_ns)
{
	uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}

/* Fill one reply message for @sk with the attributes requested in
 * @req->udiag_show, cancelling the whole message if any of them
 * does not fit.
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = READ_ONCE(sk->sk_state);
	rep->pad = 0;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_UID) &&
	    sk_diag_dump_uid(sk, skb, user_ns))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* Walk every hash bucket; cb->args[0] and cb->args[1] record the bucket
 * and the offset within it, so an interrupted dump resumes where it
 * left off.
 */
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int num, s_num, slot, s_slot;
	struct unix_diag_req *req;

	req = nlmsg_data(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	for (slot = s_slot; slot < UNIX_HASH_SIZE; s_num = 0, slot++) {
		struct sock *sk;

		num = 0;
		spin_lock(&net->unx.table.locks[slot]);
		sk_for_each(sk, &net->unx.table.buckets[slot]) {
			int sk_ino;

			if (num < s_num)
				goto next;

			if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
				goto next;

			sk_ino = sock_i_ino(sk);
			if (!sk_ino)
				goto next;

			if (sk_diag_fill(sk, skb, req, sk_user_ns(skb->sk),
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI, sk_ino) < 0) {
				spin_unlock(&net->unx.table.locks[slot]);
				goto done;
			}
next:
			num++;
		}
		spin_unlock(&net->unx.table.locks[slot]);
	}
done:
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}

static struct sock *unix_lookup_by_ino(struct net *net, unsigned int ino)
{
	struct sock *sk;
	int i;

	for (i = 0; i < UNIX_HASH_SIZE; i++) {
		spin_lock(&net->unx.table.locks[i]);
		sk_for_each(sk, &net->unx.table.buckets[i]) {
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&net->unx.table.locks[i]);
				return sk;
			}
		}
		spin_unlock(&net->unx.table.locks[i]);
	}
	return NULL;
}

/* Answer a single-socket query, retrying with a larger reply buffer
 * (up to a page) until the requested attributes fit.
 */
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	struct net *net = sock_net(in_skb->sk);
	unsigned int extra_len;
	struct sk_buff *rep;
	struct sock *sk;
	int err;

	err = -EINVAL;
	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(net, req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);

out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}

static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(sock_net(skb->sk)->diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler unix_diag_handler = {
	.owner = THIS_MODULE,
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("UNIX socket monitoring via SOCK_DIAG");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);
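
/* Usage note: the interface implemented above is exercised from
 * userspace through a NETLINK_SOCK_DIAG socket, as described in
 * sock_diag(7).  What follows is a minimal illustrative sketch of a
 * standalone userspace program (not kernel code) that requests a dump
 * of all AF_UNIX sockets and prints their inode numbers and states;
 * error handling is abbreviated.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *	#include <linux/sock_diag.h>
 *	#include <linux/unix_diag.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
 *		struct {
 *			struct nlmsghdr nlh;
 *			struct unix_diag_req req;
 *		} msg = {
 *			.nlh = {
 *				.nlmsg_len = sizeof(msg),
 *				.nlmsg_type = SOCK_DIAG_BY_FAMILY,
 *				.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *			},
 *			.req = {
 *				.sdiag_family = AF_UNIX,
 *				.udiag_states = -1,	// every state
 *				.udiag_show = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER |
 *					      UDIAG_SHOW_RQLEN,
 *			},
 *		};
 *		char buf[8192];
 *		int fd, len;
 *
 *		fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_SOCK_DIAG);
 *		if (fd < 0 || sendto(fd, &msg, sizeof(msg), 0,
 *				     (struct sockaddr *)&nladdr, sizeof(nladdr)) < 0)
 *			return 1;
 *
 *		for (;;) {
 *			struct nlmsghdr *h = (struct nlmsghdr *)buf;
 *
 *			len = recv(fd, buf, sizeof(buf), 0);
 *			if (len <= 0)
 *				break;
 *
 *			for (; NLMSG_OK(h, len); h = NLMSG_NEXT(h, len)) {
 *				struct unix_diag_msg *m = NLMSG_DATA(h);
 *
 *				if (h->nlmsg_type == NLMSG_DONE) {
 *					close(fd);
 *					return 0;
 *				}
 *				if (h->nlmsg_type == NLMSG_ERROR) {
 *					close(fd);
 *					return 1;
 *				}
 *				printf("ino %u state %u\n", m->udiag_ino,
 *				       (unsigned int)m->udiag_state);
 *			}
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */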