1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * SCSI Primary Commands (SPC) parsing and emulation.
4 *
5 * (c) Copyright 2002-2013 Datera, Inc.
6 *
7 * Nicholas A. Bellinger <nab@kernel.org>
8 */
9
10 #include <linux/hex.h>
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/unaligned.h>
14
15 #include <scsi/scsi_proto.h>
16 #include <scsi/scsi_common.h>
17 #include <scsi/scsi_tcq.h>
18
19 #include <target/target_core_base.h>
20 #include <target/target_core_backend.h>
21 #include <target/target_core_fabric.h>
22
23 #include "target_core_internal.h"
24 #include "target_core_alua.h"
25 #include "target_core_pr.h"
26 #include "target_core_ua.h"
27 #include "target_core_xcopy.h"
28
29 #define PD_TEXT_ID_INFO_HDR_LEN 4
30
spc_fill_alua_data(struct se_lun * lun,unsigned char * buf)31 static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
32 {
33 struct t10_alua_tg_pt_gp *tg_pt_gp;
34
35 /*
36 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
37 */
38 buf[5] = 0x80;
39
40 /*
41 * Set TPGS field for explicit and/or implicit ALUA access type
42 * and opteration.
43 *
44 * See spc4r17 section 6.4.2 Table 135
45 */
46 rcu_read_lock();
47 tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
48 if (tg_pt_gp)
49 buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
50 rcu_read_unlock();
51 }
52
53 static u16
spc_find_scsi_transport_vd(int proto_id)54 spc_find_scsi_transport_vd(int proto_id)
55 {
56 switch (proto_id) {
57 case SCSI_PROTOCOL_FCP:
58 return SCSI_VERSION_DESCRIPTOR_FCP4;
59 case SCSI_PROTOCOL_ISCSI:
60 return SCSI_VERSION_DESCRIPTOR_ISCSI;
61 case SCSI_PROTOCOL_SAS:
62 return SCSI_VERSION_DESCRIPTOR_SAS3;
63 case SCSI_PROTOCOL_SBP:
64 return SCSI_VERSION_DESCRIPTOR_SBP3;
65 case SCSI_PROTOCOL_SRP:
66 return SCSI_VERSION_DESCRIPTOR_SRP;
67 default:
68 pr_warn("Cannot find VERSION DESCRIPTOR value for unknown SCSI"
69 " transport PROTOCOL IDENTIFIER %#x\n", proto_id);
70 return 0;
71 }
72 }
73
/*
 * Emulate standard INQUIRY data (EVPD=0), see spc4r17 section 6.4.2.
 * @buf must be zeroed by the caller; only the non-zero fields are
 * filled in here. Always returns 0 (success).
 */
sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	/* Set RMB (removable media) for tape devices */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		buf[1] = 0x80;

	buf[2] = 0x06; /* SPC-4 */

	/*
	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
	 *
	 * SPC4 says:
	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
	 *   standard INQUIRY data is in the format defined in this
	 *   standard. Response data format values less than 2h are
	 *   obsolete. Response data format values greater than 2h are
	 *   reserved.
	 */
	buf[3] = 2;

	/*
	 * Enable SCCS and TPGS fields for Emulated ALUA
	 */
	spc_fill_alua_data(lun, buf);

	/*
	 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
	 */
	if (dev->dev_attrib.emulate_3pc)
		buf[5] |= 0x8;
	/*
	 * Set Protection (PROTECT) bit when DIF has been enabled on the
	 * device, and the fabric supports VERIFY + PASS.  Also report
	 * PROTECT=1 if sess_prot_type has been configured to allow T10-PI
	 * to unprotected devices.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)
			buf[5] |= 0x1;
	}

	/*
	 * Set MULTIP bit to indicate presence of multiple SCSI target ports
	 */
	if (dev->export_count > 1)
		buf[6] |= 0x10;

	buf[7] = 0x2; /* CmdQue=1 */

	/*
	 * ASCII data fields described as being left-aligned shall have any
	 * unused bytes at the end of the field (i.e., highest offset) and the
	 * unused bytes shall be filled with ASCII space characters (20h).
	 */
	memset(&buf[8], 0x20,
	       INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN + INQUIRY_REVISION_LEN);
	memcpy(&buf[8], dev->t10_wwn.vendor,
	       strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
	memcpy(&buf[16], dev->t10_wwn.model,
	       strnlen(dev->t10_wwn.model, INQUIRY_MODEL_LEN));
	memcpy(&buf[32], dev->t10_wwn.revision,
	       strnlen(dev->t10_wwn.revision, INQUIRY_REVISION_LEN));

	/*
	 * Set the VERSION DESCRIPTOR fields
	 */
	put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SAM5, &buf[58]);
	put_unaligned_be16(spc_find_scsi_transport_vd(tpg->proto_id), &buf[60]);
	put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SPC4, &buf[62]);
	/* Only disks additionally report the SBC-3 version descriptor */
	if (cmd->se_dev->transport->get_device_type(dev) == TYPE_DISK)
		put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SBC3, &buf[64]);

	/* Set additional length to 91: bytes 5 through 95 follow byte 4 */
	buf[4] = 91;

	return 0;
}
156 EXPORT_SYMBOL(spc_emulate_inquiry_std);
157
158 /* unit serial number */
159 static sense_reason_t
spc_emulate_evpd_80(struct se_cmd * cmd,unsigned char * buf)160 spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
161 {
162 struct se_device *dev = cmd->se_dev;
163 u16 len;
164
165 if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
166 len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
167 len++; /* Extra Byte for NULL Terminator */
168 buf[3] = len;
169 }
170 return 0;
171 }
172
173 /*
174 * Generate NAA IEEE Registered Extended designator
175 */
spc_gen_naa_6h_vendor_specific(struct se_device * dev,unsigned char * buf)176 void spc_gen_naa_6h_vendor_specific(struct se_device *dev,
177 unsigned char *buf)
178 {
179 unsigned char *p = &dev->t10_wwn.unit_serial[0];
180 u32 company_id = dev->t10_wwn.company_id;
181 int cnt, off = 0;
182 bool next = true;
183
184 /*
185 * Start NAA IEEE Registered Extended Identifier/Designator
186 */
187 buf[off] = 0x6 << 4;
188
189 /* IEEE COMPANY_ID */
190 buf[off++] |= (company_id >> 20) & 0xf;
191 buf[off++] = (company_id >> 12) & 0xff;
192 buf[off++] = (company_id >> 4) & 0xff;
193 buf[off] = (company_id & 0xf) << 4;
194
195 /*
196 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
197 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
198 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
199 * to complete the payload. These are based from VPD=0x80 PRODUCT SERIAL
200 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
201 * per device uniqeness.
202 */
203 for (cnt = off + 13; *p && off < cnt; p++) {
204 int val = hex_to_bin(*p);
205
206 if (val < 0)
207 continue;
208
209 if (next) {
210 next = false;
211 buf[off++] |= val;
212 } else {
213 next = true;
214 buf[off] = val << 4;
215 }
216 }
217 }
218
/*
 * Device Identification VPD page (0x83); for a complete list of
 * DESIGNATOR TYPEs see spc4r17 Table 459.
 *
 * Emits, in order: an NAA designator (only when a unit serial has been
 * configured), a T10 vendor ID designator, and the target-port scoped
 * designators (relative target port, target port group, logical unit
 * group, SCSI name string) plus a target-device SCSI name string.
 * 'len' accumulates the PAGE LENGTH written to buf[2] at the end;
 * 'off' tracks the current write offset into @buf.
 */
sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_portal_group *tpg = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	unsigned char *prod = &dev->t10_wwn.model[0];
	u32 off = 0;
	u16 len = 0, id_len;

	/* Designators start after the 4-byte VPD page header */
	off = 4;

	/*
	 * NAA IEEE Registered Extended Assigned designator format, see
	 * spc4r17 section 7.7.3.6.5
	 *
	 * We depend upon a target_core_mod/ConfigFS provided
	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
	 * value in order to return the NAA id.
	 */
	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
		goto check_t10_vend_desc;

	/* CODE SET == Binary */
	buf[off++] = 0x1;

	/* Set ASSOCIATION == addressed logical unit: 0)b */
	buf[off] = 0x00;

	/* Identifier/Designator type == NAA identifier */
	buf[off++] |= 0x3;
	off++;

	/* Identifier/Designator length */
	buf[off++] = 0x10;

	/* NAA IEEE Registered Extended designator */
	spc_gen_naa_6h_vendor_specific(dev, &buf[off]);

	/* 4-byte designator header + 16-byte NAA payload */
	len = 20;
	off = (len + 4);

check_t10_vend_desc:
	/*
	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
	 */
	id_len = 8; /* For Vendor field */

	/* VENDOR SPECIFIC IDENTIFIER: "<model>:<unit_serial>" in ASCII */
	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL)
		id_len += sprintf(&buf[off+12], "%s:%s", prod,
				&dev->t10_wwn.unit_serial[0]);
	buf[off] = 0x2; /* ASCII */
	buf[off+1] = 0x1; /* T10 Vendor ID */
	buf[off+2] = 0x0;
	/* left align Vendor ID and pad with spaces */
	memset(&buf[off+4], 0x20, INQUIRY_VENDOR_LEN);
	memcpy(&buf[off+4], dev->t10_wwn.vendor,
	       strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
	/* Extra Byte for NULL Terminator */
	id_len++;
	/* Identifier Length */
	buf[off+3] = id_len;
	/* Header size for Designation descriptor */
	len += (id_len + 4);
	off += (id_len + 4);

	/*
	 * NOTE(review): historic 'if (1)' kept as-is; it only preserves the
	 * original indentation of the per-port designator section below.
	 */
	if (1) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len, scsi_target_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;

		tpg = lun->lun_tpg;
		/*
		 * Relative target port identifier, see spc4r17
		 * section 7.7.3.7
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Relative target port identifier */
		buf[off++] |= 0x4;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		/* Skip over Obsolete field in RTPI payload
		 * in Table 472 */
		off += 2;
		put_unaligned_be16(lun->lun_tpg->tpg_rtpi, &buf[off]);
		off += 2;
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Target port group identifier, see spc4r17
		 * section 7.7.3.8
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		rcu_read_lock();
		tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
		if (!tg_pt_gp) {
			rcu_read_unlock();
			goto check_lu_gp;
		}
		/* Copy the id out so the RCU read section can end here */
		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
		rcu_read_unlock();

		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Target port group identifier */
		buf[off++] |= 0x5;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		put_unaligned_be16(tg_pt_gp_id, &buf[off]);
		off += 2;
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Logical Unit Group identifier, see spc4r17
		 * section 7.7.3.8
		 */
check_lu_gp:
		lu_gp_mem = dev->dev_alua_lu_gp_mem;
		if (!lu_gp_mem)
			goto check_scsi_name;

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		lu_gp = lu_gp_mem->lu_gp;
		if (!lu_gp) {
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
			goto check_scsi_name;
		}
		lu_gp_id = lu_gp->lu_gp_id;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		buf[off++] |= 0x1; /* CODE SET == Binary */
		/* DESIGNATOR TYPE == Logical Unit Group identifier */
		buf[off++] |= 0x6;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		put_unaligned_be16(lu_gp_id, &buf[off]);
		off += 2;
		len += 8; /* Header size + Designation descriptor */
		/*
		 * SCSI name string designator, see spc4r17
		 * section 7.7.3.11
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_scsi_name:
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing, $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>,t,0x<TPGT> in
		 * UTF-8 encoding.
		 */
		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
		scsi_name_len += 1 /* Include  NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_name_len) & 3);
		if (padding)
			scsi_name_len += padding;
		if (scsi_name_len > 256)
			scsi_name_len = 256;

		/* Back-fill the DESIGNATOR LENGTH byte skipped above */
		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);

		/*
		 * Target device designator
		 */
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target device: 10b */
		buf[off] |= 0x20;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing, $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>" in
		 * UTF-8 encoding.
		 */
		scsi_target_len = sprintf(&buf[off], "%s",
					  tpg->se_tpg_tfo->tpg_get_wwn(tpg));
		scsi_target_len += 1 /* Include  NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_target_len) & 3);
		if (padding)
			scsi_target_len += padding;
		if (scsi_target_len > 256)
			scsi_target_len = 256;

		/* Back-fill the DESIGNATOR LENGTH byte skipped above */
		buf[off-1] = scsi_target_len;
		off += scsi_target_len;

		/* Header size + Designation descriptor */
		len += (scsi_target_len + 4);
	}
	put_unaligned_be16(len, &buf[2]); /* Page Length for VPD 0x83 */
	return 0;
}
465 EXPORT_SYMBOL(spc_emulate_evpd_83);
466
/* Extended INQUIRY Data VPD Page (0x86), see spc4r17 section 7.7.4 */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	/* PAGE LENGTH */
	buf[3] = 0x3c;
	/*
	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
	 * only for TYPE3 protection.  Only reported when the fabric
	 * supports DIN/DOUT PASS for T10-PI.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT ||
		    cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
			buf[4] = 0x5;
		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
			 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
			buf[4] = 0x4;
	}

	/* logical unit supports type 1 and type 3 protection */
	if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
	    (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
	    (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
		/* SPT field: 011b == types 1 and 3 supported */
		buf[4] |= (0x3 << 3);
	}

	/* Set HEADSUP, ORDSUP, SIMPSUP */
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
	if (target_check_wce(dev))
		buf[6] = 0x01;
	/* If an LBA map is present set R_SUP */
	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		buf[8] = 0x10;
	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
	return 0;
}
508
/*
 * Block Limits VPD page (0xb0).
 *
 * The PAGE LENGTH at buf[2] is rewritten as sections are appended:
 * 12 for the base limits, 40 once thin-provisioning fields are added,
 * and 60 once the atomic-write fields are added.
 */
static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u32 mtl = 0;
	int have_tp = 0, opt, min;
	u32 io_max_blocks;

	/*
	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
	 * emulate_tpu=1 or emulate_tpws=1 we will be expect a
	 * different page length for Thin Provisioning.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		have_tp = 1;

	buf[0] = dev->transport->get_device_type(dev);

	/* Set WSNZ to 1 */
	buf[4] = 0x01;
	/*
	 * Set MAXIMUM COMPARE AND WRITE LENGTH
	 */
	if (dev->dev_attrib.emulate_caw)
		buf[5] = 0x01;

	/*
	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
	 */
	if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
		put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
	else
		put_unaligned_be16(1, &buf[6]);

	/*
	 * Set MAXIMUM TRANSFER LENGTH
	 *
	 * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
	 * enforcing maximum HW scatter-gather-list entry limit
	 */
	if (cmd->se_tfo->max_data_sg_nents) {
		mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
		       dev->dev_attrib.block_size;
	}
	/* Rescale hw_max_sectors from hw_block_size to block_size units */
	io_max_blocks = mult_frac(dev->dev_attrib.hw_max_sectors,
				  dev->dev_attrib.hw_block_size,
				  dev->dev_attrib.block_size);
	put_unaligned_be32(min_not_zero(mtl, io_max_blocks), &buf[8]);

	/*
	 * Set OPTIMAL TRANSFER LENGTH
	 */
	if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
		put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
	else
		put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);

	/* PAGE LENGTH for the base limits only; may be raised below */
	put_unaligned_be16(12, &buf[2]);

	if (!have_tp)
		goto try_atomic;

	/*
	 * Set MAXIMUM UNMAP LBA COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);

	/*
	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
			   &buf[24]);

	/*
	 * Set OPTIMAL UNMAP GRANULARITY
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);

	/*
	 * UNMAP GRANULARITY ALIGNMENT
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
			   &buf[32]);
	if (dev->dev_attrib.unmap_granularity_alignment != 0)
		buf[32] |= 0x80; /* Set the UGAVALID bit */

	/*
	 * MAXIMUM WRITE SAME LENGTH
	 */
	put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);

	/* PAGE LENGTH including the thin-provisioning fields */
	put_unaligned_be16(40, &buf[2]);

try_atomic:
	/*
	 * ATOMIC (atomic write limits); only reported when configured
	 */
	if (!dev->dev_attrib.atomic_max_len)
		goto done;

	/* MAXIMUM ATOMIC TRANSFER LENGTH, capped by the I/O size limit */
	if (dev->dev_attrib.atomic_max_len < io_max_blocks)
		put_unaligned_be32(dev->dev_attrib.atomic_max_len, &buf[44]);
	else
		put_unaligned_be32(io_max_blocks, &buf[44]);

	put_unaligned_be32(dev->dev_attrib.atomic_alignment, &buf[48]);
	put_unaligned_be32(dev->dev_attrib.atomic_granularity, &buf[52]);
	put_unaligned_be32(dev->dev_attrib.atomic_max_with_boundary, &buf[56]);
	put_unaligned_be32(dev->dev_attrib.atomic_max_boundary, &buf[60]);

	/* PAGE LENGTH including the atomic-write fields */
	put_unaligned_be16(60, &buf[2]);
done:
	return 0;
}
624
625 /* Block Device Characteristics VPD page */
626 static sense_reason_t
spc_emulate_evpd_b1(struct se_cmd * cmd,unsigned char * buf)627 spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
628 {
629 struct se_device *dev = cmd->se_dev;
630
631 buf[0] = dev->transport->get_device_type(dev);
632 buf[3] = 0x3c;
633 buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
634
635 return 0;
636 }
637
638 /* Thin Provisioning VPD */
639 static sense_reason_t
spc_emulate_evpd_b2(struct se_cmd * cmd,unsigned char * buf)640 spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
641 {
642 struct se_device *dev = cmd->se_dev;
643
644 /*
645 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
646 *
647 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
648 * zero, then the page length shall be set to 0004h. If the DP bit
649 * is set to one, then the page length shall be set to the value
650 * defined in table 162.
651 */
652 buf[0] = dev->transport->get_device_type(dev);
653
654 /*
655 * Set Hardcoded length mentioned above for DP=0
656 */
657 put_unaligned_be16(0x0004, &buf[2]);
658
659 /*
660 * The THRESHOLD EXPONENT field indicates the threshold set size in
661 * LBAs as a power of 2 (i.e., the threshold set size is equal to
662 * 2(threshold exponent)).
663 *
664 * Note that this is currently set to 0x00 as mkp says it will be
665 * changing again. We can enable this once it has settled in T10
666 * and is actually used by Linux/SCSI ML code.
667 */
668 buf[4] = 0x00;
669
670 /*
671 * A TPU bit set to one indicates that the device server supports
672 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
673 * that the device server does not support the UNMAP command.
674 */
675 if (dev->dev_attrib.emulate_tpu != 0)
676 buf[5] = 0x80;
677
678 /*
679 * A TPWS bit set to one indicates that the device server supports
680 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
681 * A TPWS bit set to zero indicates that the device server does not
682 * support the use of the WRITE SAME (16) command to unmap LBAs.
683 */
684 if (dev->dev_attrib.emulate_tpws != 0)
685 buf[5] |= 0x40 | 0x20;
686
687 /*
688 * The unmap_zeroes_data set means that the underlying device supports
689 * REQ_OP_DISCARD and has the discard_zeroes_data bit set. This
690 * satisfies the SBC requirements for LBPRZ, meaning that a subsequent
691 * read will return zeroes after an UNMAP or WRITE SAME (16) to an LBA
692 * See sbc4r36 6.6.4.
693 */
694 if (((dev->dev_attrib.emulate_tpu != 0) ||
695 (dev->dev_attrib.emulate_tpws != 0)) &&
696 (dev->dev_attrib.unmap_zeroes_data != 0))
697 buf[5] |= 0x04;
698
699 return 0;
700 }
701
702 /* Referrals VPD page */
703 static sense_reason_t
spc_emulate_evpd_b3(struct se_cmd * cmd,unsigned char * buf)704 spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
705 {
706 struct se_device *dev = cmd->se_dev;
707
708 buf[0] = dev->transport->get_device_type(dev);
709 buf[3] = 0x0c;
710 put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
711 put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);
712
713 return 0;
714 }
715
716 static sense_reason_t
717 spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
718
/*
 * Dispatch table mapping INQUIRY EVPD page codes to their emulation
 * handlers; also walked by spc_emulate_evpd_00() to build the
 * Supported VPD Pages (0x00) list.
 */
static struct {
	uint8_t page;
	sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};
732
733 /* supported vital product data pages */
734 static sense_reason_t
spc_emulate_evpd_00(struct se_cmd * cmd,unsigned char * buf)735 spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
736 {
737 int p;
738
739 /*
740 * Only report the INQUIRY EVPD=1 pages after a valid NAA
741 * Registered Extended LUN WWN has been set via ConfigFS
742 * during device creation/restart.
743 */
744 if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
745 buf[3] = ARRAY_SIZE(evpd_handlers);
746 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
747 buf[p + 4] = evpd_handlers[p].page;
748 }
749
750 return 0;
751 }
752
/*
 * Top-level INQUIRY emulation: dispatches EVPD=0 to the standard
 * INQUIRY handler and EVPD=1 to the matching evpd_handlers[] entry,
 * then copies the response into the command's data scatterlist.
 */
static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *buf;
	sense_reason_t ret;
	int p;
	int len = 0;

	/* Scratch response buffer; zeroed so handlers only set non-zero fields */
	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate response buffer for INQUIRY\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	buf[0] = dev->transport->get_device_type(dev);

	if (!(cdb[1] & 0x1)) {
		/* EVPD=0 requires PAGE CODE to be zero per SPC */
		if (cdb[2]) {
			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
			       cdb[2]);
			ret = TCM_INVALID_CDB_FIELD;
			goto out;
		}

		ret = spc_emulate_inquiry_std(cmd, buf);
		/* ADDITIONAL LENGTH (buf[4]) + the 5 header bytes */
		len = buf[4] + 5;
		goto out;
	}

	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
		if (cdb[2] == evpd_handlers[p].page) {
			buf[1] = cdb[2];
			ret = evpd_handlers[p].emulate(cmd, buf);
			/* PAGE LENGTH (buf[2..3]) + the 4 header bytes */
			len = get_unaligned_be16(&buf[2]) + 4;
			goto out;
		}
	}

	pr_debug("Unknown VPD Code: 0x%02x\n", cdb[2]);
	ret = TCM_INVALID_CDB_FIELD;

out:
	/* Copy the response (possibly truncated) into the data SGL */
	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}
	kfree(buf);

	if (!ret)
		target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, len);
	return ret;
}
809
/* Read-Write Error Recovery mode page (0x01) */
static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
{
	p[0] = 0x01;	/* PAGE CODE */
	p[1] = 0x0a;	/* PAGE LENGTH */

	/*
	 * pc == 1 requests changeable values; none are reported yet, so
	 * the remaining bytes stay zero for every page control value.
	 */
	return 12;
}
822
/* Control mode page (0x0a), see spc4r23 section 7.4.7 */
static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	p[0] = 0x0a;	/* PAGE CODE */
	p[1] = 0x0a;	/* PAGE LENGTH */

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	/* GLTSD: No implicit save of log parameters */
	p[2] = (1 << 1);
	if (target_sense_desc_format(dev))
		/* D_SENSE: Descriptor format sense data for 64bit sectors */
		p[2] |= (1 << 2);

	/*
	 * From spc4r23, 7.4.7 Control mode page
	 *
	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
	 * restrictions on the algorithm used for reordering commands
	 * having the SIMPLE task attribute (see SAM-4).
	 *
	 *                    Table 368 -- QUEUE ALGORITHM MODIFIER field
	 *         Code      Description
	 *          0h       Restricted reordering
	 *          1h       Unrestricted reordering allowed
	 *          2h to 7h    Reserved
	 *          8h to Fh    Vendor specific
	 *
	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
	 * the device server shall order the processing sequence of commands
	 * having the SIMPLE task attribute such that data integrity is maintained
	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
	 * requests is halted at any time, the final value of all data observable
	 * on the medium shall be the same as if all the commands had been processed
	 * with the ORDERED task attribute).
	 *
	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
	 * device server may reorder the processing sequence of commands having the
	 * SIMPLE task attribute in any manner. Any data integrity exposures related to
	 * command sequence order shall be explicitly handled by the application client
	 * through the selection of appropriate commands and task attributes.
	 */
	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
	 *
	 * 00b: The logical unit shall clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when a com-
	 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
	 * status.
	 *
	 * 10b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when
	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
	 * CONFLICT status.
	 *
	 * 11b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall establish a unit attention condition for the
	 * initiator port associated with the I_T nexus on which the BUSY,
	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
	 * Depending on the status, the additional sense code shall be set to
	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
	 * command, a unit attention condition shall be established only once
	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
	 * to the number of commands completed with one of those status codes.
	 */
	switch (dev->dev_attrib.emulate_ua_intlck_ctrl) {
	case TARGET_UA_INTLCK_CTRL_ESTABLISH_UA:
		p[4] = 0x30;	/* UN_INTLCK_CTRL == 11b */
		break;
	case TARGET_UA_INTLCK_CTRL_NO_CLEAR:
		p[4] = 0x20;	/* UN_INTLCK_CTRL == 10b */
		break;
	default:	/* TARGET_UA_INTLCK_CTRL_CLEAR */
		p[4] = 0x00;	/* UN_INTLCK_CTRL == 00b */
		break;
	}
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Task Aborted Status (TAS) bit set to zero.
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
	/*
	 * From spc4r30, section 7.5.7 Control mode page
	 *
	 * Application Tag Owner (ATO) bit set to one.
	 *
	 * If the ATO bit is set to one the device server shall not modify the
	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
	 * TAG field.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type)
			p[5] |= 0x80;
	}

	/* BUSY TIMEOUT PERIOD: 0xffff (unlimited) */
	p[8] = 0xff;
	p[9] = 0xff;
	/* EXTENDED SELF-TEST COMPLETION TIME */
	p[11] = 30;

out:
	return 12;
}
945
/* Caching mode page (0x08) */
static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;

	p[0] = 0x08;	/* PAGE CODE */
	p[1] = 0x12;	/* PAGE LENGTH */

	/* pc == 1 requests changeable values; none are reported yet */
	if (pc != 1) {
		if (target_check_wce(dev))
			p[2] = 0x04; /* Write Cache Enable */
		p[12] = 0x20; /* Disabled Read Ahead */
	}

	return 20;
}
964
/* Informational Exceptions Control mode page (0x1c) */
static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
{
	p[0] = 0x1c;	/* PAGE CODE */
	p[1] = 0x0a;	/* PAGE LENGTH */

	/*
	 * pc == 1 requests changeable values; none are reported yet, so
	 * the remaining bytes stay zero for every page control value.
	 */
	return 12;
}
977
/*
 * Dispatch table mapping MODE SENSE page/subpage codes to their
 * emulation handlers; each handler returns the number of bytes it
 * contributes to the mode parameter data.
 */
static struct {
	uint8_t page;
	uint8_t subpage;
	int (*emulate)(struct se_cmd *, u8, unsigned char *);
} modesense_handlers[] = {
	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
};
988
/* Set the WP bit in the mode parameter header for a read-only LUN */
static void spc_modesense_write_protect(unsigned char *buf, int type)
{
	/*
	 * The WP bit (bit 7) in the mode parameter header is believed to
	 * be the same for all device types, so every type gets it set.
	 */
	switch (type) {
	default:
		buf[0] |= 0x80; /* WP bit */
		break;
	}
}
1003
spc_modesense_dpofua(unsigned char * buf,int type)1004 static void spc_modesense_dpofua(unsigned char *buf, int type)
1005 {
1006 switch (type) {
1007 case TYPE_DISK:
1008 buf[0] |= 0x10; /* DPOFUA bit */
1009 break;
1010 default:
1011 break;
1012 }
1013 }
1014
spc_modesense_blockdesc(unsigned char * buf,u64 blocks,u32 block_size)1015 static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
1016 {
1017 *buf++ = 8;
1018 put_unaligned_be32(min(blocks, 0xffffffffull), buf);
1019 buf += 4;
1020 put_unaligned_be32(block_size, buf);
1021 return 9;
1022 }
1023
spc_modesense_long_blockdesc(unsigned char * buf,u64 blocks,u32 block_size)1024 static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
1025 {
1026 if (blocks <= 0xffffffff)
1027 return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;
1028
1029 *buf++ = 1; /* LONGLBA */
1030 buf += 2;
1031 *buf++ = 16;
1032 put_unaligned_be64(blocks, buf);
1033 buf += 12;
1034 put_unaligned_be32(block_size, buf);
1035
1036 return 17;
1037 }
1038
spc_emulate_modesense(struct se_cmd * cmd)1039 static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
1040 {
1041 struct se_device *dev = cmd->se_dev;
1042 char *cdb = cmd->t_task_cdb;
1043 unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
1044 int type = dev->transport->get_device_type(dev);
1045 int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
1046 bool dbd = !!(cdb[1] & 0x08);
1047 bool llba = ten ? !!(cdb[1] & 0x10) : false;
1048 u8 pc = cdb[2] >> 6;
1049 u8 page = cdb[2] & 0x3f;
1050 u8 subpage = cdb[3];
1051 int length = 0;
1052 int ret;
1053 int i;
1054
1055 memset(buf, 0, SE_MODE_PAGE_BUF);
1056
1057 /*
1058 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
1059 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
1060 */
1061 length = ten ? 3 : 2;
1062
1063 /* DEVICE-SPECIFIC PARAMETER */
1064 if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd))
1065 spc_modesense_write_protect(&buf[length], type);
1066
1067 /*
1068 * SBC only allows us to enable FUA and DPO together. Fortunately
1069 * DPO is explicitly specified as a hint, so a noop is a perfectly
1070 * valid implementation.
1071 */
1072 if (target_check_fua(dev))
1073 spc_modesense_dpofua(&buf[length], type);
1074
1075 ++length;
1076
1077 /* BLOCK DESCRIPTOR */
1078
1079 /*
1080 * For now we only include a block descriptor for disk (SBC)
1081 * devices; other command sets use a slightly different format.
1082 */
1083 if (!dbd && type == TYPE_DISK) {
1084 u64 blocks = dev->transport->get_blocks(dev);
1085 u32 block_size = dev->dev_attrib.block_size;
1086
1087 if (ten) {
1088 if (llba) {
1089 length += spc_modesense_long_blockdesc(&buf[length],
1090 blocks, block_size);
1091 } else {
1092 length += 3;
1093 length += spc_modesense_blockdesc(&buf[length],
1094 blocks, block_size);
1095 }
1096 } else {
1097 length += spc_modesense_blockdesc(&buf[length], blocks,
1098 block_size);
1099 }
1100 } else {
1101 if (ten)
1102 length += 4;
1103 else
1104 length += 1;
1105 }
1106
1107 if (page == 0x3f) {
1108 if (subpage != 0x00 && subpage != 0xff) {
1109 pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
1110 return TCM_INVALID_CDB_FIELD;
1111 }
1112
1113 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
1114 /*
1115 * Tricky way to say all subpage 00h for
1116 * subpage==0, all subpages for subpage==0xff
1117 * (and we just checked above that those are
1118 * the only two possibilities).
1119 */
1120 if ((modesense_handlers[i].subpage & ~subpage) == 0) {
1121 ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
1122 if (!ten && length + ret >= 255)
1123 break;
1124 length += ret;
1125 }
1126 }
1127
1128 goto set_length;
1129 }
1130
1131 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
1132 if (modesense_handlers[i].page == page &&
1133 modesense_handlers[i].subpage == subpage) {
1134 length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
1135 goto set_length;
1136 }
1137
1138 /*
1139 * We don't intend to implement:
1140 * - obsolete page 03h "format parameters" (checked by Solaris)
1141 */
1142 if (page != 0x03)
1143 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
1144 page, subpage);
1145
1146 return TCM_UNKNOWN_MODE_PAGE;
1147
1148 set_length:
1149 if (ten)
1150 put_unaligned_be16(length - 2, buf);
1151 else
1152 buf[0] = length - 1;
1153
1154 rbuf = transport_kmap_data_sg(cmd);
1155 if (rbuf) {
1156 memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
1157 transport_kunmap_data_sg(cmd);
1158 }
1159
1160 target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, length);
1161 return 0;
1162 }
1163
spc_emulate_modeselect(struct se_cmd * cmd)1164 static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
1165 {
1166 char *cdb = cmd->t_task_cdb;
1167 bool ten = cdb[0] == MODE_SELECT_10;
1168 int off = ten ? 8 : 4;
1169 bool pf = !!(cdb[1] & 0x10);
1170 u8 page, subpage;
1171 unsigned char *buf;
1172 unsigned char tbuf[SE_MODE_PAGE_BUF];
1173 int length;
1174 sense_reason_t ret = 0;
1175 int i;
1176
1177 if (!cmd->data_length) {
1178 target_complete_cmd(cmd, SAM_STAT_GOOD);
1179 return 0;
1180 }
1181
1182 if (cmd->data_length < off + 2)
1183 return TCM_PARAMETER_LIST_LENGTH_ERROR;
1184
1185 buf = transport_kmap_data_sg(cmd);
1186 if (!buf)
1187 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1188
1189 if (!pf) {
1190 ret = TCM_INVALID_CDB_FIELD;
1191 goto out;
1192 }
1193
1194 page = buf[off] & 0x3f;
1195 subpage = buf[off] & 0x40 ? buf[off + 1] : 0;
1196
1197 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
1198 if (modesense_handlers[i].page == page &&
1199 modesense_handlers[i].subpage == subpage) {
1200 memset(tbuf, 0, SE_MODE_PAGE_BUF);
1201 length = modesense_handlers[i].emulate(cmd, 0, tbuf);
1202 goto check_contents;
1203 }
1204
1205 ret = TCM_UNKNOWN_MODE_PAGE;
1206 goto out;
1207
1208 check_contents:
1209 if (cmd->data_length < off + length) {
1210 ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
1211 goto out;
1212 }
1213
1214 if (memcmp(buf + off, tbuf, length))
1215 ret = TCM_INVALID_PARAMETER_LIST;
1216
1217 out:
1218 transport_kunmap_data_sg(cmd);
1219
1220 if (!ret)
1221 target_complete_cmd(cmd, SAM_STAT_GOOD);
1222 return ret;
1223 }
1224
spc_emulate_request_sense(struct se_cmd * cmd)1225 static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
1226 {
1227 unsigned char *cdb = cmd->t_task_cdb;
1228 unsigned char *rbuf;
1229 u8 ua_asc = 0, ua_ascq = 0;
1230 unsigned char buf[SE_SENSE_BUF];
1231 bool desc_format = target_sense_desc_format(cmd->se_dev);
1232
1233 memset(buf, 0, SE_SENSE_BUF);
1234
1235 if (cdb[1] & 0x01) {
1236 pr_err("REQUEST_SENSE description emulation not"
1237 " supported\n");
1238 return TCM_INVALID_CDB_FIELD;
1239 }
1240
1241 rbuf = transport_kmap_data_sg(cmd);
1242 if (!rbuf)
1243 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1244
1245 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))
1246 scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION,
1247 ua_asc, ua_ascq);
1248 else
1249 scsi_build_sense_buffer(desc_format, buf, NO_SENSE, 0x0, 0x0);
1250
1251 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
1252 transport_kunmap_data_sg(cmd);
1253
1254 target_complete_cmd(cmd, SAM_STAT_GOOD);
1255 return 0;
1256 }
1257
spc_emulate_report_luns(struct se_cmd * cmd)1258 sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1259 {
1260 struct se_dev_entry *deve;
1261 struct se_session *sess = cmd->se_sess;
1262 struct se_node_acl *nacl;
1263 struct scsi_lun slun;
1264 unsigned char *buf;
1265 u32 lun_count = 0, offset = 8;
1266 __be32 len;
1267
1268 buf = transport_kmap_data_sg(cmd);
1269 if (cmd->data_length && !buf)
1270 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1271
1272 /*
1273 * If no struct se_session pointer is present, this struct se_cmd is
1274 * coming via a target_core_mod PASSTHROUGH op, and not through
1275 * a $FABRIC_MOD. In that case, report LUN=0 only.
1276 */
1277 if (!sess)
1278 goto done;
1279
1280 nacl = sess->se_node_acl;
1281
1282 rcu_read_lock();
1283 hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
1284 /*
1285 * We determine the correct LUN LIST LENGTH even once we
1286 * have reached the initial allocation length.
1287 * See SPC2-R20 7.19.
1288 */
1289 lun_count++;
1290 if (offset >= cmd->data_length)
1291 continue;
1292
1293 int_to_scsilun(deve->mapped_lun, &slun);
1294 memcpy(buf + offset, &slun,
1295 min(8u, cmd->data_length - offset));
1296 offset += 8;
1297 }
1298 rcu_read_unlock();
1299
1300 /*
1301 * See SPC3 r07, page 159.
1302 */
1303 done:
1304 /*
1305 * If no LUNs are accessible, report virtual LUN 0.
1306 */
1307 if (lun_count == 0) {
1308 int_to_scsilun(0, &slun);
1309 if (cmd->data_length > 8)
1310 memcpy(buf + offset, &slun,
1311 min(8u, cmd->data_length - offset));
1312 lun_count = 1;
1313 }
1314
1315 if (buf) {
1316 len = cpu_to_be32(lun_count * 8);
1317 memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
1318 transport_kunmap_data_sg(cmd);
1319 }
1320
1321 target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8 + lun_count * 8);
1322 return 0;
1323 }
1324 EXPORT_SYMBOL(spc_emulate_report_luns);
1325
/* TEST UNIT READY: the emulated LU is always ready, so report GOOD. */
static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
1332
set_dpofua_usage_bits(u8 * usage_bits,struct se_device * dev)1333 static void set_dpofua_usage_bits(u8 *usage_bits, struct se_device *dev)
1334 {
1335 if (!target_check_fua(dev))
1336 usage_bits[1] &= ~0x18;
1337 else
1338 usage_bits[1] |= 0x18;
1339 }
1340
set_dpofua_usage_bits32(u8 * usage_bits,struct se_device * dev)1341 static void set_dpofua_usage_bits32(u8 *usage_bits, struct se_device *dev)
1342 {
1343 if (!target_check_fua(dev))
1344 usage_bits[10] &= ~0x18;
1345 else
1346 usage_bits[10] |= 0x18;
1347 }
1348
/*
 * REPORT SUPPORTED OPERATION CODES descriptors for the basic read/write
 * CDBs.  usage_bits is the RSOC "CDB usage map": a set bit marks a CDB
 * bit the device server evaluates.  DPO/FUA bits are patched in at
 * runtime via set_dpofua_usage_bits().
 */

/* READ(6) */
static const struct target_opcode_descriptor tcm_opcode_read6 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_6,
	.cdb_size = 6,
	.usage_bits = {READ_6, 0x1f, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

/* READ(10) */
static const struct target_opcode_descriptor tcm_opcode_read10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_10,
	.cdb_size = 10,
	.usage_bits = {READ_10, 0xf8, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

/* READ(12) */
static const struct target_opcode_descriptor tcm_opcode_read12 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_12,
	.cdb_size = 12,
	.usage_bits = {READ_12, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

/* READ(16) */
static const struct target_opcode_descriptor tcm_opcode_read16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_16,
	.cdb_size = 16,
	.usage_bits = {READ_16, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

/* WRITE(6) */
static const struct target_opcode_descriptor tcm_opcode_write6 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_6,
	.cdb_size = 6,
	.usage_bits = {WRITE_6, 0x1f, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

/* WRITE(10) */
static const struct target_opcode_descriptor tcm_opcode_write10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_10,
	.cdb_size = 10,
	.usage_bits = {WRITE_10, 0xf8, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

/* WRITE AND VERIFY(10) */
static const struct target_opcode_descriptor tcm_opcode_write_verify10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_VERIFY,
	.cdb_size = 10,
	.usage_bits = {WRITE_VERIFY, 0xf0, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

/* WRITE(12) */
static const struct target_opcode_descriptor tcm_opcode_write12 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_12,
	.cdb_size = 12,
	.usage_bits = {WRITE_12, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

/* WRITE(16) */
static const struct target_opcode_descriptor tcm_opcode_write16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_16,
	.cdb_size = 16,
	.usage_bits = {WRITE_16, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

/* WRITE AND VERIFY(16) */
static const struct target_opcode_descriptor tcm_opcode_write_verify16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_VERIFY_16,
	.cdb_size = 16,
	.usage_bits = {WRITE_VERIFY_16, 0xf0, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};
1447
tcm_is_ws_enabled(const struct target_opcode_descriptor * descr,struct se_cmd * cmd)1448 static bool tcm_is_ws_enabled(const struct target_opcode_descriptor *descr,
1449 struct se_cmd *cmd)
1450 {
1451 struct exec_cmd_ops *ops = cmd->protocol_data;
1452 struct se_device *dev = cmd->se_dev;
1453
1454 return (dev->dev_attrib.emulate_tpws && !!ops->execute_unmap) ||
1455 !!ops->execute_write_same;
1456 }
1457
/* WRITE SAME(32): VARIABLE LENGTH CDB qualified by its service action. */
static const struct target_opcode_descriptor tcm_opcode_write_same32 = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = VARIABLE_LENGTH_CMD,
	.service_action = WRITE_SAME_32,
	.cdb_size = 32,
	.usage_bits = {VARIABLE_LENGTH_CMD, SCSI_CONTROL_MASK, 0x00, 0x00,
		       0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0x18,
		       0x00, WRITE_SAME_32, 0xe8, 0x00,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0xff, 0xff, 0xff, 0xff},
	.enabled = tcm_is_ws_enabled,
	.update_usage_bits = set_dpofua_usage_bits32,
};
1475
tcm_is_atomic_enabled(const struct target_opcode_descriptor * descr,struct se_cmd * cmd)1476 static bool tcm_is_atomic_enabled(const struct target_opcode_descriptor *descr,
1477 struct se_cmd *cmd)
1478 {
1479 return cmd->se_dev->dev_attrib.atomic_max_len;
1480 }
1481
/* WRITE ATOMIC(16); gated on the device's atomic_max_len attribute. */
static struct target_opcode_descriptor tcm_opcode_write_atomic16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_ATOMIC_16,
	.cdb_size = 16,
	.usage_bits = {WRITE_ATOMIC_16, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.enabled = tcm_is_atomic_enabled,
	.update_usage_bits = set_dpofua_usage_bits,
};
1493
tcm_is_caw_enabled(const struct target_opcode_descriptor * descr,struct se_cmd * cmd)1494 static bool tcm_is_caw_enabled(const struct target_opcode_descriptor *descr,
1495 struct se_cmd *cmd)
1496 {
1497 struct se_device *dev = cmd->se_dev;
1498
1499 return dev->dev_attrib.emulate_caw;
1500 }
1501
/* COMPARE AND WRITE; only advertised when emulate_caw is set. */
static const struct target_opcode_descriptor tcm_opcode_compare_write = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = COMPARE_AND_WRITE,
	.cdb_size = 16,
	.usage_bits = {COMPARE_AND_WRITE, 0x18, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0x00, 0x00,
		       0x00, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.enabled = tcm_is_caw_enabled,
	.update_usage_bits = set_dpofua_usage_bits,
};

/* READ CAPACITY(10) */
static const struct target_opcode_descriptor tcm_opcode_read_capacity = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_CAPACITY,
	.cdb_size = 10,
	.usage_bits = {READ_CAPACITY, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, 0x00,
		       0x01, SCSI_CONTROL_MASK},
};

/* SERVICE ACTION IN(16) / READ CAPACITY(16) */
static const struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = SERVICE_ACTION_IN_16,
	.service_action = SAI_READ_CAPACITY_16,
	.cdb_size = 16,
	.usage_bits = {SERVICE_ACTION_IN_16, SAI_READ_CAPACITY_16, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
1534
tcm_is_rep_ref_enabled(const struct target_opcode_descriptor * descr,struct se_cmd * cmd)1535 static bool tcm_is_rep_ref_enabled(const struct target_opcode_descriptor *descr,
1536 struct se_cmd *cmd)
1537 {
1538 struct se_device *dev = cmd->se_dev;
1539
1540 spin_lock(&dev->t10_alua.lba_map_lock);
1541 if (list_empty(&dev->t10_alua.lba_map_list)) {
1542 spin_unlock(&dev->t10_alua.lba_map_lock);
1543 return false;
1544 }
1545 spin_unlock(&dev->t10_alua.lba_map_lock);
1546 return true;
1547 }
1548
/* SERVICE ACTION IN(16) / REPORT REFERRALS; gated on an ALUA LBA map. */
static const struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = SERVICE_ACTION_IN_16,
	.service_action = SAI_REPORT_REFERRALS,
	.cdb_size = 16,
	.usage_bits = {SERVICE_ACTION_IN_16, SAI_REPORT_REFERRALS, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_rep_ref_enabled,
};

/* SYNCHRONIZE CACHE(10) */
static const struct target_opcode_descriptor tcm_opcode_sync_cache = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = SYNCHRONIZE_CACHE,
	.cdb_size = 10,
	.usage_bits = {SYNCHRONIZE_CACHE, 0x02, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

/* SYNCHRONIZE CACHE(16) */
static const struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = SYNCHRONIZE_CACHE_16,
	.cdb_size = 16,
	.usage_bits = {SYNCHRONIZE_CACHE_16, 0x02, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};
1580
tcm_is_unmap_enabled(const struct target_opcode_descriptor * descr,struct se_cmd * cmd)1581 static bool tcm_is_unmap_enabled(const struct target_opcode_descriptor *descr,
1582 struct se_cmd *cmd)
1583 {
1584 struct exec_cmd_ops *ops = cmd->protocol_data;
1585 struct se_device *dev = cmd->se_dev;
1586
1587 return ops->execute_unmap && dev->dev_attrib.emulate_tpu;
1588 }
1589
/* UNMAP; advertised only when thin-provisioning UNMAP is usable. */
static const struct target_opcode_descriptor tcm_opcode_unmap = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = UNMAP,
	.cdb_size = 10,
	.usage_bits = {UNMAP, 0x00, 0x00, 0x00,
		       0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_unmap_enabled,
};

/* WRITE SAME(10) */
static const struct target_opcode_descriptor tcm_opcode_write_same = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_SAME,
	.cdb_size = 10,
	.usage_bits = {WRITE_SAME, 0xe8, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_ws_enabled,
};

/* WRITE SAME(16) */
static const struct target_opcode_descriptor tcm_opcode_write_same16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_SAME_16,
	.cdb_size = 16,
	.usage_bits = {WRITE_SAME_16, 0xe8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.enabled = tcm_is_ws_enabled,
};

/* VERIFY(10) */
static const struct target_opcode_descriptor tcm_opcode_verify = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = VERIFY,
	.cdb_size = 10,
	.usage_bits = {VERIFY, 0x00, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

/* VERIFY(16) */
static const struct target_opcode_descriptor tcm_opcode_verify16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = VERIFY_16,
	.cdb_size = 16,
	.usage_bits = {VERIFY_16, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};

/* START STOP UNIT */
static const struct target_opcode_descriptor tcm_opcode_start_stop = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = START_STOP,
	.cdb_size = 6,
	.usage_bits = {START_STOP, 0x01, 0x00, 0x00,
		       0x01, SCSI_CONTROL_MASK},
};

/* MODE SELECT(6) */
static const struct target_opcode_descriptor tcm_opcode_mode_select = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = MODE_SELECT,
	.cdb_size = 6,
	.usage_bits = {MODE_SELECT, 0x10, 0x00, 0x00,
		       0xff, SCSI_CONTROL_MASK},
};

/* MODE SELECT(10) */
static const struct target_opcode_descriptor tcm_opcode_mode_select10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = MODE_SELECT_10,
	.cdb_size = 10,
	.usage_bits = {MODE_SELECT_10, 0x10, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

/* MODE SENSE(6) */
static const struct target_opcode_descriptor tcm_opcode_mode_sense = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = MODE_SENSE,
	.cdb_size = 6,
	.usage_bits = {MODE_SENSE, 0x08, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

/* MODE SENSE(10) */
static const struct target_opcode_descriptor tcm_opcode_mode_sense10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = MODE_SENSE_10,
	.cdb_size = 10,
	.usage_bits = {MODE_SENSE_10, 0x18, 0xff, 0xff,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

/* PERSISTENT RESERVE IN / READ KEYS */
static const struct target_opcode_descriptor tcm_opcode_pri_read_keys = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_IN,
	.service_action = PRI_READ_KEYS,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_KEYS, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

/* PERSISTENT RESERVE IN / READ RESERVATION */
static const struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_IN,
	.service_action = PRI_READ_RESERVATION,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_RESERVATION, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};
1703
tcm_is_pr_enabled(const struct target_opcode_descriptor * descr,struct se_cmd * cmd)1704 static bool tcm_is_pr_enabled(const struct target_opcode_descriptor *descr,
1705 struct se_cmd *cmd)
1706 {
1707 struct se_device *dev = cmd->se_dev;
1708
1709 if (!dev->dev_attrib.emulate_pr)
1710 return false;
1711
1712 if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
1713 return true;
1714
1715 switch (descr->opcode) {
1716 case RESERVE_6:
1717 case RESERVE_10:
1718 case RELEASE_6:
1719 case RELEASE_10:
1720 /*
1721 * The pr_ops which are used by the backend modules don't
1722 * support these commands.
1723 */
1724 return false;
1725 case PERSISTENT_RESERVE_OUT:
1726 switch (descr->service_action) {
1727 case PRO_REGISTER_AND_MOVE:
1728 case PRO_REPLACE_LOST_RESERVATION:
1729 /*
1730 * The backend modules don't have access to ports and
1731 * I_T nexuses so they can't handle these type of
1732 * requests.
1733 */
1734 return false;
1735 }
1736 break;
1737 case PERSISTENT_RESERVE_IN:
1738 if (descr->service_action == PRI_READ_FULL_STATUS)
1739 return false;
1740 break;
1741 }
1742
1743 return true;
1744 }
1745
/* PERSISTENT RESERVE IN / REPORT CAPABILITIES */
static const struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_IN,
	.service_action = PRI_REPORT_CAPABILITIES,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_REPORT_CAPABILITIES, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE IN / READ FULL STATUS */
static const struct target_opcode_descriptor tcm_opcode_pri_read_full_status = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_IN,
	.service_action = PRI_READ_FULL_STATUS,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_FULL_STATUS, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / REGISTER */
static const struct target_opcode_descriptor tcm_opcode_pro_register = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_REGISTER,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / RESERVE */
static const struct target_opcode_descriptor tcm_opcode_pro_reserve = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_RESERVE,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RESERVE, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / RELEASE */
static const struct target_opcode_descriptor tcm_opcode_pro_release = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_RELEASE,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RELEASE, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / CLEAR */
static const struct target_opcode_descriptor tcm_opcode_pro_clear = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_CLEAR,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_CLEAR, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / PREEMPT */
static const struct target_opcode_descriptor tcm_opcode_pro_preempt = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_PREEMPT,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / PREEMPT AND ABORT */
static const struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_PREEMPT_AND_ABORT,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT_AND_ABORT, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / REGISTER AND IGNORE EXISTING KEY */
static const struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_REGISTER_AND_IGNORE_EXISTING_KEY,
	.cdb_size = 10,
	.usage_bits = {
		PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_IGNORE_EXISTING_KEY,
		0xff, 0x00,
		0x00, 0xff, 0xff, 0xff,
		0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* PERSISTENT RESERVE OUT / REGISTER AND MOVE */
static const struct target_opcode_descriptor tcm_opcode_pro_register_move = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_REGISTER_AND_MOVE,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_MOVE, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* RELEASE(6) */
static const struct target_opcode_descriptor tcm_opcode_release = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RELEASE_6,
	.cdb_size = 6,
	.usage_bits = {RELEASE_6, 0x00, 0x00, 0x00,
		       0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* RELEASE(10) */
static const struct target_opcode_descriptor tcm_opcode_release10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RELEASE_10,
	.cdb_size = 10,
	.usage_bits = {RELEASE_10, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* RESERVE(6) */
static const struct target_opcode_descriptor tcm_opcode_reserve = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RESERVE_6,
	.cdb_size = 6,
	.usage_bits = {RESERVE_6, 0x00, 0x00, 0x00,
		       0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* RESERVE(10) */
static const struct target_opcode_descriptor tcm_opcode_reserve10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RESERVE_10,
	.cdb_size = 10,
	.usage_bits = {RESERVE_10, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

/* REQUEST SENSE */
static const struct target_opcode_descriptor tcm_opcode_request_sense = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = REQUEST_SENSE,
	.cdb_size = 6,
	.usage_bits = {REQUEST_SENSE, 0x00, 0x00, 0x00,
		       0xff, SCSI_CONTROL_MASK},
};

/* INQUIRY */
static const struct target_opcode_descriptor tcm_opcode_inquiry = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = INQUIRY,
	.cdb_size = 6,
	.usage_bits = {INQUIRY, 0x01, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};
1921
tcm_is_3pc_enabled(const struct target_opcode_descriptor * descr,struct se_cmd * cmd)1922 static bool tcm_is_3pc_enabled(const struct target_opcode_descriptor *descr,
1923 struct se_cmd *cmd)
1924 {
1925 struct se_device *dev = cmd->se_dev;
1926
1927 return dev->dev_attrib.emulate_3pc;
1928 }
1929
/* EXTENDED COPY (LID1); gated on third-party-copy emulation. */
static const struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = EXTENDED_COPY,
	.cdb_size = 16,
	.usage_bits = {EXTENDED_COPY, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_3pc_enabled,
};

/* RECEIVE COPY RESULTS / OPERATING PARAMETERS; also gated on 3PC. */
static const struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = RECEIVE_COPY_RESULTS,
	.service_action = RCR_SA_OPERATING_PARAMETERS,
	.cdb_size = 16,
	.usage_bits = {RECEIVE_COPY_RESULTS, RCR_SA_OPERATING_PARAMETERS,
		       0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_3pc_enabled,
};

/* REPORT LUNS */
static const struct target_opcode_descriptor tcm_opcode_report_luns = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = REPORT_LUNS,
	.cdb_size = 12,
	.usage_bits = {REPORT_LUNS, 0x00, 0xff, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};

/* TEST UNIT READY */
static const struct target_opcode_descriptor tcm_opcode_test_unit_ready = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = TEST_UNIT_READY,
	.cdb_size = 6,
	.usage_bits = {TEST_UNIT_READY, 0x00, 0x00, 0x00,
		       0x00, SCSI_CONTROL_MASK},
};

/* MAINTENANCE IN / REPORT TARGET PORT GROUPS */
static const struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = MAINTENANCE_IN,
	.service_action = MI_REPORT_TARGET_PGS,
	.cdb_size = 12,
	.usage_bits = {MAINTENANCE_IN, 0xE0 | MI_REPORT_TARGET_PGS, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
1983
spc_rsoc_enabled(const struct target_opcode_descriptor * descr,struct se_cmd * cmd)1984 static bool spc_rsoc_enabled(const struct target_opcode_descriptor *descr,
1985 struct se_cmd *cmd)
1986 {
1987 struct se_device *dev = cmd->se_dev;
1988
1989 return dev->dev_attrib.emulate_rsoc;
1990 }
1991
/*
 * MAINTENANCE IN / REPORT SUPPORTED OPERATION CODES (SPC-4).
 * Byte 2 usage 0x87 covers the RCTD bit (bit 7) plus the REPORTING OPTIONS
 * field (bits 2:0); bytes 3-5 are the requested opcode/service action and
 * bytes 6-9 the allocation length. Only advertised when RSOC emulation is
 * enabled (see spc_rsoc_enabled()).
 */
static const struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = MAINTENANCE_IN,
	.service_action = MI_REPORT_SUPPORTED_OPERATION_CODES,
	.cdb_size = 12,
	.usage_bits = {MAINTENANCE_IN, MI_REPORT_SUPPORTED_OPERATION_CODES,
		       0x87, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = spc_rsoc_enabled,
};
2004
2005 static struct target_opcode_descriptor tcm_opcode_report_identifying_information = {
2006 .support = SCSI_SUPPORT_FULL,
2007 .serv_action_valid = 1,
2008 .opcode = MAINTENANCE_IN,
2009 .service_action = MI_REPORT_IDENTIFYING_INFORMATION,
2010 .cdb_size = 12,
2011 .usage_bits = {MAINTENANCE_IN, MI_REPORT_IDENTIFYING_INFORMATION,
2012 0x00, 0x00,
2013 0x00, 0x00, 0xff, 0xff,
2014 0xff, 0xff, 0xff, SCSI_CONTROL_MASK},
2015 };
2016
tcm_is_set_tpg_enabled(const struct target_opcode_descriptor * descr,struct se_cmd * cmd)2017 static bool tcm_is_set_tpg_enabled(const struct target_opcode_descriptor *descr,
2018 struct se_cmd *cmd)
2019 {
2020 struct t10_alua_tg_pt_gp *l_tg_pt_gp;
2021 struct se_lun *l_lun = cmd->se_lun;
2022
2023 rcu_read_lock();
2024 l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
2025 if (!l_tg_pt_gp) {
2026 rcu_read_unlock();
2027 return false;
2028 }
2029 if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
2030 rcu_read_unlock();
2031 return false;
2032 }
2033 rcu_read_unlock();
2034
2035 return true;
2036 }
2037
/*
 * MAINTENANCE OUT / SET TARGET PORT GROUPS (SPC-4, explicit ALUA).
 * Bytes 6-9 of the usage map are the parameter list length. Advertised
 * only when the LUN's group allows explicit ALUA (tcm_is_set_tpg_enabled).
 */
static const struct target_opcode_descriptor tcm_opcode_set_tpg = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = MAINTENANCE_OUT,
	.service_action = MO_SET_TARGET_PGS,
	.cdb_size = 12,
	.usage_bits = {MAINTENANCE_OUT, MO_SET_TARGET_PGS, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_set_tpg_enabled,
};
2049
/*
 * Master list of opcode descriptors reported by REPORT SUPPORTED OPERATION
 * CODES. Per-descriptor ->enabled() callbacks filter entries at report
 * time based on device/LUN state.
 */
static const struct target_opcode_descriptor *tcm_supported_opcodes[] = {
	&tcm_opcode_read6,
	&tcm_opcode_read10,
	&tcm_opcode_read12,
	&tcm_opcode_read16,
	&tcm_opcode_write6,
	&tcm_opcode_write10,
	&tcm_opcode_write_verify10,
	&tcm_opcode_write12,
	&tcm_opcode_write16,
	&tcm_opcode_write_verify16,
	&tcm_opcode_write_same32,
	&tcm_opcode_write_atomic16,
	&tcm_opcode_compare_write,
	&tcm_opcode_read_capacity,
	&tcm_opcode_read_capacity16,
	&tcm_opcode_read_report_refferals,
	&tcm_opcode_sync_cache,
	&tcm_opcode_sync_cache16,
	&tcm_opcode_unmap,
	&tcm_opcode_write_same,
	&tcm_opcode_write_same16,
	&tcm_opcode_verify,
	&tcm_opcode_verify16,
	&tcm_opcode_start_stop,
	&tcm_opcode_mode_select,
	&tcm_opcode_mode_select10,
	&tcm_opcode_mode_sense,
	&tcm_opcode_mode_sense10,
	&tcm_opcode_pri_read_keys,
	&tcm_opcode_pri_read_resrv,
	&tcm_opcode_pri_read_caps,
	&tcm_opcode_pri_read_full_status,
	&tcm_opcode_pro_register,
	&tcm_opcode_pro_reserve,
	&tcm_opcode_pro_release,
	&tcm_opcode_pro_clear,
	&tcm_opcode_pro_preempt,
	&tcm_opcode_pro_preempt_abort,
	&tcm_opcode_pro_reg_ign_exist,
	&tcm_opcode_pro_register_move,
	&tcm_opcode_release,
	&tcm_opcode_release10,
	&tcm_opcode_reserve,
	&tcm_opcode_reserve10,
	&tcm_opcode_request_sense,
	&tcm_opcode_inquiry,
	&tcm_opcode_extended_copy_lid1,
	&tcm_opcode_rcv_copy_res_op_params,
	&tcm_opcode_report_luns,
	&tcm_opcode_test_unit_ready,
	&tcm_opcode_report_target_pgs,
	&tcm_opcode_report_supp_opcodes,
	&tcm_opcode_set_tpg,
	&tcm_opcode_report_identifying_information,
};
2106
2107 static int
spc_rsoc_encode_command_timeouts_descriptor(unsigned char * buf,u8 ctdp,const struct target_opcode_descriptor * descr)2108 spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp,
2109 const struct target_opcode_descriptor *descr)
2110 {
2111 if (!ctdp)
2112 return 0;
2113
2114 put_unaligned_be16(0xa, buf);
2115 buf[3] = descr->specific_timeout;
2116 put_unaligned_be32(descr->nominal_timeout, &buf[4]);
2117 put_unaligned_be32(descr->recommended_timeout, &buf[8]);
2118
2119 return 12;
2120 }
2121
2122 static int
spc_rsoc_encode_command_descriptor(unsigned char * buf,u8 ctdp,const struct target_opcode_descriptor * descr)2123 spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp,
2124 const struct target_opcode_descriptor *descr)
2125 {
2126 int td_size = 0;
2127
2128 buf[0] = descr->opcode;
2129
2130 put_unaligned_be16(descr->service_action, &buf[2]);
2131
2132 buf[5] = (ctdp << 1) | descr->serv_action_valid;
2133 put_unaligned_be16(descr->cdb_size, &buf[6]);
2134
2135 td_size = spc_rsoc_encode_command_timeouts_descriptor(&buf[8], ctdp,
2136 descr);
2137
2138 return 8 + td_size;
2139 }
2140
2141 static int
spc_rsoc_encode_one_command_descriptor(unsigned char * buf,u8 ctdp,const struct target_opcode_descriptor * descr,struct se_device * dev)2142 spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp,
2143 const struct target_opcode_descriptor *descr,
2144 struct se_device *dev)
2145 {
2146 int td_size = 0;
2147
2148 if (!descr) {
2149 buf[1] = (ctdp << 7) | SCSI_SUPPORT_NOT_SUPPORTED;
2150 return 2;
2151 }
2152
2153 buf[1] = (ctdp << 7) | SCSI_SUPPORT_FULL;
2154 put_unaligned_be16(descr->cdb_size, &buf[2]);
2155 memcpy(&buf[4], descr->usage_bits, descr->cdb_size);
2156 if (descr->update_usage_bits)
2157 descr->update_usage_bits(&buf[4], dev);
2158
2159 td_size = spc_rsoc_encode_command_timeouts_descriptor(
2160 &buf[4 + descr->cdb_size], ctdp, descr);
2161
2162 return 4 + descr->cdb_size + td_size;
2163 }
2164
/*
 * Look up the opcode descriptor matching the REQUESTED OPERATION CODE /
 * REQUESTED SERVICE ACTION of an RSOC CDB, honouring the REPORTING
 * OPTIONS field (@opts). On return *@opcode is the matching descriptor,
 * or NULL when no enabled descriptor matched (reported as "not
 * supported" by the caller). Returns TCM_INVALID_CDB_FIELD for the
 * opts/serv_action combinations SPC-4 requires to be rejected.
 */
static sense_reason_t
spc_rsoc_get_descr(struct se_cmd *cmd, const struct target_opcode_descriptor **opcode)
{
	const struct target_opcode_descriptor *descr;
	struct se_session *sess = cmd->se_sess;
	unsigned char *cdb = cmd->t_task_cdb;
	u8 opts = cdb[2] & 0x3;
	u8 requested_opcode;
	u16 requested_sa;
	int i;

	requested_opcode = cdb[3];
	requested_sa = ((u16)cdb[4]) << 8 | cdb[5];
	*opcode = NULL;

	/* NOTE(review): opts is masked to two bits above, so this branch
	 * looks unreachable; kept as defensive validation. */
	if (opts > 3) {
		pr_debug("TARGET_CORE[%s]: Invalid REPORT SUPPORTED OPERATION CODES"
				" with unsupported REPORTING OPTIONS %#x for 0x%08llx from %s\n",
				cmd->se_tfo->fabric_name, opts,
				cmd->se_lun->unpacked_lun,
				sess->se_node_acl->initiatorname);
		return TCM_INVALID_CDB_FIELD;
	}

	for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
		descr = tcm_supported_opcodes[i];
		if (descr->opcode != requested_opcode)
			continue;

		switch (opts) {
		case 0x1:
			/*
			 * If the REQUESTED OPERATION CODE field specifies an
			 * operation code for which the device server implements
			 * service actions, then the device server shall
			 * terminate the command with CHECK CONDITION status,
			 * with the sense key set to ILLEGAL REQUEST, and the
			 * additional sense code set to INVALID FIELD IN CDB
			 */
			if (descr->serv_action_valid)
				return TCM_INVALID_CDB_FIELD;

			/* Match only if the descriptor is enabled for this device. */
			if (!descr->enabled || descr->enabled(descr, cmd)) {
				*opcode = descr;
				return TCM_NO_SENSE;
			}
			break;
		case 0x2:
			/*
			 * If the REQUESTED OPERATION CODE field specifies an
			 * operation code for which the device server does not
			 * implement service actions, then the device server
			 * shall terminate the command with CHECK CONDITION
			 * status, with the sense key set to ILLEGAL REQUEST,
			 * and the additional sense code set to INVALID FIELD IN CDB.
			 */
			if (descr->serv_action_valid &&
			    descr->service_action == requested_sa) {
				if (!descr->enabled || descr->enabled(descr,
								      cmd)) {
					*opcode = descr;
					return TCM_NO_SENSE;
				}
			} else if (!descr->serv_action_valid)
				return TCM_INVALID_CDB_FIELD;
			break;
		case 0x3:
			/*
			 * The command support data for the operation code and
			 * service action a specified in the REQUESTED OPERATION
			 * CODE field and REQUESTED SERVICE ACTION field shall
			 * be returned in the one_command parameter data format.
			 */
			if (descr->service_action == requested_sa)
				if (!descr->enabled || descr->enabled(descr,
								      cmd)) {
					*opcode = descr;
					return TCM_NO_SENSE;
				}
			break;
		}
	}

	return TCM_NO_SENSE;
}
2250
/*
 * Emulate REPORT SUPPORTED OPERATION CODES (SPC-4).
 *
 * REPORTING OPTIONS == 0 returns the all-commands list built from
 * tcm_supported_opcodes[]; any other value returns the one-command
 * format for the descriptor found by spc_rsoc_get_descr(). The RCTD bit
 * adds a 12-byte timeouts descriptor per entry.
 */
static sense_reason_t
spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
{
	int descr_num = ARRAY_SIZE(tcm_supported_opcodes);
	const struct target_opcode_descriptor *descr = NULL;
	unsigned char *cdb = cmd->t_task_cdb;
	u8 rctd = (cdb[2] >> 7) & 0x1;
	unsigned char *buf = NULL;
	int response_length = 0;
	u8 opts = cdb[2] & 0x3;
	unsigned char *rbuf;
	sense_reason_t ret = 0;
	int i;

	if (!cmd->se_dev->dev_attrib.emulate_rsoc)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	rbuf = transport_kmap_data_sg(cmd);
	if (cmd->data_length && !rbuf) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}

	/*
	 * First pass: compute a worst-case allocation size (disabled
	 * descriptors may make the actual response smaller).
	 */
	if (opts == 0)
		response_length = 4 + (8 + rctd * 12) * descr_num;
	else {
		ret = spc_rsoc_get_descr(cmd, &descr);
		if (ret)
			goto out;

		if (descr)
			response_length = 4 + descr->cdb_size + rctd * 12;
		else
			response_length = 2;
	}

	buf = kzalloc(response_length, GFP_KERNEL);
	if (!buf) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}
	/* Reused below as the running encode offset. */
	response_length = 0;

	if (opts == 0) {
		response_length += 4;

		for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
			descr = tcm_supported_opcodes[i];
			if (descr->enabled && !descr->enabled(descr, cmd))
				continue;

			response_length += spc_rsoc_encode_command_descriptor(
					&buf[response_length], rctd, descr);
		}
		/* COMMAND DATA LENGTH excludes the 4-byte header itself. */
		put_unaligned_be32(response_length - 4, buf);
	} else {
		response_length = spc_rsoc_encode_one_command_descriptor(
				&buf[response_length], rctd, descr,
				cmd->se_dev);
	}

	/* Truncate to the initiator's allocation length. */
	memcpy(rbuf, buf, min_t(u32, response_length, cmd->data_length));
out:
	kfree(buf);
	transport_kunmap_data_sg(cmd);

	if (!ret)
		target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, response_length);
	return ret;
}
2321
/*
 * Build the peripheral-device text identification data for REPORT
 * IDENTIFYING INFORMATION: a 4-byte header (IDENTIFYING INFORMATION
 * LENGTH in bytes 2-3) followed by the NUL-terminated pd_text_id_info
 * string, truncated to the initiator's allocation length (cdb[6..9]).
 */
static sense_reason_t
spc_fill_pd_text_id_info(struct se_cmd *cmd, u8 *cdb)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf;
	unsigned char *rbuf;
	u32 buf_len;
	u16 data_len;

	buf_len = get_unaligned_be32(&cdb[6]);
	/* Need room for at least the 4-byte header. */
	if (buf_len < PD_TEXT_ID_INFO_HDR_LEN)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	data_len = strlen(dev->t10_wwn.pd_text_id_info);
	if (data_len > 0)
		/* trailing null */
		data_len += 1;

	data_len = data_len + PD_TEXT_ID_INFO_HDR_LEN;

	/* Never copy more than the full header + string. */
	if (data_len < buf_len)
		buf_len = data_len;

	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate response buffer for IDENTITY INFO\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	/* scnprintf truncates safely when buf_len < data_len. */
	scnprintf(&buf[PD_TEXT_ID_INFO_HDR_LEN], buf_len - PD_TEXT_ID_INFO_HDR_LEN, "%s",
		  dev->t10_wwn.pd_text_id_info);

	/* Report the full (untruncated) length in the header. */
	put_unaligned_be16(data_len, &buf[2]);

	rbuf = transport_kmap_data_sg(cmd);
	if (!rbuf) {
		pr_err("transport_kmap_data_sg() failed in %s\n", __func__);
		kfree(buf);
		return TCM_OUT_OF_RESOURCES;
	}

	memcpy(rbuf, buf, buf_len);
	transport_kunmap_data_sg(cmd);
	kfree(buf);

	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, buf_len);
	return TCM_NO_SENSE;
}
2370
2371 static sense_reason_t
spc_emulate_report_id_info(struct se_cmd * cmd)2372 spc_emulate_report_id_info(struct se_cmd *cmd)
2373 {
2374 u8 *cdb = cmd->t_task_cdb;
2375 sense_reason_t rc;
2376
2377 switch ((cdb[10] >> 1)) {
2378 case 2:
2379 rc = spc_fill_pd_text_id_info(cmd, cdb);
2380 break;
2381 default:
2382 return TCM_UNSUPPORTED_SCSI_OPCODE;
2383 }
2384
2385 return rc;
2386 }
2387
/**
 * spc_parse_cdb - parse an SPC CDB and set up its emulation handler
 * @cmd: command whose t_task_cdb is to be parsed
 * @size: out parameter; expected data transfer length decoded from the CDB
 *
 * First validates reservation-related opcodes against the device's PR
 * emulation / passthrough settings, then selects the emulation callback
 * (cmd->execute_cmd) and decodes the allocation or parameter list length
 * into @size.
 *
 * Returns 0 on success, or TCM_UNSUPPORTED_SCSI_OPCODE for CDBs this
 * layer does not handle.
 */
sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Reject reservation opcodes when PR emulation is disabled, and
	 * SPC-2 RESERVE/RELEASE additionally when PGR is passed through
	 * to the backend device.
	 */
	switch (cdb[0]) {
	case RESERVE_6:
	case RESERVE_10:
	case RELEASE_6:
	case RELEASE_10:
		if (!dev->dev_attrib.emulate_pr)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
		if (!dev->dev_attrib.emulate_pr)
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	switch (cdb[0]) {
	case MODE_SELECT:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SELECT_10:
		*size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case MODE_SENSE_10:
		*size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case LOG_SELECT:
	case LOG_SENSE:
		*size = get_unaligned_be16(&cdb[7]);
		break;
	case PERSISTENT_RESERVE_IN:
		*size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = target_scsi3_emulate_pr_in;
		break;
	case PERSISTENT_RESERVE_OUT:
		*size = get_unaligned_be32(&cdb[5]);
		cmd->execute_cmd = target_scsi3_emulate_pr_out;
		break;
	case RELEASE_6:
	case RELEASE_10:
		if (cdb[0] == RELEASE_10)
			*size = get_unaligned_be16(&cdb[7]);
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_release;
		break;
	case RESERVE_6:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			*size = get_unaligned_be16(&cdb[7]);
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_reserve;
		break;
	case REQUEST_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_request_sense;
		break;
	case INQUIRY:
		*size = get_unaligned_be16(&cdb[3]);

		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		cmd->execute_cmd = spc_emulate_inquiry;
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		*size = get_unaligned_be32(&cdb[6]);
		break;
	case EXTENDED_COPY:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_xcopy;
		break;
	case RECEIVE_COPY_RESULTS:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_receive_copy_results;
		break;
	case READ_ATTRIBUTE:
	case WRITE_ATTRIBUTE:
		*size = get_unaligned_be32(&cdb[10]);
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		*size = get_unaligned_be16(&cdb[3]);
		break;
	case WRITE_BUFFER:
		*size = get_unaligned_be24(&cdb[6]);
		break;
	case REPORT_LUNS:
		cmd->execute_cmd = spc_emulate_report_luns;
		*size = get_unaligned_be32(&cdb[6]);
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case TEST_UNIT_READY:
		cmd->execute_cmd = spc_emulate_testunitready;
		*size = 0;
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_IN from SCC-2
			 * Check for emulated MI_REPORT_TARGET_PGS
			 */
			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_report_target_port_groups;
			}
			if ((cdb[1] & 0x1f) ==
			    MI_REPORT_SUPPORTED_OPERATION_CODES)
				cmd->execute_cmd =
					spc_emulate_report_supp_op_codes;
			if ((cdb[1] & 0x1f) ==
			    MI_REPORT_IDENTIFYING_INFORMATION) {
				cmd->execute_cmd =
					spc_emulate_report_id_info;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_OUT from SCC-2
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_set_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	default:
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return 0;
}
EXPORT_SYMBOL(spc_parse_cdb);
2565