static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
{
-	u8 treq = to_nvmet_port(item)->disc_addr.treq &
-		  NVME_TREQ_SECURE_CHANNEL_MASK;
+	u8 treq = nvmet_port_disc_addr_treq_secure_channel(to_nvmet_port(item));
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
static ssize_t nvmet_addr_tsas_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
+	u8 treq = nvmet_port_disc_addr_treq_mask(port);
	u8 sectype;
	int i;
found:
	nvmet_port_init_tsas_tcp(port, sectype);
+	/*
+	 * The TLS implementation currently does not support
+	 * secure concatenation, so TREQ is always set to 'required'
+	 * if TLS is enabled.
+	 */
+	if (sectype == NVMF_TCP_SECTYPE_TLS13)
+		treq |= NVMF_TREQ_REQUIRED;
+	else
+		treq |= NVMF_TREQ_NOT_SPECIFIED;
+	port->disc_addr.treq = treq;
	return count;
}
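To make the TREQ bit handling above concrete, here is a small standalone sketch (not part of the patch) of what the store path leaves in port->disc_addr.treq when "tls1.3" is selected. The constant values are assumptions mirroring my reading of include/linux/nvme.h, and the masking step models what nvmet_port_disc_addr_treq_mask() is assumed to do.

/*
 * Standalone userspace illustration; constants are assumed to match
 * include/linux/nvme.h (secure-channel field in the low two bits,
 * 'required' == 0x1, SQ flow control disable == bit 2).
 */
#include <stdint.h>
#include <stdio.h>

#define NVME_TREQ_SECURE_CHANNEL_MASK	0x3
#define NVMF_TREQ_REQUIRED		0x1
#define NVMF_TREQ_DISABLE_SQFLOW	(1 << 2)

int main(void)
{
	/* treq before the store: SQ flow control disabled,
	 * secure-channel field not specified */
	uint8_t treq = NVMF_TREQ_DISABLE_SQFLOW;

	/* nvmet_port_disc_addr_treq_mask(): drop only the
	 * secure-channel field (assumed behaviour) */
	treq &= ~NVME_TREQ_SECURE_CHANNEL_MASK;

	/* sectype == NVMF_TCP_SECTYPE_TLS13: force TREQ to 'required' */
	treq |= NVMF_TREQ_REQUIRED;

	printf("treq = 0x%02x\n", treq);	/* prints 0x05 */
	return 0;
}

The point of masking first is that unrelated flags such as the SQ flow control bit survive a sectype change; only the secure-channel field is rewritten.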
ana_groups_group);
}
+static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *port)
+{
+	return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
+}
+
struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;
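For reference, the store path in configfs.c above calls nvmet_port_disc_addr_treq_mask(), which this excerpt does not show. Assuming it is simply the complement of the secure-channel accessor added to nvmet.h here, it would look roughly like the following sketch (not taken from the patch):

static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port)
{
	/* everything in treq except the secure-channel field */
	return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK);
}

Together, the two helpers split the single treq byte of the discovery address: the show path reads back only the secure-channel requirement, while the store paths preserve the remaining flags and overwrite just that field.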