/*******************************************************************************
 * This file contains main functions related to the iSCSI Target Core Driver.
 *
 * (c) Copyright 2007-2013 Datera, Inc.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ******************************************************************************/

#include <crypto/hash.h>
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/idr.h>
#include <linux/delay.h>
#include <linux/sched/signal.h>
#include <asm/unaligned.h>
#include <linux/inet.h>
#include <net/ipv6.h>
#include <scsi/scsi_proto.h>
#include <scsi/iscsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_login.h"
#include "iscsi_target_tmr.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_device.h"
#include <target/iscsi/iscsi_target_stat.h>

#include <target/iscsi/iscsi_transport.h>

static LIST_HEAD(g_tiqn_list);
static LIST_HEAD(g_np_list);
static DEFINE_SPINLOCK(tiqn_lock);
static DEFINE_MUTEX(np_lock);

static struct idr tiqn_idr;
DEFINE_IDA(sess_ida);
struct mutex auth_id_lock;

struct iscsit_global *iscsit_global;

struct kmem_cache *lio_qr_cache;
struct kmem_cache *lio_dr_cache;
struct kmem_cache *lio_ooo_cache;
struct kmem_cache *lio_r2t_cache;

static int iscsit_handle_immediate_data(struct iscsi_cmd *,
			struct iscsi_scsi_req *, u32);

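/*
 * Look up an active struct iscsi_tiqn by name for an incoming login and
 * take an access reference on it.  The caller must drop the reference
 * with iscsit_put_tiqn_for_login() when the login is finished with it.
 */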
struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
{
	struct iscsi_tiqn *tiqn = NULL;

	spin_lock(&tiqn_lock);
	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
		if (!strcmp(tiqn->tiqn, buf)) {

			spin_lock(&tiqn->tiqn_state_lock);
			if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
				tiqn->tiqn_access_count++;
				spin_unlock(&tiqn->tiqn_state_lock);
				spin_unlock(&tiqn_lock);
				return tiqn;
			}
			spin_unlock(&tiqn->tiqn_state_lock);
		}
	}
	spin_unlock(&tiqn_lock);

	return NULL;
}

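/*
 * Atomically transition an ACTIVE tiqn to TIQN_STATE_SHUTDOWN so that no
 * new logins can take a reference; returns -1 if the tiqn was not active.
 */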
static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
{
	spin_lock(&tiqn->tiqn_state_lock);
	if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
		tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
		spin_unlock(&tiqn->tiqn_state_lock);
		return 0;
	}
	spin_unlock(&tiqn->tiqn_state_lock);

	return -1;
}

void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
{
	spin_lock(&tiqn->tiqn_state_lock);
	tiqn->tiqn_access_count--;
	spin_unlock(&tiqn->tiqn_state_lock);
}

/*
 * Note that IQN formatting is expected to be done in userspace, and
 * no explicit IQN format checks are done here.
 */
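/*
 * For example, with the usual LIO configfs layout the IQN is created from
 * userspace (e.g. by targetcli) with something along the lines of:
 *
 *	mkdir /sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.host:sn.1234
 *
 * (example name only) and the already-formatted string is what gets
 * passed in here.
 */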
struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
{
	struct iscsi_tiqn *tiqn = NULL;
	int ret;

	if (strlen(buf) >= ISCSI_IQN_LEN) {
		pr_err("Target IQN exceeds %d bytes\n",
				ISCSI_IQN_LEN);
		return ERR_PTR(-EINVAL);
	}

	tiqn = kzalloc(sizeof(*tiqn), GFP_KERNEL);
	if (!tiqn)
		return ERR_PTR(-ENOMEM);

	sprintf(tiqn->tiqn, "%s", buf);
	INIT_LIST_HEAD(&tiqn->tiqn_list);
	INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
	spin_lock_init(&tiqn->tiqn_state_lock);
	spin_lock_init(&tiqn->tiqn_tpg_lock);
	spin_lock_init(&tiqn->sess_err_stats.lock);
	spin_lock_init(&tiqn->login_stats.lock);
	spin_lock_init(&tiqn->logout_stats.lock);

	tiqn->tiqn_state = TIQN_STATE_ACTIVE;

	idr_preload(GFP_KERNEL);
	spin_lock(&tiqn_lock);

	ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
	if (ret < 0) {
		pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
		spin_unlock(&tiqn_lock);
		idr_preload_end();
		kfree(tiqn);
		return ERR_PTR(ret);
	}
	tiqn->tiqn_index = ret;
	list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);

	spin_unlock(&tiqn_lock);
	idr_preload_end();

	pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);

	return tiqn;

}

static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
{
	/*
	 * Wait for accesses to said struct iscsi_tiqn to end.
	 */
	spin_lock(&tiqn->tiqn_state_lock);
	while (tiqn->tiqn_access_count != 0) {
		spin_unlock(&tiqn->tiqn_state_lock);
		msleep(10);
		spin_lock(&tiqn->tiqn_state_lock);
	}
	spin_unlock(&tiqn->tiqn_state_lock);
}

void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
{
	/*
	 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
	 * while holding tiqn->tiqn_state_lock.  This means that all subsequent
	 * attempts to access this struct iscsi_tiqn will fail from both transport
	 * fabric and control code paths.
	 */
	if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
		pr_err("iscsit_set_tiqn_shutdown() failed\n");
		return;
	}

	iscsit_wait_for_tiqn(tiqn);

	spin_lock(&tiqn_lock);
	list_del(&tiqn->tiqn_list);
	idr_remove(&tiqn_idr, tiqn->tiqn_index);
	spin_unlock(&tiqn_lock);

	pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
			tiqn->tiqn);
	kfree(tiqn);
}

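/*
 * Called during login to verify that both the network portal and the
 * portal group are accepting traffic, and to take tpg->np_login_sem so
 * that logins against this TIQN+TPG tuple are serialized.  Released
 * again via iscsit_deaccess_np().
 */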
int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
{
	int ret;
	/*
	 * Determine if the network portal is accepting storage traffic.
	 */
	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
		spin_unlock_bh(&np->np_thread_lock);
		return -1;
	}
	spin_unlock_bh(&np->np_thread_lock);
	/*
	 * Determine if the portal group is accepting storage traffic.
	 */
	spin_lock_bh(&tpg->tpg_state_lock);
	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
		spin_unlock_bh(&tpg->tpg_state_lock);
		return -1;
	}
	spin_unlock_bh(&tpg->tpg_state_lock);

	/*
	 * Here we serialize access across the TIQN+TPG Tuple.
	 */
	ret = down_interruptible(&tpg->np_login_sem);
	if (ret != 0)
		return -1;

	spin_lock_bh(&tpg->tpg_state_lock);
	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
		spin_unlock_bh(&tpg->tpg_state_lock);
		up(&tpg->np_login_sem);
		return -1;
	}
	spin_unlock_bh(&tpg->tpg_state_lock);

	return 0;
}

void iscsit_login_kref_put(struct kref *kref)
{
	struct iscsi_tpg_np *tpg_np = container_of(kref,
				struct iscsi_tpg_np, tpg_np_kref);

	complete(&tpg_np->tpg_np_comp);
}

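/*
 * Undo iscsit_access_np(): release the per-TPG login semaphore, drop the
 * tpg_np kref taken for the login, and put the tiqn login reference.
 */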
int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
		       struct iscsi_tpg_np *tpg_np)
{
	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

	up(&tpg->np_login_sem);

	if (tpg_np)
		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);

	if (tiqn)
		iscsit_put_tiqn_for_login(tiqn);

	return 0;
}

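/*
 * Return true if the given sockaddr/transport tuple matches an existing
 * np: same address family, IP address, port and network transport.
 */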
bool iscsit_check_np_match(
	struct sockaddr_storage *sockaddr,
	struct iscsi_np *np,
	int network_transport)
{
	struct sockaddr_in *sock_in, *sock_in_e;
	struct sockaddr_in6 *sock_in6, *sock_in6_e;
	bool ip_match = false;
	u16 port, port_e;

	if (sockaddr->ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)sockaddr;
		sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;

		if (!memcmp(&sock_in6->sin6_addr.in6_u,
			    &sock_in6_e->sin6_addr.in6_u,
			    sizeof(struct in6_addr)))
			ip_match = true;

		port = ntohs(sock_in6->sin6_port);
		port_e = ntohs(sock_in6_e->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)sockaddr;
		sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;

		if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
			ip_match = true;

		port = ntohs(sock_in->sin_port);
		port_e = ntohs(sock_in_e->sin_port);
	}

	if (ip_match && (port_e == port) &&
	    (np->np_network_transport == network_transport))
		return true;

	return false;
}

/*
 * Called with mutex np_lock held
 */
static struct iscsi_np *iscsit_get_np(
	struct sockaddr_storage *sockaddr,
	int network_transport)
{
	struct iscsi_np *np;
	bool match;

	list_for_each_entry(np, &g_np_list, np_list) {
		spin_lock_bh(&np->np_thread_lock);
		if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
			spin_unlock_bh(&np->np_thread_lock);
			continue;
		}

		match = iscsit_check_np_match(sockaddr, np, network_transport);
		if (match) {
			/*
			 * Increment the np_exports reference count now to
			 * prevent iscsit_del_np() below from being called
			 * while iscsi_tpg_add_network_portal() is called.
			 */
			np->np_exports++;
			spin_unlock_bh(&np->np_thread_lock);
			return np;
		}
		spin_unlock_bh(&np->np_thread_lock);
	}

	return NULL;
}

struct iscsi_np *iscsit_add_np(
	struct sockaddr_storage *sockaddr,
	int network_transport)
{
	struct iscsi_np *np;
	int ret;

	mutex_lock(&np_lock);

	/*
	 * Locate the existing struct iscsi_np if already active.
	 */
	np = iscsit_get_np(sockaddr, network_transport);
	if (np) {
		mutex_unlock(&np_lock);
		return np;
	}

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np) {
		mutex_unlock(&np_lock);
		return ERR_PTR(-ENOMEM);
	}

	np->np_flags |= NPF_IP_NETWORK;
	np->np_network_transport = network_transport;
	spin_lock_init(&np->np_thread_lock);
	init_completion(&np->np_restart_comp);
	INIT_LIST_HEAD(&np->np_list);

	timer_setup(&np->np_login_timer, iscsi_handle_login_thread_timeout, 0);

	ret = iscsi_target_setup_login_socket(np, sockaddr);
	if (ret != 0) {
		kfree(np);
		mutex_unlock(&np_lock);
		return ERR_PTR(ret);
	}

	np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
	if (IS_ERR(np->np_thread)) {
		pr_err("Unable to create kthread: iscsi_np\n");
		ret = PTR_ERR(np->np_thread);
		kfree(np);
		mutex_unlock(&np_lock);
		return ERR_PTR(ret);
	}
	/*
	 * Increment the np_exports reference count now to prevent
	 * iscsit_del_np() below from being run while a new call to
	 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
	 * active.  We don't need to hold np->np_thread_lock at this
	 * point because iscsi_np has not been added to g_np_list yet.
	 */
	np->np_exports = 1;
	np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;

	list_add_tail(&np->np_list, &g_np_list);
	mutex_unlock(&np_lock);

	pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
		&np->np_sockaddr, np->np_transport->name);

	return np;
}

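/*
 * Ask the np login thread to restart: mark it ISCSI_NP_THREAD_RESET, kick
 * it with SIGINT and wait for np_restart_comp.  On shutdown, also drop
 * the tpg_np kref and wait for the final reference to go away.
 */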
int iscsit_reset_np_thread(
	struct iscsi_np *np,
	struct iscsi_tpg_np *tpg_np,
	struct iscsi_portal_group *tpg,
	bool shutdown)
{
	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
		spin_unlock_bh(&np->np_thread_lock);
		return 0;
	}
	np->np_thread_state = ISCSI_NP_THREAD_RESET;
	atomic_inc(&np->np_reset_count);

	if (np->np_thread) {
		spin_unlock_bh(&np->np_thread_lock);
		send_sig(SIGINT, np->np_thread, 1);
		wait_for_completion(&np->np_restart_comp);
		spin_lock_bh(&np->np_thread_lock);
	}
	spin_unlock_bh(&np->np_thread_lock);

	if (tpg_np && shutdown) {
		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);

		wait_for_completion(&tpg_np->tpg_np_comp);
	}

	return 0;
}

static void iscsit_free_np(struct iscsi_np *np)
{
	if (np->np_socket)
		sock_release(np->np_socket);
}

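/*
 * Drop one export reference on the np.  When the last export goes away,
 * stop the login thread, let the transport release the socket, unhook
 * the np from g_np_list and free it.
 */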
int iscsit_del_np(struct iscsi_np *np)
{
	spin_lock_bh(&np->np_thread_lock);
	np->np_exports--;
	if (np->np_exports) {
		np->enabled = true;
		spin_unlock_bh(&np->np_thread_lock);
		return 0;
	}
	np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
	spin_unlock_bh(&np->np_thread_lock);

	if (np->np_thread) {
		/*
		 * We need to send the signal to wake up Linux/Net,
		 * which may be sleeping in sock_accept().
		 */
		send_sig(SIGINT, np->np_thread, 1);
		kthread_stop(np->np_thread);
		np->np_thread = NULL;
	}

	np->np_transport->iscsit_free_np(np);

	mutex_lock(&np_lock);
	list_del(&np->np_list);
	mutex_unlock(&np_lock);

	pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
		&np->np_sockaddr, np->np_transport->name);

	iscsit_put_transport(np->np_transport);
	kfree(np);
	return 0;
}

static void iscsit_get_rx_pdu(struct iscsi_conn *);

int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
}
EXPORT_SYMBOL(iscsit_queue_rsp);

void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node) &&
	    !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	__iscsit_free_cmd(cmd, true);
}
EXPORT_SYMBOL(iscsit_aborted_task);

static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
				      u32, u32, const void *, void *);
static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);

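/*
 * Build the kvec array for a non-DataIN response PDU (header, optional
 * HeaderDigest, payload, padding and DataDigest) and transmit it over
 * the traditional TCP socket path.
 */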
static int
iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			  const void *data_buf, u32 data_buf_len)
{
	struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
	struct kvec *iov;
	u32 niov = 0, tx_size = ISCSI_HDR_LEN;
	int ret;

	iov = &cmd->iov_misc[0];
	iov[niov].iov_base	= cmd->pdu;
	iov[niov++].iov_len	= ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
					  ISCSI_HDR_LEN, 0, NULL,
					  header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32C HeaderDigest"
			 " to opcode 0x%x 0x%08x\n",
			 hdr->opcode, *header_digest);
	}

	if (data_buf_len) {
		u32 padding = ((-data_buf_len) & 3);

		iov[niov].iov_base	= (void *)data_buf;
		iov[niov++].iov_len	= data_buf_len;
		tx_size += data_buf_len;

		if (padding != 0) {
			iov[niov].iov_base = &cmd->pad_bytes;
			iov[niov++].iov_len = padding;
			tx_size += padding;
			pr_debug("Attaching %u additional"
				 " padding bytes.\n", padding);
		}

		if (conn->conn_ops->DataDigest) {
			iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
						  data_buf, data_buf_len,
						  padding, &cmd->pad_bytes,
						  &cmd->data_crc);

			iov[niov].iov_base = &cmd->data_crc;
			iov[niov++].iov_len = ISCSI_CRC_LEN;
			tx_size += ISCSI_CRC_LEN;
			pr_debug("Attached DataDigest for %u"
				 " bytes opcode 0x%x, CRC 0x%08x\n",
				 data_buf_len, hdr->opcode, cmd->data_crc);
		}
	}

	cmd->iov_misc_count = niov;
	cmd->tx_size = tx_size;

	ret = iscsit_send_tx_data(cmd, conn, 1);
	if (ret < 0) {
		iscsit_tx_thread_wait_for_tcp(conn);
		return ret;
	}

	return 0;
}

static int iscsit_map_iovec(struct iscsi_cmd *, struct kvec *, u32, u32);
static void iscsit_unmap_iovec(struct iscsi_cmd *);
static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsi_cmd *,
				    u32, u32, u32, u8 *);
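/*
 * Build and send a DataIN PDU: map the relevant span of the se_cmd
 * scatterlist into cmd->iov_data, attach the negotiated digests and any
 * padding, then transmit via iscsit_fe_sendpage_sg().
 */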
static int
iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		       const struct iscsi_datain *datain)
{
	struct kvec *iov;
	u32 iov_count = 0, tx_size = 0;
	int ret, iov_ret;

	iov = &cmd->iov_data[0];
	iov[iov_count].iov_base	= cmd->pdu;
	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
	tx_size += ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
					  ISCSI_HDR_LEN, 0, NULL,
					  header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;

		pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
			 *header_digest);
	}

	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
				   datain->offset, datain->length);
	if (iov_ret < 0)
		return -1;

	iov_count += iov_ret;
	tx_size += datain->length;

	cmd->padding = ((-datain->length) & 3);
	if (cmd->padding) {
		iov[iov_count].iov_base		= cmd->pad_bytes;
		iov[iov_count++].iov_len	= cmd->padding;
		tx_size += cmd->padding;

		pr_debug("Attaching %u padding bytes\n", cmd->padding);
	}

	if (conn->conn_ops->DataDigest) {
		cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
							 cmd, datain->offset,
							 datain->length,
							 cmd->padding,
							 cmd->pad_bytes);

		iov[iov_count].iov_base	= &cmd->data_crc;
		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;

		pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
			 datain->length + cmd->padding, cmd->data_crc);
	}

	cmd->iov_data_count = iov_count;
	cmd->tx_size = tx_size;

	ret = iscsit_fe_sendpage_sg(cmd, conn);

	iscsit_unmap_iovec(cmd);

	if (ret < 0) {
		iscsit_tx_thread_wait_for_tcp(conn);
		return ret;
	}

	return 0;
}

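/* Dispatch transmission to the DataIN or non-DataIN path. */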
static int iscsit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			   struct iscsi_datain_req *dr, const void *buf,
			   u32 buf_len)
{
	if (dr)
		return iscsit_xmit_datain_pdu(conn, cmd, buf);
	else
		return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
}

static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
{
	return TARGET_PROT_NORMAL;
}

static struct iscsit_transport iscsi_target_transport = {
	.name			= "iSCSI/TCP",
	.transport_type		= ISCSI_TCP,
	.rdma_shutdown		= false,
	.owner			= NULL,
	.iscsit_setup_np	= iscsit_setup_np,
	.iscsit_accept_np	= iscsit_accept_np,
	.iscsit_free_np		= iscsit_free_np,
	.iscsit_get_login_rx	= iscsit_get_login_rx,
	.iscsit_put_login_tx	= iscsit_put_login_tx,
	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
	.iscsit_immediate_queue	= iscsit_immediate_queue,
	.iscsit_response_queue	= iscsit_response_queue,
	.iscsit_queue_data_in	= iscsit_queue_rsp,
	.iscsit_queue_status	= iscsit_queue_rsp,
	.iscsit_aborted_task	= iscsit_aborted_task,
	.iscsit_xmit_pdu	= iscsit_xmit_pdu,
	.iscsit_get_rx_pdu	= iscsit_get_rx_pdu,
	.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
};

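/*
 * Module init: allocate the global iscsit state, register the fabric
 * template, create the lookaside caches, register the built-in
 * iSCSI/TCP transport and bring up the discovery TPG.
 */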
static int __init iscsi_target_init_module(void)
{
	int ret = 0, size;

	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
	iscsit_global = kzalloc(sizeof(*iscsit_global), GFP_KERNEL);
	if (!iscsit_global)
		return -1;

	spin_lock_init(&iscsit_global->ts_bitmap_lock);
	mutex_init(&auth_id_lock);
	idr_init(&tiqn_idr);

	ret = target_register_template(&iscsi_ops);
	if (ret)
		goto out;

	size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
	iscsit_global->ts_bitmap = vzalloc(size);
	if (!iscsit_global->ts_bitmap)
		goto configfs_out;

	lio_qr_cache = kmem_cache_create("lio_qr_cache",
			sizeof(struct iscsi_queue_req),
			__alignof__(struct iscsi_queue_req), 0, NULL);
	if (!lio_qr_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_qr_cache\n");
		goto bitmap_out;
	}

	lio_dr_cache = kmem_cache_create("lio_dr_cache",
			sizeof(struct iscsi_datain_req),
			__alignof__(struct iscsi_datain_req), 0, NULL);
	if (!lio_dr_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_dr_cache\n");
		goto qr_out;
	}

	lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
			sizeof(struct iscsi_ooo_cmdsn),
			__alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
	if (!lio_ooo_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_ooo_cache\n");
		goto dr_out;
	}

	lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
			sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
			0, NULL);
	if (!lio_r2t_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_r2t_cache\n");
		goto ooo_out;
	}

	iscsit_register_transport(&iscsi_target_transport);

	if (iscsit_load_discovery_tpg() < 0)
		goto r2t_out;

	return ret;
r2t_out:
	iscsit_unregister_transport(&iscsi_target_transport);
	kmem_cache_destroy(lio_r2t_cache);
ooo_out:
	kmem_cache_destroy(lio_ooo_cache);
dr_out:
	kmem_cache_destroy(lio_dr_cache);
qr_out:
	kmem_cache_destroy(lio_qr_cache);
bitmap_out:
	vfree(iscsit_global->ts_bitmap);
configfs_out:
	/* XXX: this probably wants to be its own unwind step. */
	if (iscsit_global->discovery_tpg)
		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
	target_unregister_template(&iscsi_ops);
out:
	kfree(iscsit_global);
	return -ENOMEM;
}

static void __exit iscsi_target_cleanup_module(void)
{
	iscsit_release_discovery_tpg();
	iscsit_unregister_transport(&iscsi_target_transport);
	kmem_cache_destroy(lio_qr_cache);
	kmem_cache_destroy(lio_dr_cache);
	kmem_cache_destroy(lio_ooo_cache);
	kmem_cache_destroy(lio_r2t_cache);

	/*
	 * Shutdown discovery sessions and disable discovery TPG
	 */
	if (iscsit_global->discovery_tpg)
		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);

	target_unregister_template(&iscsi_ops);

	vfree(iscsit_global->ts_bitmap);
	kfree(iscsit_global);
}

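/*
 * Allocate and queue an ISCSI_OP_REJECT response carrying a copy of the
 * offending PDU header.  Always returns -1 so callers can propagate the
 * failure directly.
 */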
int iscsit_add_reject(
	struct iscsi_conn *conn,
	u8 reason,
	unsigned char *buf)
{
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_REJECT;
	cmd->reject_reason = reason;

	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
	if (!cmd->buf_ptr) {
		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
		iscsit_free_cmd(cmd, false);
		return -1;
	}

	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	cmd->i_state = ISTATE_SEND_REJECT;
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);

	return -1;
}
EXPORT_SYMBOL(iscsit_add_reject);

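/*
 * As iscsit_add_reject(), but for a PDU that already has an iscsi_cmd
 * associated with it; optionally links the command onto the connection
 * list and drops the se_cmd reference if one had been taken.
 */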
static int iscsit_add_reject_from_cmd(
	struct iscsi_cmd *cmd,
	u8 reason,
	bool add_to_conn,
	unsigned char *buf)
{
	struct iscsi_conn *conn;
	const bool do_put = cmd->se_cmd.se_tfo != NULL;

	if (!cmd->conn) {
		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
				cmd->init_task_tag);
		return -1;
	}
	conn = cmd->conn;

	cmd->iscsi_opcode = ISCSI_OP_REJECT;
	cmd->reject_reason = reason;

	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
	if (!cmd->buf_ptr) {
		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
		iscsit_free_cmd(cmd, false);
		return -1;
	}

	if (add_to_conn) {
		spin_lock_bh(&conn->cmd_lock);
		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
		spin_unlock_bh(&conn->cmd_lock);
	}

	cmd->i_state = ISTATE_SEND_REJECT;
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
	/*
	 * Perform the kref_put now if se_cmd has already been set up by
	 * iscsit_setup_scsi_cmd().
	 */
	if (do_put) {
		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
		target_put_sess_cmd(&cmd->se_cmd);
	}
	return -1;
}

static int iscsit_add_reject_cmd(struct iscsi_cmd *cmd, u8 reason,
				 unsigned char *buf)
{
	return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
}

int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
{
	return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
}
EXPORT_SYMBOL(iscsit_reject_cmd);

/*
 * Map some portion of the allocated scatterlist to an iovec, suitable for
 * kernel sockets to copy data in/out.
 */
static int iscsit_map_iovec(
	struct iscsi_cmd *cmd,
	struct kvec *iov,
	u32 data_offset,
	u32 data_length)
{
	u32 i = 0;
	struct scatterlist *sg;
	unsigned int page_off;

	/*
	 * We know each entry in t_data_sg contains a page.
	 */
	u32 ent = data_offset / PAGE_SIZE;

	if (ent >= cmd->se_cmd.t_data_nents) {
		pr_err("Initial page entry out-of-bounds\n");
		return -1;
	}

	sg = &cmd->se_cmd.t_data_sg[ent];
	page_off = (data_offset % PAGE_SIZE);

	cmd->first_data_sg = sg;
	cmd->first_data_sg_off = page_off;

	while (data_length) {
		u32 cur_len = min_t(u32, data_length, sg->length - page_off);

		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
		iov[i].iov_len = cur_len;

		data_length -= cur_len;
		page_off = 0;
		sg = sg_next(sg);
		i++;
	}

	cmd->kmapped_nents = i;

	return i;
}

static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
{
	u32 i;
	struct scatterlist *sg;

	sg = cmd->first_data_sg;

	for (i = 0; i < cmd->kmapped_nents; i++)
		kunmap(sg_page(&sg[i]));
}

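/*
 * Acknowledge status up to the initiator supplied ExpStatSN: move every
 * command in ISTATE_SENT_STATUS with an older StatSN onto a local list
 * and free it outside of conn->cmd_lock.
 */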
static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
{
	LIST_HEAD(ack_list);
	struct iscsi_cmd *cmd, *cmd_p;

	conn->exp_statsn = exp_statsn;

	if (conn->sess->sess_ops->RDMAExtensions)
		return;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
		spin_lock(&cmd->istate_lock);
		if ((cmd->i_state == ISTATE_SENT_STATUS) &&
		    iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
			cmd->i_state = ISTATE_REMOVE;
			spin_unlock(&cmd->istate_lock);
			list_move_tail(&cmd->i_conn_node, &ack_list);
			continue;
		}
		spin_unlock(&cmd->istate_lock);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		iscsit_free_cmd(cmd, false);
	}
}

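/*
 * Size cmd->iov_data for the worst case: one kvec per page of the data
 * buffer plus ISCSI_IOV_DATA_BUFFER extra entries of slack (header,
 * digests, padding).
 */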
static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
{
	u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));

	iov_count += ISCSI_IOV_DATA_BUFFER;
	cmd->iov_data = kcalloc(iov_count, sizeof(*cmd->iov_data), GFP_KERNEL);
	if (!cmd->iov_data)
		return -ENOMEM;

	cmd->orig_iov_data_count = iov_count;
	return 0;
}

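/*
 * First stage of SCSI Command PDU handling: validate the header flags
 * and lengths against the negotiated session and connection parameters,
 * and reject protocol violations before the command is set up further.
 */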
int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			  unsigned char *buf)
{
	int data_direction, payload_length;
	struct iscsi_scsi_req *hdr;
	int iscsi_task_attr;
	int sam_task_attr;

	atomic_long_inc(&conn->sess->cmd_pdus);

	hdr			= (struct iscsi_scsi_req *) buf;
	payload_length		= ntoh24(hdr->dlength);

	/* FIXME: Add checks for AdditionalHeaderSegment */

	if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
	    !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
		pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
				" not set. Bad iSCSI Initiator.\n");
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
		/*
		 * From RFC-3720 Section 10.3.1:
		 *
		 * "Either or both of R and W MAY be 1 when either the
		 *  Expected Data Transfer Length and/or Bidirectional Read
		 *  Expected Data Transfer Length are 0"
		 *
		 * For this case, go ahead and clear the unnecessary bits
		 * to avoid any confusion with ->data_direction.
		 */
		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;

		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
			" set when Expected Data Transfer Length is 0 for"
			" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
	}

	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
		pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
			" MUST be set if Expected Data Transfer Length is not 0."
			" Bad iSCSI Initiator\n");
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
	    (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
		pr_err("Bidirectional operations not supported!\n");
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
		pr_err("Illegally set Immediate Bit in iSCSI Initiator"
				" Scsi Command PDU.\n");
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if (payload_length && !conn->sess->sess_ops->ImmediateData) {
		pr_err("ImmediateData=No but DataSegmentLength=%u,"
			" protocol error.\n", payload_length);
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_PROTOCOL_ERROR, buf);
	}

	if ((be32_to_cpu(hdr->data_length) == payload_length) &&
	    (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
		pr_err("Expected Data Transfer Length and Length of"
			" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
			" bit is not set, protocol error\n");
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_PROTOCOL_ERROR, buf);
	}

	if (payload_length > be32_to_cpu(hdr->data_length)) {
		pr_err("DataSegmentLength: %u is greater than"
			" EDTL: %u, protocol error.\n", payload_length,
				hdr->data_length);
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_PROTOCOL_ERROR, buf);
	}

	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
		pr_err("DataSegmentLength: %u is greater than"
			" MaxXmitDataSegmentLength: %u, protocol error.\n",
			payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_PROTOCOL_ERROR, buf);
	}

	if (payload_length > conn->sess->sess_ops->FirstBurstLength) {