/* Task Management Functions. Must be called from process context. */
int (*lldd_abort_task)(struct sas_task *);
int (*lldd_abort_task_set)(struct domain_device *, u8 *lun);
- int (*lldd_clear_aca)(struct domain_device *, u8 *lun);
int (*lldd_clear_task_set)(struct domain_device *, u8 *lun);
int (*lldd_I_T_nexus_reset)(struct domain_device *);
int (*lldd_lu_reset)(struct domain_device *, u8 *lun);
my_ha->sas_ha.lldd_abort_task = my_abort_task;
my_ha->sas_ha.lldd_abort_task_set = my_abort_task_set;
- my_ha->sas_ha.lldd_clear_aca = my_clear_aca;
my_ha->sas_ha.lldd_clear_task_set = my_clear_task_set;
my_ha->sas_ha.lldd_I_T_nexus_reset = NULL; (2)
my_ha->sas_ha.lldd_lu_reset = my_lu_reset;
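As a sketch only, a minimal LU reset handler matching the my_lu_reset
assignment above could look like the following, where my_hba,
my_issue_tmf() and MY_TMF_LU_RESET are hypothetical driver-side names,
and TMF_RESP_FUNC_COMPLETE/TMF_RESP_FUNC_FAILED are the libsas TMF
result codes::

	static int my_lu_reset(struct domain_device *dev, u8 *lun)
	{
		struct my_hba *hba = dev->port->ha->lldd_ha;

		/* Process context, so the TMF may block until the
		 * firmware responds. */
		if (my_issue_tmf(hba, dev, lun, MY_TMF_LU_RESET))
			return TMF_RESP_FUNC_FAILED;

		return TMF_RESP_FUNC_COMPLETE;
	}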
- BLK_EH_RESET_TIMER
This indicates that more time is required to finish the
- command. Timer is restarted. This action is counted as a
- retry and only allowed scmd->allowed + 1(!) times. Once the
- limit is reached, action for BLK_EH_DONE is taken instead.
+ command. Timer is restarted.
- BLK_EH_DONE
eh_timed_out() callback did not handle the command.
Step #2 is taken.
- 2. scsi_abort_command() is invoked to schedule an asynchrous abort.
- Asynchronous abort are not invoked for commands which the
- SCSI_EH_ABORT_SCHEDULED flag is set (this indicates that the command
- already had been aborted once, and this is a retry which failed),
- or when the EH deadline is expired. In these case Step #3 is taken.
+ 2. scsi_abort_command() is invoked to schedule an asynchronous abort which may
+ issue a retry scmd->allowed + 1 times. Asynchronous aborts are not invoked
+ for commands for which the SCSI_EH_ABORT_SCHEDULED flag is set (this
+ indicates that the command had already been aborted once, and this is a
+ retry which failed), when retries are exceeded, or when the EH deadline has
+ expired. In these cases Step #3 is taken.
3. scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD) is invoked for the
command. See [1-4] for more information.
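For illustration, a hedged sketch of an eh_timed_out handler feeding
step #1 above (my_fw_still_processing() is a hypothetical driver
helper)::

	static enum blk_eh_timer_return my_eh_timed_out(struct scsi_cmnd *scmd)
	{
		/* The firmware is still working on the command: ask the
		 * block layer to restart the timer. */
		if (my_fw_still_processing(scmd))
			return BLK_EH_RESET_TIMER;

		/* Not handled here; step #2, scsi_abort_command(), follows. */
		return BLK_EH_DONE;
	}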
1. Overview
2. UFS Architecture Overview
2.1 Application Layer
- 2.2 UFS Transport Protocol(UTP) layer
- 2.3 UFS Interconnect(UIC) Layer
+ 2.2 UFS Transport Protocol (UTP) layer
+ 2.3 UFS Interconnect (UIC) Layer
3. UFSHCD Overview
3.1 UFS controller initialization
3.2 UTP Transfer requests
1. Overview
===========
-Universal Flash Storage(UFS) is a storage specification for flash devices.
-It is aimed to provide a universal storage interface for both
-embedded and removable flash memory based storage in mobile
+Universal Flash Storage (UFS) is a storage specification for flash devices.
+It aims to provide a universal storage interface for both
+embedded and removable flash memory-based storage in mobile
devices such as smartphones and tablet computers. The specification
is defined by the JEDEC Solid State Technology Association. UFS is based
-on MIPI M-PHY physical layer standard. UFS uses MIPI M-PHY as the
+on the MIPI M-PHY physical layer standard. UFS uses MIPI M-PHY as the
physical layer and MIPI Unipro as the link layer.
-The main goals of UFS is to provide:
+The main goals of UFS are to provide:
* Optimized performance:
UFS has a layered communication architecture which is based on SCSI
SAM-5 architectural model.
-UFS communication architecture consists of following layers,
+The UFS communication architecture consists of the following layers:
2.1 Application Layer
---------------------
- The Application layer is composed of UFS command set layer(UCS),
+ The Application layer is composed of the UFS command set layer (UCS),
 Task Manager and Device Manager. The UFS interface is designed to be
 protocol agnostic; however, SCSI has been selected as a baseline
- protocol for versions 1.0 and 1.1 of UFS protocol layer.
+ protocol for versions 1.0 and 1.1 of the UFS protocol layer.
- UFS supports subset of SCSI commands defined by SPC-4 and SBC-3.
+ UFS supports a subset of SCSI commands defined by SPC-4 and SBC-3.
* UCS:
 It handles SCSI commands supported by the UFS specification.
requests which are used to modify and retrieve configuration
information of the device.
-2.2 UFS Transport Protocol(UTP) layer
--------------------------------------
+2.2 UFS Transport Protocol (UTP) layer
+--------------------------------------
- UTP layer provides services for
+ The UTP layer provides services for
the higher layers through Service Access Points. UTP defines 3
service access points for higher layers.
manager for device level operations. These device level operations
are done through query requests.
* UTP_CMD_SAP: Command service access point is exposed to UFS command
- set layer(UCS) to transport commands.
+ set layer (UCS) to transport commands.
* UTP_TM_SAP: Task management service access point is exposed to task
manager to transport task management functions.
- UTP transports messages through UFS protocol information unit(UPIU).
+ UTP transports messages through UFS protocol information units (UPIUs).
-2.3 UFS Interconnect(UIC) Layer
--------------------------------
+2.3 UFS Interconnect (UIC) Layer
+--------------------------------
- UIC is the lowest layer of UFS layered architecture. It handles
- connection between UFS host and UFS device. UIC consists of
+ UIC is the lowest layer of the UFS layered architecture. It handles
+ the connection between UFS host and UFS device. UIC consists of
MIPI UniPro and MIPI M-PHY. UIC provides 2 service access points
- to upper layer,
+ to the upper layer:
* UIC_SAP: To transport UPIU between UFS host and UFS device.
* UIO_SAP: To issue commands to Unipro layers.
3. UFSHCD Overview
==================
-The UFS host controller driver is based on Linux SCSI Framework.
-UFSHCD is a low level device driver which acts as an interface between
-SCSI Midlayer and PCIe based UFS host controllers.
+The UFS host controller driver is based on the Linux SCSI Framework.
+UFSHCD is a low-level device driver which acts as an interface between
+the SCSI Midlayer and PCIe-based UFS host controllers.
-The current UFSHCD implementation supports following functionality,
+The current UFSHCD implementation supports the following functionality:
3.1 UFS controller initialization
---------------------------------
- The initialization module brings UFS host controller to active state
- and prepares the controller to transfer commands/response between
+ The initialization module brings the UFS host controller to active state
+ and prepares the controller to transfer commands/responses between
UFSHCD and UFS device.
3.2 UTP Transfer requests
-------------------------
Transfer request handling module of UFSHCD receives SCSI commands
- from SCSI Midlayer, forms UPIUs and issues the UPIUs to UFS Host
- controller. Also, the module decodes, responses received from UFS
+ from the SCSI Midlayer, forms UPIUs and issues the UPIUs to the UFS Host
+ controller. Also, the module decodes responses received from the UFS
 host controller in the form of UPIUs and informs the SCSI Midlayer
 of the status of the command.
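 As an illustrative sketch only (these helper names are hypothetical and
 do not match the real ufshcd internals), the request path described
 above amounts to::

	/* queuecommand: build a command UPIU from the SCSI command and
	 * hand it to the host controller. */
	static int my_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
	{
		struct my_hba *hba = shost_priv(host);

		my_compose_command_upiu(hba, cmd);
		my_ring_doorbell(hba, cmd);
		return 0;
	}

	/* completion: decode the response UPIU and inform the midlayer. */
	static void my_complete(struct my_hba *hba, struct scsi_cmnd *cmd)
	{
		cmd->result = my_decode_response_upiu(hba, cmd);
		scsi_done(cmd);
	}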
3.3 UFS error handling
----------------------
 The error handling module handles Host controller fatal errors,
- Device fatal errors and UIC interconnect layer related errors.
+ Device fatal errors and UIC interconnect layer-related errors.
3.4 SCSI Error handling
-----------------------
This is done through UFSHCD SCSI error handling routines registered
- with SCSI Midlayer. Examples of some of the error handling commands
- issues by SCSI Midlayer are Abort task, Lun reset and host reset.
+ with the SCSI Midlayer. Examples of some of the error handling commands
+ issued by the SCSI Midlayer are Abort task, LUN reset and host reset.
 UFSHCD routines to perform these tasks are registered with the
 SCSI Midlayer through .eh_abort_handler, .eh_device_reset_handler and
.eh_host_reset_handler.
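 As a hedged sketch (handler names here are illustrative, not the
 actual ufshcd symbols; the fields are the standard scsi_host_template
 error handling hooks)::

	static struct scsi_host_template my_ufshcd_template = {
		.name			 = "my-ufshcd",
		.eh_abort_handler	 = my_ufshcd_abort,
		.eh_device_reset_handler = my_ufshcd_device_reset,
		.eh_host_reset_handler	 = my_ufshcd_host_reset,
	};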
-In this version of UFSHCD Query requests and power management
+In this version of UFSHCD, Query requests and power management
functionality are not implemented.
4. BSG Support
sg_io_v4.
The userspace tool that interacts with the ufs-bsg endpoint and uses its
-upiu-based protocol is available at:
+UPIU-based protocol is available at:
https://github.com/westerndigitalcorporation/ufs-tool
For more detailed information about the tool and its supported
features, please see the tool's README.
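As a rough userspace sketch, a request to the ufs-bsg endpoint is built
around struct sg_io_v4 from <linux/bsg.h> and issued with the SG_IO
ioctl. The ufs_bsg_request/ufs_bsg_reply structures come from the
<scsi/scsi_bsg_ufs.h> UAPI header, and the bsg device path passed in is
an assumption::

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/bsg.h>
	#include <scsi/sg.h>
	#include <scsi/scsi_bsg_ufs.h>

	/* path: e.g. a node under /dev/bsg/ created for the UFS device */
	static int send_upiu(const char *path, struct ufs_bsg_request *req,
			     struct ufs_bsg_reply *rsp)
	{
		struct sg_io_v4 io;
		int fd, ret;

		fd = open(path, O_RDWR);
		if (fd < 0)
			return -1;

		memset(&io, 0, sizeof(io));
		io.guard = 'Q';			/* selects the v4 interface */
		io.protocol = BSG_PROTOCOL_SCSI;
		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io.request = (uintptr_t)req;
		io.request_len = sizeof(*req);
		io.response = (uintptr_t)rsp;
		io.max_response_len = sizeof(*rsp);

		ret = ioctl(fd, SG_IO, &io);	/* 0 on success */
		close(fd);
		return ret;
	}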
-UFS Specifications can be found at:
+UFS specifications can be found at:
- UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
- UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
REQ_OP_NAME(ZONE_CLOSE),
REQ_OP_NAME(ZONE_FINISH),
REQ_OP_NAME(ZONE_APPEND),
- REQ_OP_NAME(WRITE_SAME),
REQ_OP_NAME(WRITE_ZEROES),
REQ_OP_NAME(DRV_IN),
REQ_OP_NAME(DRV_OUT),
if (!blk_queue_secure_erase(q))
goto not_supported;
break;
- case REQ_OP_WRITE_SAME:
- if (!q->limits.max_write_same_sectors)
- goto not_supported;
- break;
case REQ_OP_ZONE_APPEND:
status = blk_check_zone_append(q, bio);
if (status != BLK_STS_OK)
* go through the normal accounting stuff before submission.
*/
if (bio_has_data(bio)) {
- unsigned int count;
-
- if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
- count = queue_logical_block_size(
- bdev_get_queue(bio->bi_bdev)) >> 9;
- else
- count = bio_sectors(bio);
+ unsigned int count = bio_sectors(bio);
if (op_is_write(bio_op(bio))) {
count_vm_events(PGPGOUT, count);
}
EXPORT_SYMBOL(blkdev_issue_discard);
-/**
- * __blkdev_issue_write_same - generate number of bios with same page
- * @bdev: target blockdev
- * @sector: start sector
- * @nr_sects: number of sectors to write
- * @gfp_mask: memory allocation flags (for bio_alloc)
- * @page: page containing data to write
- * @biop: pointer to anchor bio
- *
- * Description:
- * Generate and issue number of bios(REQ_OP_WRITE_SAME) with same page.
- */
-static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, struct page *page,
- struct bio **biop)
-{
- struct request_queue *q = bdev_get_queue(bdev);
- unsigned int max_write_same_sectors;
- struct bio *bio = *biop;
- sector_t bs_mask;
-
- if (bdev_read_only(bdev))
- return -EPERM;
-
- bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
- if ((sector | nr_sects) & bs_mask)
- return -EINVAL;
-
- if (!bdev_write_same(bdev))
- return -EOPNOTSUPP;
-
- /* Ensure that max_write_same_sectors doesn't overflow bi_size */
- max_write_same_sectors = bio_allowed_max_sectors(q);
-
- while (nr_sects) {
- bio = blk_next_bio(bio, bdev, 1, REQ_OP_WRITE_SAME, gfp_mask);
- bio->bi_iter.bi_sector = sector;
- bio->bi_vcnt = 1;
- bio->bi_io_vec->bv_page = page;
- bio->bi_io_vec->bv_offset = 0;
- bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
-
- if (nr_sects > max_write_same_sectors) {
- bio->bi_iter.bi_size = max_write_same_sectors << 9;
- nr_sects -= max_write_same_sectors;
- sector += max_write_same_sectors;
- } else {
- bio->bi_iter.bi_size = nr_sects << 9;
- nr_sects = 0;
- }
- cond_resched();
- }
-
- *biop = bio;
- return 0;
-}
-
-/**
- * blkdev_issue_write_same - queue a write same operation
- * @bdev: target blockdev
- * @sector: start sector
- * @nr_sects: number of sectors to write
- * @gfp_mask: memory allocation flags (for bio_alloc)
- * @page: page containing data
- *
- * Description:
- * Issue a write same request for the sectors in question.
- */
-int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask,
- struct page *page)
-{
- struct bio *bio = NULL;
- struct blk_plug plug;
- int ret;
-
- blk_start_plug(&plug);
- ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
- &bio);
- if (ret == 0 && bio) {
- ret = submit_bio_wait(bio);
- bio_put(bio);
- }
- blk_finish_plug(&plug);
- return ret;
-}
-EXPORT_SYMBOL(blkdev_issue_write_same);
-
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
struct bio **biop, unsigned flags)
return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}
-static struct bio *blk_bio_write_same_split(struct request_queue *q,
- struct bio *bio,
- struct bio_set *bs,
- unsigned *nsegs)
-{
- *nsegs = 1;
-
- if (!q->limits.max_write_same_sectors)
- return NULL;
-
- if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
- return NULL;
-
- return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
-}
-
/*
* Return the maximum number of sectors from the start of a bio that may be
* submitted as a single request to a block device. If enough sectors remain,
split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
nr_segs);
break;
- case REQ_OP_WRITE_SAME:
- split = blk_bio_write_same_split(q, *bio, &q->bio_split,
- nr_segs);
- break;
default:
split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
break;
return 1;
case REQ_OP_WRITE_ZEROES:
return 0;
- case REQ_OP_WRITE_SAME:
- return 1;
}
rq_for_each_bvec(bv, rq, iter)
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
- else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
- nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
else if (rq->bio)
nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
return ELEVATOR_NO_MERGE;
}
-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
-{
- if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b))
- return true;
- return false;
-}
-
/*
* For non-mq, this has to be called with the request spinlock acquired.
* For mq with scheduling, the appropriate queue wide lock should be held.
if (rq_data_dir(req) != rq_data_dir(next))
return NULL;
- if (req_op(req) == REQ_OP_WRITE_SAME &&
- !blk_write_same_mergeable(req->bio, next->bio))
- return NULL;
-
/*
* Don't allow merge of different write hints, or for a hint with
* non-hint IO.
if (!bio_crypt_rq_ctx_compatible(rq, bio))
return false;
- /* must be using the same buffer */
- if (req_op(rq) == REQ_OP_WRITE_SAME &&
- !blk_write_same_mergeable(rq->bio, bio))
- return false;
-
/*
* Don't allow merge of different write hints, or for a hint with
* non-hint IO.
lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
lim->max_dev_sectors = 0;
lim->chunk_sectors = 0;
- lim->max_write_same_sectors = 0;
lim->max_write_zeroes_sectors = 0;
lim->max_zone_append_sectors = 0;
lim->max_discard_sectors = 0;
lim->max_segment_size = UINT_MAX;
lim->max_sectors = UINT_MAX;
lim->max_dev_sectors = UINT_MAX;
- lim->max_write_same_sectors = UINT_MAX;
lim->max_write_zeroes_sectors = UINT_MAX;
lim->max_zone_append_sectors = UINT_MAX;
}
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);
-/**
- * blk_queue_max_write_same_sectors - set max sectors for a single write same
- * @q: the request queue for the device
- * @max_write_same_sectors: maximum number of sectors to write per command
- **/
-void blk_queue_max_write_same_sectors(struct request_queue *q,
- unsigned int max_write_same_sectors)
-{
- q->limits.max_write_same_sectors = max_write_same_sectors;
-}
-EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
-
/**
* blk_queue_max_write_zeroes_sectors - set max sectors for a single
* write zeroes
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
- t->max_write_same_sectors = min(t->max_write_same_sectors,
- b->max_write_same_sectors);
t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
b->max_write_zeroes_sectors);
t->max_zone_append_sectors = min(t->max_zone_append_sectors,
static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
- return sprintf(page, "%llu\n",
- (unsigned long long)q->limits.max_write_same_sectors << 9);
+ return queue_var_show(0, page);
}
static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
switch (req_op(rq)) {
case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
case REQ_OP_WRITE:
return blk_rq_zone_is_seq(rq);
default:
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
return true; /* non-trivial splitting decisions */
default:
break;
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
break;
- case REQ_OP_WRITE_SAME:
- bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
- break;
default:
bio_for_each_segment(bv, bio_src, iter)
bio->bi_io_vec[bio->bi_vcnt++] = bv;
*/
bool ata_scsi_dma_need_drain(struct request *rq)
{
- return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+
+ return atapi_cmd_type(scmd->cmnd[0]) == ATAPI_MISC;
}
EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
p->qlim->io_min = cpu_to_be32(queue_io_min(q));
p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
p->qlim->discard_enabled = blk_queue_discard(q);
- p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
+ p->qlim->write_same_capable = 0;
} else {
q = device->rq_queue;
p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
? 0 : MSG_MORE);
if (err)
return err;
- /* REQ_OP_WRITE_SAME has only one segment */
- if (bio_op(bio) == REQ_OP_WRITE_SAME)
- break;
}
return 0;
}
bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
if (err)
return err;
- /* REQ_OP_WRITE_SAME has only one segment */
- if (bio_op(bio) == REQ_OP_WRITE_SAME)
- break;
}
return 0;
}
return (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
- (bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
(bio_op(bio) == REQ_OP_WRITE_ZEROES ?
((connection->agreed_features & DRBD_FF_WZEROES) ?
struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_data *p;
- struct p_wsame *wsame = NULL;
void *digest_out;
unsigned int dp_flags = 0;
int digest_size;
err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*t), NULL, 0);
goto out;
}
- if (dp_flags & DP_WSAME) {
- /* this will only work if DRBD_FF_WSAME is set AND the
- * handshake agreed that all nodes and backend devices are
- * WRITE_SAME capable and agree on logical_block_size */
- wsame = (struct p_wsame*)p;
- digest_out = wsame + 1;
- wsame->size = cpu_to_be32(req->i.size);
- } else
- digest_out = p + 1;
+ digest_out = p + 1;
/* our digest is still only over the payload.
* TRIM does not carry any payload. */
if (digest_size)
drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
- if (wsame) {
- err =
- __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
- sizeof(*wsame) + digest_size, NULL,
- bio_iovec(req->master_bio).bv_len);
- } else
- err =
- __send_command(peer_device->connection, device->vnr, sock, P_DATA,
- sizeof(*p) + digest_size, NULL, req->i.size);
+ err = __send_command(peer_device->connection, device->vnr, sock, P_DATA,
+ sizeof(*p) + digest_size, NULL, req->i.size);
if (!err) {
/* For protocol A, we have to memcpy the payload into
* socket buffers, as we may complete right away
q->limits.max_write_zeroes_sectors = 0;
}
-static void decide_on_write_same_support(struct drbd_device *device,
- struct request_queue *q,
- struct request_queue *b, struct o_qlim *o,
- bool disable_write_same)
-{
- struct drbd_peer_device *peer_device = first_peer_device(device);
- struct drbd_connection *connection = peer_device->connection;
- bool can_do = b ? b->limits.max_write_same_sectors : true;
-
- if (can_do && disable_write_same) {
- can_do = false;
- drbd_info(peer_device, "WRITE_SAME disabled by config\n");
- }
-
- if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
- can_do = false;
- drbd_info(peer_device, "peer does not support WRITE_SAME\n");
- }
-
- if (o) {
- /* logical block size; queue_logical_block_size(NULL) is 512 */
- unsigned int peer_lbs = be32_to_cpu(o->logical_block_size);
- unsigned int me_lbs_b = queue_logical_block_size(b);
- unsigned int me_lbs = queue_logical_block_size(q);
-
- if (me_lbs_b != me_lbs) {
- drbd_warn(device,
- "logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n",
- me_lbs, me_lbs_b);
- /* rather disable write same than trigger some BUG_ON later in the scsi layer. */
- can_do = false;
- }
- if (me_lbs_b != peer_lbs) {
- drbd_warn(peer_device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n",
- me_lbs, peer_lbs);
- if (can_do) {
- drbd_dbg(peer_device, "logical block size mismatch: WRITE_SAME disabled.\n");
- can_do = false;
- }
- me_lbs = max(me_lbs, me_lbs_b);
- /* We cannot change the logical block size of an in-use queue.
- * We can only hope that access happens to be properly aligned.
- * If not, the peer will likely produce an IO error, and detach. */
- if (peer_lbs > me_lbs) {
- if (device->state.role != R_PRIMARY) {
- blk_queue_logical_block_size(q, peer_lbs);
- drbd_warn(peer_device, "logical block size set to %u\n", peer_lbs);
- } else {
- drbd_warn(peer_device,
- "current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n",
- me_lbs, peer_lbs);
- }
- }
- }
- if (can_do && !o->write_same_capable) {
- /* If we introduce an open-coded write-same loop on the receiving side,
- * the peer would present itself as "capable". */
- drbd_dbg(peer_device, "WRITE_SAME disabled (peer device not capable)\n");
- can_do = false;
- }
- }
-
- blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0);
-}
-
static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
unsigned int max_bio_size, struct o_qlim *o)
{
struct request_queue *b = NULL;
struct disk_conf *dc;
bool discard_zeroes_if_aligned = true;
- bool disable_write_same = false;
if (bdev) {
b = bdev->backing_bdev->bd_disk->queue;
dc = rcu_dereference(device->ldev->disk_conf);
max_segments = dc->max_bio_bvecs;
discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
- disable_write_same = dc->disable_write_same;
rcu_read_unlock();
blk_set_stacking_limits(&q->limits);
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
blk_queue_segment_boundary(q, PAGE_SIZE-1);
decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
- decide_on_write_same_support(device, q, b, o, disable_write_same);
if (b) {
blk_stack_limits(&q->limits, &b->limits, 0);
if (write_ordering_changed(old_disk_conf, new_disk_conf))
drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
- if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned
- || old_disk_conf->disable_write_same != new_disk_conf->disable_write_same)
+ if (old_disk_conf->discard_zeroes_if_aligned !=
+ new_disk_conf->discard_zeroes_if_aligned)
drbd_reconsider_queue_parameters(device, device->ldev, NULL);
drbd_md_sync(device);
drbd_endio_write_sec_final(peer_req);
}
-static void drbd_issue_peer_wsame(struct drbd_device *device,
- struct drbd_peer_request *peer_req)
-{
- struct block_device *bdev = device->ldev->backing_bdev;
- sector_t s = peer_req->i.sector;
- sector_t nr = peer_req->i.size >> 9;
- if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages))
- peer_req->flags |= EE_WAS_ERROR;
- drbd_endio_write_sec_final(peer_req);
-}
-
-
-/*
+/**
* drbd_submit_peer_request()
* @device: DRBD device.
* @peer_req: peer request
* Correctness first, performance later. Next step is to code an
* asynchronous variant of the same.
*/
- if (peer_req->flags & (EE_TRIM|EE_WRITE_SAME|EE_ZEROOUT)) {
+ if (peer_req->flags & (EE_TRIM | EE_ZEROOUT)) {
/* wait for all pending IO completions, before we start
* zeroing things out. */
conn_wait_active_ee_empty(peer_req->peer_device->connection);
spin_unlock_irq(&device->resource->req_lock);
}
- if (peer_req->flags & (EE_TRIM|EE_ZEROOUT))
- drbd_issue_peer_discard_or_zero_out(device, peer_req);
- else /* EE_WRITE_SAME */
- drbd_issue_peer_wsame(device, peer_req);
+ drbd_issue_peer_discard_or_zero_out(device, peer_req);
return 0;
}
unsigned long *data;
struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
struct p_trim *zeroes = (pi->cmd == P_ZEROES) ? pi->data : NULL;
- struct p_trim *wsame = (pi->cmd == P_WSAME) ? pi->data : NULL;
digest_size = 0;
if (!trim && peer_device->connection->peer_integrity_tfm) {
data_size -= digest_size;
}
- /* assume request_size == data_size, but special case trim and wsame. */
+ /* assume request_size == data_size, but special case trim. */
ds = data_size;
if (trim) {
if (!expect(data_size == 0))
if (!expect(data_size == 0))
return NULL;
ds = be32_to_cpu(zeroes->size);
- } else if (wsame) {
- if (data_size != queue_logical_block_size(device->rq_queue)) {
- drbd_err(peer_device, "data size (%u) != drbd logical block size (%u)\n",
- data_size, queue_logical_block_size(device->rq_queue));
- return NULL;
- }
- if (data_size != bdev_logical_block_size(device->ldev->backing_bdev)) {
- drbd_err(peer_device, "data size (%u) != backend logical block size (%u)\n",
- data_size, bdev_logical_block_size(device->ldev->backing_bdev));
- return NULL;
- }
- ds = be32_to_cpu(wsame->size);
}
if (!expect(IS_ALIGNED(ds, 512)))
return NULL;
- if (trim || wsame || zeroes) {
+ if (trim || zeroes) {
if (!expect(ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
return NULL;
} else if (!expect(ds <= DRBD_MAX_BIO_SIZE))
peer_req->flags |= EE_ZEROOUT;
return peer_req;
}
- if (wsame)
- peer_req->flags |= EE_WRITE_SAME;
/* receive payload size bytes into page chain */
ds = data_size;
return REQ_OP_WRITE_ZEROES;
if (dpf & DP_DISCARD)
return REQ_OP_DISCARD;
- if (dpf & DP_WSAME)
- return REQ_OP_WRITE_SAME;
else
return REQ_OP_WRITE;
}
update_peer_seq(peer_device, peer_seq);
spin_lock_irq(&device->resource->req_lock);
}
- /* TRIM and WRITE_SAME are processed synchronously,
+ /* TRIM and ZEROES are processed synchronously,
* we wait for all pending requests, respectively wait for
* active_ee to become empty in drbd_submit_peer_request();
* better not add ourselves here. */
- if ((peer_req->flags & (EE_TRIM|EE_WRITE_SAME|EE_ZEROOUT)) == 0)
+ if ((peer_req->flags & (EE_TRIM | EE_ZEROOUT)) == 0)
list_add_tail(&peer_req->w.list, &device->active_ee);
spin_unlock_irq(&device->resource->req_lock);
[P_TRIM] = { 0, sizeof(struct p_trim), receive_Data },
[P_ZEROES] = { 0, sizeof(struct p_trim), receive_Data },
[P_RS_DEALLOCATED] = { 0, sizeof(struct p_block_desc), receive_rs_deallocated },
- [P_WSAME] = { 1, sizeof(struct p_wsame), receive_Data },
};
static void drbdd(struct drbd_connection *connection)
req->private_bio->bi_end_io = drbd_request_endio;
req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
- | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
| (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)
| (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
req->device = device;
src = bvec_kmap_local(&bvec);
crypto_shash_update(desc, src, bvec.bv_len);
kunmap_local(src);
-
- /* REQ_OP_WRITE_SAME has only one segment,
- * checksum the payload only once. */
- if (bio_op(bio) == REQ_OP_WRITE_SAME)
- break;
}
crypto_shash_final(desc, digest);
shash_desc_zero(desc);
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
struct request_queue *q = bdev_get_queue(pd->bdev);
+ struct scsi_cmnd *scmd;
struct request *rq;
int ret = 0;
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
if (cgc->buflen) {
ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
goto out;
}
- scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
- memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE);
+ scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
+ memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);
rq->timeout = 60*HZ;
if (cgc->quiet)
rq->rq_flags |= RQF_QUIET;
blk_execute_rq(rq, false);
- if (scsi_req(rq)->result)
+ if (scmd->result)
ret = -EIO;
out:
blk_mq_free_request(rq);
dev->nsectors = le64_to_cpu(rsp->nsectors);
dev->logical_block_size = le16_to_cpu(rsp->logical_block_size);
dev->physical_block_size = le16_to_cpu(rsp->physical_block_size);
- dev->max_write_same_sectors = le32_to_cpu(rsp->max_write_same_sectors);
dev->max_discard_sectors = le32_to_cpu(rsp->max_discard_sectors);
dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
- blk_queue_max_write_same_sectors(dev->queue,
- dev->max_write_same_sectors);
/*
* we don't support discards to "discontiguous" segments
}
rnbd_clt_info(dev,
- "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
+ "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
dev->gd->disk_name, dev->nsectors,
dev->logical_block_size, dev->physical_block_size,
- dev->max_write_same_sectors, dev->max_discard_sectors,
+ dev->max_discard_sectors,
dev->discard_granularity, dev->discard_alignment,
dev->secure_discard, dev->max_segments,
dev->max_hw_sectors, dev->wc, dev->fua);
bool wc;
bool fua;
u32 max_hw_sectors;
- u32 max_write_same_sectors;
u32 max_discard_sectors;
u32 discard_granularity;
u32 discard_alignment;
case RNBD_OP_SECURE_ERASE:
bio_opf = REQ_OP_SECURE_ERASE;
break;
- case RNBD_OP_WRITE_SAME:
- bio_opf = REQ_OP_WRITE_SAME;
- break;
default:
WARN(1, "Unknown RNBD type: %d (flags %d)\n",
rnbd_op(rnbd_opf), rnbd_opf);
case REQ_OP_SECURE_ERASE:
rnbd_opf = RNBD_OP_SECURE_ERASE;
break;
- case REQ_OP_WRITE_SAME:
- rnbd_opf = RNBD_OP_WRITE_SAME;
- break;
case REQ_OP_FLUSH:
rnbd_opf = RNBD_OP_FLUSH;
break;
cpu_to_le16(rnbd_dev_get_max_segs(rnbd_dev));
rsp->max_hw_sectors =
cpu_to_le32(rnbd_dev_get_max_hw_sects(rnbd_dev));
- rsp->max_write_same_sectors =
- cpu_to_le32(bdev_write_same(rnbd_dev->bdev));
+ rsp->max_write_same_sectors = 0;
rsp->max_discard_sectors =
cpu_to_le32(rnbd_dev_get_max_discard_sects(rnbd_dev));
rsp->discard_granularity =
#include <linux/times.h>
#include <linux/uaccess.h>
#include <scsi/scsi_common.h>
-#include <scsi/scsi_request.h>
/* used to tell the module to turn on full debugging messages */
static bool debug;
.proc_name = "iscsi_iser",
.this_id = -1,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static struct iscsi_transport iscsi_iser_transport = {
#define DMF_EMULATE_ZONE_APPEND 9
void disable_discard(struct mapped_device *md);
-void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
static inline sector_t dm_get_size(struct mapped_device *md)
*/
switch (bio_op(ctx->bio_in)) {
case REQ_OP_WRITE:
- case REQ_OP_WRITE_SAME:
case REQ_OP_WRITE_ZEROES:
return true;
default:
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_secure_erase_bios = 0;
- ti->num_write_same_bios = 0;
ti->num_write_zeroes_bios = 0;
return 0;
bad:
unsigned num_bvecs;
sector_t remaining = where->count;
struct request_queue *q = bdev_get_queue(where->bdev);
- unsigned short logical_block_size = queue_logical_block_size(q);
sector_t num_sectors;
unsigned int special_cmd_max_sectors;
special_cmd_max_sectors = q->limits.max_discard_sectors;
else if (op == REQ_OP_WRITE_ZEROES)
special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
- else if (op == REQ_OP_WRITE_SAME)
- special_cmd_max_sectors = q->limits.max_write_same_sectors;
- if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
- op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
+ if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
+ special_cmd_max_sectors == 0) {
atomic_inc(&io->count);
dec_count(io, region, BLK_STS_NOTSUPP);
return;
case REQ_OP_WRITE_ZEROES:
num_bvecs = 0;
break;
- case REQ_OP_WRITE_SAME:
- num_bvecs = 1;
- break;
default:
num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
(PAGE_SIZE >> SECTOR_SHIFT)));
num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
remaining -= num_sectors;
- } else if (op == REQ_OP_WRITE_SAME) {
- /*
- * WRITE SAME only uses a single page.
- */
- dp->get_page(dp, &page, &len, &offset);
- bio_add_page(bio, page, logical_block_size, offset);
- num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
- bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
-
- offset = 0;
- remaining -= num_sectors;
- dp->next_page(dp);
} else while (remaining) {
/*
* Try and add as many pages as possible.
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_secure_erase_bios = 1;
- ti->num_write_same_bios = 1;
ti->num_write_zeroes_bios = 1;
ti->private = lc;
return 0;
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
- ti->num_write_same_bios = 1;
ti->num_write_zeroes_bios = 1;
if (m->queue_mode == DM_TYPE_BIO_BASED)
ti->per_io_data_size = multipath_per_bio_data_size();
if (req_op(clone) == REQ_OP_DISCARD &&
!clone->q->limits.max_discard_sectors)
disable_discard(tio->md);
- else if (req_op(clone) == REQ_OP_WRITE_SAME &&
- !clone->q->limits.max_write_same_sectors)
- disable_write_same(tio->md);
else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
!clone->q->limits.max_write_zeroes_sectors)
disable_write_zeroes(tio->md);
ti->num_flush_bios = stripes;
ti->num_discard_bios = stripes;
ti->num_secure_erase_bios = stripes;
- ti->num_write_same_bios = stripes;
ti->num_write_zeroes_bios = stripes;
sc->chunk_size = chunk_size;
}
if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
unlikely(bio_op(bio) == REQ_OP_SECURE_ERASE) ||
- unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES) ||
- unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
+ unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES)) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
BUG_ON(target_bio_nr >= sc->stripes);
return stripe_map_range(sc, bio, target_bio_nr);
return !blk_queue_add_random(q);
}
-static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !q->limits.max_write_same_sectors;
-}
-
-static bool dm_table_supports_write_same(struct dm_table *t)
-{
- struct dm_target *ti;
- unsigned i;
-
- for (i = 0; i < dm_table_get_num_targets(t); i++) {
- ti = dm_table_get_target(t, i);
-
- if (!ti->num_write_same_bios)
- return false;
-
- if (!ti->type->iterate_devices ||
- ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
- return false;
- }
-
- return true;
-}
-
static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
else
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- if (!dm_table_supports_write_same(t))
- q->limits.max_write_same_sectors = 0;
if (!dm_table_supports_write_zeroes(t))
q->limits.max_write_zeroes_sectors = 0;
switch (bio_op(bio)) {
case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
case REQ_OP_WRITE:
return !op_is_flush(bio->bi_opf) && bio_sectors(bio);
default:
case REQ_OP_ZONE_FINISH:
return true;
case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
case REQ_OP_WRITE:
/* Writes must be aligned to the zone write pointer */
if ((clone->bi_iter.bi_sector & (zsectors - 1)) != zwp_offset)
blk_queue_zone_sectors(md->queue));
return BLK_STS_OK;
case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
case REQ_OP_WRITE:
WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
return BLK_STS_OK;
return false;
switch (bio_op(orig_bio)) {
case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
case REQ_OP_WRITE:
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_FINISH:
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}
-void disable_write_same(struct mapped_device *md)
-{
- struct queue_limits *limits = dm_get_queue_limits(md);
-
- /* device doesn't really support WRITE SAME, disable it */
- limits->max_write_same_sectors = 0;
-}
-
void disable_write_zeroes(struct mapped_device *md)
{
struct queue_limits *limits = dm_get_queue_limits(md);
if (bio_op(bio) == REQ_OP_DISCARD &&
!q->limits.max_discard_sectors)
disable_discard(md);
- else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !q->limits.max_write_same_sectors)
- disable_write_same(md);
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!q->limits.max_write_zeroes_sectors)
disable_write_zeroes(md);
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
- case REQ_OP_WRITE_SAME:
case REQ_OP_WRITE_ZEROES:
r = true;
break;
case REQ_OP_SECURE_ERASE:
num_bios = ti->num_secure_erase_bios;
break;
- case REQ_OP_WRITE_SAME:
- num_bios = ti->num_write_same_bios;
- break;
case REQ_OP_WRITE_ZEROES:
num_bios = ti->num_write_zeroes_bios;
break;
if (mddev->gendisk)
trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
bio_sector);
- mddev_check_writesame(mddev, bio);
mddev_check_write_zeroes(mddev, bio);
submit_bio_noacct(bio);
}
mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
- mddev_check_writesame(mddev, &mp_bh->bio);
mddev_check_write_zeroes(mddev, &mp_bh->bio);
submit_bio_noacct(&mp_bh->bio);
return true;
mddev->flags &= ~unsupported_flags;
}
-static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
-{
- if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bio->bi_bdev->bd_disk->queue->limits.max_write_same_sectors)
- mddev->queue->limits.max_write_same_sectors = 0;
-}
-
static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
bool discard_supported = false;
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
- blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);
if (mddev->gendisk)
trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
bio_sector);
- mddev_check_writesame(mddev, bio);
mddev_check_write_zeroes(mddev, bio);
submit_bio_noacct(bio);
return true;
if (IS_ERR(conf))
return PTR_ERR(conf);
- if (mddev->queue) {
- blk_queue_max_write_same_sectors(mddev->queue, 0);
+ if (mddev->queue)
blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
- }
rdev_for_each(rdev, mddev) {
if (!mddev->gendisk)
if (mddev->queue) {
blk_queue_max_discard_sectors(mddev->queue,
UINT_MAX);
- blk_queue_max_write_same_sectors(mddev->queue, 0);
blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
raid10_set_io_opt(conf);
mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
- blk_queue_max_write_same_sectors(mddev->queue, 0);
blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
rdev_for_each(rdev, mddev) {
MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
return NULL;
- port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC);
+ port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
if (!port_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
/* SCSI needs scsi_cmnd lookup table!
* (with size equal to req_depth*PtrSz!)
*/
- ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC);
+ ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_KERNEL);
if (!ioc->ScsiLookup) {
error = -ENOMEM;
goto out_mptspi_probe;
slot->cmnd = SCp;
SCp->host_scribble = (unsigned char *)slot;
- SCp->SCp.ptr = NULL;
- SCp->SCp.buffer = NULL;
#ifdef NCR_700_DEBUG
printk("53c700: scsi%d, command ", SCp->device->host->host_no);
* On command termination, the done function will be called as
* appropriate.
*
- * SCSI pointers are maintained in the SCp field of SCSI command
- * structures, being initialized after the command is connected
+ * The command data pointer is initialized after the command is connected
* in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
* Note that in violation of the standard, an implicit SAVE POINTERS operation
* is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
static inline void initialize_SCp(struct scsi_cmnd *cmd)
{
- /*
- * Initialize the Scsi Pointer field so that all of the commands in the
- * various queues are valid.
- */
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ ncmd->buffer = scsi_sglist(cmd);
+ ncmd->ptr = sg_virt(ncmd->buffer);
+ ncmd->this_residual = ncmd->buffer->length;
} else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.ptr = NULL;
- cmd->SCp.this_residual = 0;
+ ncmd->buffer = NULL;
+ ncmd->ptr = NULL;
+ ncmd->this_residual = 0;
}
- cmd->SCp.Status = 0;
- cmd->SCp.Message = 0;
+ ncmd->status = 0;
+ ncmd->message = 0;
}
-static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
+static inline void advance_sg_buffer(struct NCR5380_cmd *ncmd)
{
- struct scatterlist *s = cmd->SCp.buffer;
+ struct scatterlist *s = ncmd->buffer;
- if (!cmd->SCp.this_residual && s && !sg_is_last(s)) {
- cmd->SCp.buffer = sg_next(s);
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ if (!ncmd->this_residual && s && !sg_is_last(s)) {
+ ncmd->buffer = sg_next(s);
+ ncmd->ptr = sg_virt(ncmd->buffer);
+ ncmd->this_residual = ncmd->buffer->length;
}
}
static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
{
- int resid = cmd->SCp.this_residual;
- struct scatterlist *s = cmd->SCp.buffer;
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
+ int resid = ncmd->this_residual;
+ struct scatterlist *s = ncmd->buffer;
if (s)
while (!sg_is_last(s)) {
struct scsi_cmnd *cmd)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
unsigned long flags;
#if (NDEBUG & NDEBUG_NO_WRITE)
static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
if (hostdata->sensing == cmd) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
static void NCR5380_dma_complete(struct Scsi_Host *instance)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected);
int transferred;
unsigned char **data;
int *count;
unsigned char p;
if (hostdata->read_overruns) {
- p = hostdata->connected->SCp.phase;
+ p = ncmd->phase;
if (p & SR_IO) {
udelay(10);
if ((NCR5380_read(BUS_AND_STATUS_REG) &
transferred = hostdata->dma_len - NCR5380_dma_residual(hostdata);
hostdata->dma_len = 0;
- data = (unsigned char **)&hostdata->connected->SCp.ptr;
- count = &hostdata->connected->SCp.this_residual;
+ data = (unsigned char **)&ncmd->ptr;
+ count = &ncmd->this_residual;
*data += transferred;
*count -= transferred;
return -1;
}
- hostdata->connected->SCp.phase = p;
+ NCR5380_to_ncmd(hostdata->connected)->phase = p;
if (p & SR_IO) {
if (hostdata->read_overruns)
#endif
while ((cmd = hostdata->connected)) {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
tmp = NCR5380_read(STATUS_REG);
/* We only have a valid SCSI phase when REQ is asserted */
sun3_dma_setup_done != cmd) {
int count;
- advance_sg_buffer(cmd);
+ advance_sg_buffer(ncmd);
count = sun3scsi_dma_xfer_len(hostdata, cmd);
if (count > 0) {
if (cmd->sc_data_direction == DMA_TO_DEVICE)
sun3scsi_dma_send_setup(hostdata,
- cmd->SCp.ptr, count);
+ ncmd->ptr, count);
else
sun3scsi_dma_recv_setup(hostdata,
- cmd->SCp.ptr, count);
+ ncmd->ptr, count);
sun3_dma_setup_done = cmd;
}
#ifdef SUN3_SCSI_VME
* scatter-gather list, move onto the next one.
*/
- advance_sg_buffer(cmd);
+ advance_sg_buffer(ncmd);
dsprintk(NDEBUG_INFORMATION, instance,
"this residual %d, sg ents %d\n",
- cmd->SCp.this_residual,
- sg_nents(cmd->SCp.buffer));
+ ncmd->this_residual,
+ sg_nents(ncmd->buffer));
/*
* The preferred transfer method is going to be
if (transfersize > 0) {
len = transfersize;
if (NCR5380_transfer_dma(instance, &phase,
- &len, (unsigned char **)&cmd->SCp.ptr)) {
+ &len, (unsigned char **)&ncmd->ptr)) {
/*
* If the watchdog timer fires, all future
* accesses to this device will use the
/* Transfer a small chunk so that the
* irq mode lock is not held too long.
*/
- transfersize = min(cmd->SCp.this_residual,
+ transfersize = min(ncmd->this_residual,
NCR5380_PIO_CHUNK_SIZE);
len = transfersize;
NCR5380_transfer_pio(instance, &phase, &len,
- (unsigned char **)&cmd->SCp.ptr,
+ (unsigned char **)&ncmd->ptr,
0);
- cmd->SCp.this_residual -= transfersize - len;
+ ncmd->this_residual -= transfersize - len;
}
#ifdef CONFIG_SUN3
if (sun3_dma_setup_done == cmd)
len = 1;
data = &tmp;
NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
- cmd->SCp.Message = tmp;
+ ncmd->message = tmp;
switch (tmp) {
case ABORT:
hostdata->connected = NULL;
hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
- set_status_byte(cmd, cmd->SCp.Status);
+ set_status_byte(cmd, ncmd->status);
set_resid_from_SCp(cmd);
if (cmd->cmnd[0] == REQUEST_SENSE)
complete_cmd(instance, cmd);
else {
- if (cmd->SCp.Status == SAM_STAT_CHECK_CONDITION ||
- cmd->SCp.Status == SAM_STAT_COMMAND_TERMINATED) {
+ if (ncmd->status == SAM_STAT_CHECK_CONDITION ||
+ ncmd->status == SAM_STAT_COMMAND_TERMINATED) {
dsprintk(NDEBUG_QUEUES, instance, "autosense: adding cmd %p to tail of autosense queue\n",
cmd);
list_add_tail(&ncmd->list,
len = 1;
data = &tmp;
NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
- cmd->SCp.Status = tmp;
+ ncmd->status = tmp;
break;
default:
shost_printk(KERN_ERR, instance, "unknown phase\n");
if (sun3_dma_setup_done != tmp) {
int count;
- advance_sg_buffer(tmp);
+ advance_sg_buffer(ncmd);
count = sun3scsi_dma_xfer_len(hostdata, tmp);
if (count > 0) {
if (tmp->sc_data_direction == DMA_TO_DEVICE)
sun3scsi_dma_send_setup(hostdata,
- tmp->SCp.ptr, count);
+ ncmd->ptr, count);
else
sun3scsi_dma_recv_setup(hostdata,
- tmp->SCp.ptr, count);
+ ncmd->ptr, count);
sun3_dma_setup_done = tmp;
}
}
struct scsi_cmnd *needle)
{
if (list_find_cmd(haystack, needle)) {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(needle);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(needle);
list_del(&ncmd->list);
return true;
};
struct NCR5380_cmd {
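+ /* Fields formerly held in scsi_cmnd.SCp, now per-command private data. */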
+ char *ptr;
+ int this_residual;
+ struct scatterlist *buffer;
+ int status;
+ int message;
+ int phase;
struct list_head list;
};
-#define NCR5380_CMD_SIZE (sizeof(struct NCR5380_cmd))
-
#define NCR5380_PIO_CHUNK_SIZE 256
/* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
return ((struct scsi_cmnd *)ncmd_ptr) - 1;
}
+static inline struct NCR5380_cmd *NCR5380_to_ncmd(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
#ifndef NDEBUG
#define NDEBUG (0)
#endif
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
#include "a2091.h"
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct Scsi_Host *instance = cmd->device->host;
struct a2091_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a2091_scsiregs *regs = hdata->regs;
unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ unsigned long addr = virt_to_bus(scsi_pointer->ptr);
/* don't allow DMA if the physical address is bad */
if (addr & A2091_XFER_MASK) {
- wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
GFP_KERNEL);
if (!dir_in) {
/* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
- cmd->SCp.this_residual);
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
}
}
if (dir_in) {
/* invalidate any cache */
- cache_clear(addr, cmd->SCp.this_residual);
+ cache_clear(addr, scsi_pointer->this_residual);
} else {
/* push any dirty cache */
- cache_push(addr, cmd->SCp.this_residual);
+ cache_push(addr, scsi_pointer->this_residual);
}
/* start DMA */
regs->ST_DMA = 1;
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
struct a2091_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a2091_scsiregs *regs = hdata->regs;
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (wh->dma_dir)
- memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
+ memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer,
+ scsi_pointer->this_residual);
kfree(wh->dma_bounce_buffer);
wh->dma_bounce_buffer = NULL;
wh->dma_bounce_len = 0;
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
#include "a3000.h"
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct Scsi_Host *instance = cmd->device->host;
struct a3000_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a3000_scsiregs *regs = hdata->regs;
unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ unsigned long addr = virt_to_bus(scsi_pointer->ptr);
/*
* if the physical address has the wrong alignment, or if
* buffer
*/
if (addr & A3000_XFER_MASK) {
- wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
GFP_KERNEL);
if (!dir_in) {
/* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
- cmd->SCp.this_residual);
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
}
addr = virt_to_bus(wh->dma_bounce_buffer);
if (dir_in) {
/* invalidate any cache */
- cache_clear(addr, cmd->SCp.this_residual);
+ cache_clear(addr, scsi_pointer->this_residual);
} else {
/* push any dirty cache */
- cache_push(addr, cmd->SCp.this_residual);
+ cache_push(addr, scsi_pointer->this_residual);
}
/* start DMA */
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
struct a3000_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a3000_scsiregs *regs = hdata->regs;
if (status && wh->dma_bounce_buffer) {
if (SCpnt) {
if (wh->dma_dir && SCpnt)
- memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
+ memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer,
+ scsi_pointer->this_residual);
kfree(wh->dma_bounce_buffer);
wh->dma_bounce_buffer = NULL;
wh->dma_bounce_len = 0;
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
aac_fib_complete(fibptr);
return 0;
}
- scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
+ aac_priv(scsicmd)->owner = AAC_OWNER_MIDLEVEL;
device = scsicmd->device;
if (unlikely(!device)) {
dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
aac_fib_init(cmd_fibcontext);
dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
dinfo->command = cpu_to_le32(VM_ContainerConfig);
dinfo->type = cpu_to_le32(CT_READ_NAME);
{
struct fsa_dev_info *fsa_dev_ptr;
int (*callback)(struct scsi_cmnd *);
- struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
+ struct scsi_cmnd *scsicmd = context;
+ struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
int i;
if (!aac_valid_context(scsicmd, fibptr))
return;
- scsicmd->SCp.Status = 0;
+ cmd_priv->status = 0;
fsa_dev_ptr = fibptr->dev->fsa_dev;
if (fsa_dev_ptr) {
struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
}
if ((fsa_dev_ptr->valid & 1) == 0)
fsa_dev_ptr->valid = 0;
- scsicmd->SCp.Status = le32_to_cpu(dresp->count);
+ cmd_priv->status = le32_to_cpu(dresp->count);
}
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
- callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
- scsicmd->SCp.ptr = NULL;
+ callback = cmd_priv->callback;
+ cmd_priv->callback = NULL;
(*callback)(scsicmd);
return;
}
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
dinfo->type = cpu_to_le32(FT_FILESYS);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_fib_send(ContainerCommand,
fibptr,
static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
{
+ struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
struct fib * fibptr;
int status = -ENOMEM;
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
dinfo->type = cpu_to_le32(FT_FILESYS);
- scsicmd->SCp.ptr = (char *)callback;
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ cmd_priv->callback = callback;
+ cmd_priv->owner = AAC_OWNER_FIRMWARE;
status = aac_fib_send(ContainerCommand,
fibptr,
return 0;
if (status < 0) {
- scsicmd->SCp.ptr = NULL;
+ cmd_priv->callback = NULL;
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
}
int aac_probe_container(struct aac_dev *dev, int cid)
{
- struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd), GFP_KERNEL);
+ struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd) + sizeof(struct aac_cmd_priv),
+                                     GFP_KERNEL);
+ struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
struct scsi_device *scsidev = kzalloc(sizeof(*scsidev), GFP_KERNEL);
int status;
while (scsicmd->device == scsidev)
schedule();
kfree(scsidev);
- status = scsicmd->SCp.Status;
+ status = cmd_priv->status;
kfree(scsicmd);
return status;
}
dinfo->command = cpu_to_le32(VM_ContainerConfig);
dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_fib_send(ContainerCommand,
cmd_fibcontext,
* Alocate and initialize a Fib
*/
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
/*
* Allocate and initialize a Fib then setup a BlockWrite command
*/
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
/*
synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
synchronizecmd->count =
cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
/*
* Now send the Fib to the adapter
pmcmd->cid = cpu_to_le32(sdev_id(sdev));
pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
/*
* Now send the Fib to the adapter
aac_fib_complete(fibptr);
if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
- scsicmd->SCp.sent_command = 1;
+ aac_priv(scsicmd)->sent_command = 1;
else
aac_scsi_done(scsicmd);
}
* Allocate and initialize a Fib then setup a BlockWrite command
*/
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
/*
if (!cmd_fibcontext)
return -1;
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_adapter_hba(cmd_fibcontext, scsicmd);
/*
#include <linux/completion.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
/*------------------------------------------------------------------------------
* D E F I N E S
cancel_delayed_work_sync(&dev->src_reinit_aif_worker);
}
-/* SCp.phase values */
-#define AAC_OWNER_MIDLEVEL 0x101
-#define AAC_OWNER_LOWLEVEL 0x102
-#define AAC_OWNER_ERROR_HANDLER 0x103
-#define AAC_OWNER_FIRMWARE 0x106
+enum aac_cmd_owner {
+ AAC_OWNER_MIDLEVEL = 0x101,
+ AAC_OWNER_LOWLEVEL = 0x102,
+ AAC_OWNER_ERROR_HANDLER = 0x103,
+ AAC_OWNER_FIRMWARE = 0x106,
+};
+
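+/* Per-command private data; replaces the fields formerly kept in scsi_cmnd.SCp. */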
+struct aac_cmd_priv {
+ int (*callback)(struct scsi_cmnd *);
+ int status;
+ enum aac_cmd_owner owner;
+ bool sent_command;
+};
+
+static inline struct aac_cmd_priv *aac_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
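This accessor is the template for the whole series: the driver declares its per-command state once, advertises its size through the cmd_size field of the SCSI host template, and the midlayer allocates that many extra bytes directly behind every struct scsi_cmnd, where scsi_cmd_priv() finds them. A minimal sketch of the pattern, using a hypothetical foo driver rather than any of the converted LLDs:

struct foo_cmd_priv {
	int status;
};

static struct scsi_host_template foo_template = {
	/* ... */
	.cmd_size = sizeof(struct foo_cmd_priv),
};

static int foo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	/*
	 * scsi_cmd_priv() returns cmd + 1, i.e. the memory that
	 * .cmd_size reserved right behind the scsi_cmnd itself.
	 */
	struct foo_cmd_priv *priv = scsi_cmd_priv(cmd);

	priv->status = 0;	/* initialize; do not assume zeroed state */
	/* ... build and issue the command ... */
	return 0;
}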
void aac_safw_rescan_worker(struct work_struct *work);
void aac_src_reinit_aif_worker(struct work_struct *work);
{
int *active = data;
- if (cmd->SCp.phase == AAC_OWNER_FIRMWARE)
+ if (aac_priv(cmd)->owner == AAC_OWNER_FIRMWARE)
*active = *active + 1;
return true;
}
static int aac_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *cmd)
{
- int r = 0;
- cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
- r = (aac_scsi_cmd(cmd) ? FAILED : 0);
- return r;
+ aac_priv(cmd)->owner = AAC_OWNER_LOWLEVEL;
+
+ return aac_scsi_cmd(cmd) ? FAILED : 0;
}
/**
{
struct fib_count_data *fib_count = data;
- switch (scmnd->SCp.phase) {
+ switch (aac_priv(scmnd)->owner) {
case AAC_OWNER_FIRMWARE:
fib_count->fwcnt++;
break;
static int aac_eh_abort(struct scsi_cmnd* cmd)
{
+ struct aac_cmd_priv *cmd_priv = aac_priv(cmd);
struct scsi_device * dev = cmd->device;
struct Scsi_Host * host = dev->host;
struct aac_dev * aac = (struct aac_dev *)host->hostdata;
tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
fib->hbacmd_size = sizeof(*tmf);
- cmd->SCp.sent_command = 0;
+ cmd_priv->sent_command = 0;
status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
(fib_callback) aac_hba_callback,
}
/* Wait up to 15 secs for completion */
for (count = 0; count < 15; ++count) {
- if (cmd->SCp.sent_command) {
+ if (cmd_priv->sent_command) {
ret = SUCCESS;
break;
}
(fib->callback_data == cmd)) {
fib->flags |=
FIB_CONTEXT_FLAG_TIMED_OUT;
- cmd->SCp.phase =
+ cmd_priv->owner =
AAC_OWNER_ERROR_HANDLER;
ret = SUCCESS;
}
(command->device == cmd->device)) {
fib->flags |=
FIB_CONTEXT_FLAG_TIMED_OUT;
- command->SCp.phase =
+ aac_priv(command)->owner =
AAC_OWNER_ERROR_HANDLER;
if (command == cmd)
ret = SUCCESS;
rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
fib->hbacmd_size = sizeof(*rst);
- return HBA_IU_TYPE_SATA_REQ;
+ return HBA_IU_TYPE_SATA_REQ;
}
static void aac_tmf_callback(void *context, struct fib *fibptr)
if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
- cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ aac_priv(cmd)->owner = AAC_OWNER_ERROR_HANDLER;
}
}
}
#endif
.emulated = 1,
.no_write_same = 1,
+ .cmd_size = sizeof(struct aac_cmd_priv),
};
static void __aac_shutdown(struct aac_dev * aac)
dvc_var.adv_dvc_var)
#define adv_dvc_to_pdev(adv_dvc) to_pci_dev(adv_dvc_to_board(adv_dvc)->dev)
+struct advansys_cmd {
+ dma_addr_t dma_handle;
+};
+
+static struct advansys_cmd *advansys_cmd(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
#ifdef ADVANSYS_DEBUG
static int asc_dbglvl = 3;
ASC_STATS(boardp->shost, callback);
- dma_unmap_single(boardp->dev, scp->SCp.dma_handle,
+ dma_unmap_single(boardp->dev, advansys_cmd(scp)->dma_handle,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
/*
* 'qdonep' contains the command's ending status.
static __le32 asc_get_sense_buffer_dma(struct scsi_cmnd *scp)
{
struct asc_board *board = shost_priv(scp->device->host);
+ struct advansys_cmd *acmd = advansys_cmd(scp);
- scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
- SCSI_SENSE_BUFFERSIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(board->dev, scp->SCp.dma_handle)) {
+ acmd->dma_handle = dma_map_single(board->dev, scp->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(board->dev, acmd->dma_handle)) {
ASC_DBG(1, "failed to map sense buffer\n");
return 0;
}
- return cpu_to_le32(scp->SCp.dma_handle);
+ return cpu_to_le32(acmd->dma_handle);
}
static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
.eh_host_reset_handler = advansys_reset,
.bios_param = advansys_biosparam,
.slave_configure = advansys_slave_configure,
+ .cmd_size = sizeof(struct advansys_cmd),
};
static int advansys_wide_init_chip(struct Scsi_Host *shost)
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <scsi/scsicam.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_spi.h>
-#include <scsi/scsi_eh.h>
+#include <scsi/scsicam.h>
#include "aha152x.h"
static LIST_HEAD(aha152x_host_list);
check_condition = 0x0800, /* requesting sense after CHECK CONDITION */
};
+struct aha152x_cmd_priv {
+ struct scsi_pointer scsi_pointer;
+};
+
+static struct scsi_pointer *aha152x_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ struct aha152x_cmd_priv *acmd = scsi_cmd_priv(cmd);
+
+ return &acmd->scsi_pointer;
+}
+
MODULE_AUTHOR("Jürgen Fischer");
MODULE_DESCRIPTION(AHA152X_REVID);
MODULE_LICENSE("GPL");
static int setup_expected_interrupts(struct Scsi_Host *shpnt)
{
if(CURRENT_SC) {
- CURRENT_SC->SCp.phase |= 1 << 16;
+ struct scsi_pointer *scsi_pointer =
+ aha152x_scsi_pointer(CURRENT_SC);
+
+ scsi_pointer->phase |= 1 << 16;
- if(CURRENT_SC->SCp.phase & selecting) {
+ if (scsi_pointer->phase & selecting) {
SETPORT(SSTAT1, SELTO);
SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
SETPORT(SIMODE1, ENSELTIMO);
} else {
- SETPORT(SIMODE0, (CURRENT_SC->SCp.phase & spiordy) ? ENSPIORDY : 0);
+ SETPORT(SIMODE0, (scsi_pointer->phase & spiordy) ? ENSPIORDY : 0);
SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE);
}
} else if(STATE==seldi) {
static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
struct completion *complete, int phase)
{
+ struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(SCpnt);
struct Scsi_Host *shpnt = SCpnt->device->host;
unsigned long flags;
- SCpnt->SCp.phase = not_issued | phase;
- SCpnt->SCp.Status = 0x1; /* Ilegal status by SCSI standard */
- SCpnt->SCp.Message = 0;
- SCpnt->SCp.have_data_in = 0;
- SCpnt->SCp.sent_command = 0;
+ scsi_pointer->phase = not_issued | phase;
+ scsi_pointer->Status = 0x1; /* Illegal status by SCSI standard */
+ scsi_pointer->Message = 0;
+ scsi_pointer->have_data_in = 0;
+ scsi_pointer->sent_command = 0;
- if(SCpnt->SCp.phase & (resetting|check_condition)) {
+ if (scsi_pointer->phase & (resetting | check_condition)) {
if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n");
return FAILED;
SCp.phase : current state of the command */
if ((phase & resetting) || !scsi_sglist(SCpnt)) {
- SCpnt->SCp.ptr = NULL;
- SCpnt->SCp.this_residual = 0;
+ scsi_pointer->ptr = NULL;
+ scsi_pointer->this_residual = 0;
scsi_set_resid(SCpnt, 0);
- SCpnt->SCp.buffer = NULL;
+ scsi_pointer->buffer = NULL;
} else {
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
- SCpnt->SCp.buffer = scsi_sglist(SCpnt);
- SCpnt->SCp.ptr = SG_ADDRESS(SCpnt->SCp.buffer);
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ scsi_pointer->buffer = scsi_sglist(SCpnt);
+ scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
}
DO_LOCK(flags);
static void aha152x_scsi_done(struct scsi_cmnd *SCpnt)
{
- if (SCpnt->SCp.phase & resetting)
+ if (aha152x_scsi_pointer(SCpnt)->phase & resetting)
reset_done(SCpnt);
else
scsi_done(SCpnt);
DO_LOCK(flags);
- if(SCpnt->SCp.phase & resetted) {
+ if (aha152x_scsi_pointer(SCpnt)->phase & resetted) {
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0);
SETPORT(SSTAT1, CLRBUSFREE);
if(CURRENT_SC) {
+ struct scsi_pointer *scsi_pointer =
+ aha152x_scsi_pointer(CURRENT_SC);
+
#if defined(AHA152X_STAT)
action++;
#endif
- CURRENT_SC->SCp.phase &= ~syncneg;
+ scsi_pointer->phase &= ~syncneg;
- if(CURRENT_SC->SCp.phase & completed) {
+ if (scsi_pointer->phase & completed) {
/* target sent COMMAND COMPLETE */
- done(shpnt, CURRENT_SC->SCp.Status, DID_OK);
+ done(shpnt, scsi_pointer->Status, DID_OK);
- } else if(CURRENT_SC->SCp.phase & aborted) {
- done(shpnt, CURRENT_SC->SCp.Status, DID_ABORT);
+ } else if (scsi_pointer->phase & aborted) {
+ done(shpnt, scsi_pointer->Status, DID_ABORT);
- } else if(CURRENT_SC->SCp.phase & resetted) {
- done(shpnt, CURRENT_SC->SCp.Status, DID_RESET);
+ } else if (scsi_pointer->phase & resetted) {
+ done(shpnt, scsi_pointer->Status, DID_RESET);
- } else if(CURRENT_SC->SCp.phase & disconnected) {
+ } else if (scsi_pointer->phase & disconnected) {
/* target sent DISCONNECT */
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->disconnections++;
#endif
append_SC(&DISCONNECTED_SC, CURRENT_SC);
- CURRENT_SC->SCp.phase |= 1 << 16;
+ scsi_pointer->phase |= 1 << 16;
CURRENT_SC = NULL;
} else {
action++;
#endif
- if(DONE_SC->SCp.phase & check_condition) {
+ if (aha152x_scsi_pointer(DONE_SC)->phase & check_condition) {
struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
struct aha152x_scdata *sc = SCDATA(cmd);
scsi_eh_restore_cmnd(cmd, &sc->ses);
- cmd->SCp.Status = SAM_STAT_CHECK_CONDITION;
+ aha152x_scsi_pointer(cmd)->Status = SAM_STAT_CHECK_CONDITION;
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
- } else if(DONE_SC->SCp.Status==SAM_STAT_CHECK_CONDITION) {
+ } else if (aha152x_scsi_pointer(DONE_SC)->Status ==
+ SAM_STAT_CHECK_CONDITION) {
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->busfree_with_check_condition++;
#endif
- if(!(DONE_SC->SCp.phase & not_issued)) {
+ if(!(aha152x_scsi_pointer(DONE_SC)->phase & not_issued)) {
struct aha152x_scdata *sc;
struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
- if (!(ptr->SCp.phase & resetting)) {
+ if (!(aha152x_scsi_pointer(ptr)->phase & resetting)) {
kfree(ptr->host_scribble);
ptr->host_scribble=NULL;
}
DO_UNLOCK(flags);
if(CURRENT_SC) {
+ struct scsi_pointer *scsi_pointer =
+ aha152x_scsi_pointer(CURRENT_SC);
+
#if defined(AHA152X_STAT)
action++;
#endif
- CURRENT_SC->SCp.phase |= selecting;
+ scsi_pointer->phase |= selecting;
/* clear selection timeout */
SETPORT(SSTAT1, SELTO);
*/
static void seldo_run(struct Scsi_Host *shpnt)
{
+ struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+
SETPORT(SCSISIG, 0);
SETPORT(SSTAT1, CLRBUSFREE);
SETPORT(SSTAT1, CLRPHASECHG);
- CURRENT_SC->SCp.phase &= ~(selecting|not_issued);
+ scsi_pointer->phase &= ~(selecting | not_issued);
SETPORT(SCSISEQ, 0);
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
- if (CURRENT_SC->SCp.phase & aborting) {
+ if (scsi_pointer->phase & aborting) {
ADDMSGO(ABORT);
- } else if (CURRENT_SC->SCp.phase & resetting) {
+ } else if (scsi_pointer->phase & resetting) {
ADDMSGO(BUS_DEVICE_RESET);
} else if (SYNCNEG==0 && SYNCHRONOUS) {
- CURRENT_SC->SCp.phase |= syncneg;
+ scsi_pointer->phase |= syncneg;
MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8);
SYNCNEG=1; /* negotiation in progress */
}
*/
static void selto_run(struct Scsi_Host *shpnt)
{
+ struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+
SETPORT(SCSISEQ, 0);
SETPORT(SSTAT1, CLRSELTIMO);
if (!CURRENT_SC)
return;
- CURRENT_SC->SCp.phase &= ~selecting;
+ scsi_pointer->phase &= ~selecting;
- if (CURRENT_SC->SCp.phase & aborted)
+ if (scsi_pointer->phase & aborted)
done(shpnt, SAM_STAT_GOOD, DID_ABORT);
else if (TESTLO(SSTAT0, SELINGO))
done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY);
SETPORT(SSTAT1, CLRPHASECHG);
if(CURRENT_SC) {
- if(!(CURRENT_SC->SCp.phase & not_issued))
+ struct scsi_pointer *scsi_pointer =
+ aha152x_scsi_pointer(CURRENT_SC);
+
+ if (!(scsi_pointer->phase & not_issued))
scmd_printk(KERN_ERR, CURRENT_SC,
"command should not have been issued yet\n");
static void msgi_run(struct Scsi_Host *shpnt)
{
for(;;) {
+ struct scsi_pointer *scsi_pointer;
int sstat1 = GETPORT(SSTAT1);
if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT))
continue;
}
- CURRENT_SC->SCp.Message = MSGI(0);
- CURRENT_SC->SCp.phase &= ~disconnected;
+ scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ scsi_pointer->Message = MSGI(0);
+ scsi_pointer->phase &= ~disconnected;
MSGILEN=0;
continue;
}
- CURRENT_SC->SCp.Message = MSGI(0);
+ scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ scsi_pointer->Message = MSGI(0);
switch (MSGI(0)) {
case DISCONNECT:
scmd_printk(KERN_WARNING, CURRENT_SC,
"target was not allowed to disconnect\n");
- CURRENT_SC->SCp.phase |= disconnected;
+ scsi_pointer->phase |= disconnected;
break;
case COMMAND_COMPLETE:
- CURRENT_SC->SCp.phase |= completed;
+ scsi_pointer->phase |= completed;
break;
case MESSAGE_REJECT:
*/
static void msgo_init(struct Scsi_Host *shpnt)
{
+ struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+
if(MSGOLEN==0) {
- if((CURRENT_SC->SCp.phase & syncneg) && SYNCNEG==2 && SYNCRATE==0) {
+ if ((scsi_pointer->phase & syncneg) && SYNCNEG==2 &&
+ SYNCRATE==0) {
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
} else {
scmd_printk(KERN_INFO, CURRENT_SC,
*/
static void msgo_run(struct Scsi_Host *shpnt)
{
+ struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+
while(MSGO_I<MSGOLEN) {
if (TESTLO(SSTAT0, SPIORDY))
return;
if (MSGO(MSGO_I) & IDENTIFY_BASE)
- CURRENT_SC->SCp.phase |= identified;
+ scsi_pointer->phase |= identified;
if (MSGO(MSGO_I)==ABORT)
- CURRENT_SC->SCp.phase |= aborted;
+ scsi_pointer->phase |= aborted;
if (MSGO(MSGO_I)==BUS_DEVICE_RESET)
- CURRENT_SC->SCp.phase |= resetted;
+ scsi_pointer->phase |= resetted;
SETPORT(SCSIDAT, MSGO(MSGO_I++));
}
*/
static void cmd_init(struct Scsi_Host *shpnt)
{
- if (CURRENT_SC->SCp.sent_command) {
+ if (aha152x_scsi_pointer(CURRENT_SC)->sent_command) {
scmd_printk(KERN_ERR, CURRENT_SC,
"command already sent\n");
done(shpnt, SAM_STAT_GOOD, DID_ERROR);
"command sent incompletely (%d/%d)\n",
CMD_I, CURRENT_SC->cmd_len);
else
- CURRENT_SC->SCp.sent_command++;
+ aha152x_scsi_pointer(CURRENT_SC)->sent_command++;
}
/*
if (TESTLO(SSTAT0, SPIORDY))
return;
- CURRENT_SC->SCp.Status = GETPORT(SCSIDAT);
+ aha152x_scsi_pointer(CURRENT_SC)->Status = GETPORT(SCSIDAT);
}
static void datai_run(struct Scsi_Host *shpnt)
{
+ struct scsi_pointer *scsi_pointer;
unsigned long the_time;
int fifodata, data_count;
fifodata = GETPORT(FIFOSTAT);
}
- if(CURRENT_SC->SCp.this_residual>0) {
- while(fifodata>0 && CURRENT_SC->SCp.this_residual>0) {
- data_count = fifodata > CURRENT_SC->SCp.this_residual ?
- CURRENT_SC->SCp.this_residual :
+ scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ if (scsi_pointer->this_residual > 0) {
+ while (fifodata > 0 && scsi_pointer->this_residual > 0) {
+ data_count = fifodata > scsi_pointer->this_residual ?
+ scsi_pointer->this_residual :
fifodata;
fifodata -= data_count;
if (data_count & 1) {
SETPORT(DMACNTRL0, ENDMA|_8BIT);
- *CURRENT_SC->SCp.ptr++ = GETPORT(DATAPORT);
- CURRENT_SC->SCp.this_residual--;
+ *scsi_pointer->ptr++ = GETPORT(DATAPORT);
+ scsi_pointer->this_residual--;
DATA_LEN++;
SETPORT(DMACNTRL0, ENDMA);
}
if (data_count > 1) {
data_count >>= 1;
- insw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
- CURRENT_SC->SCp.ptr += 2 * data_count;
- CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ insw(DATAPORT, scsi_pointer->ptr, data_count);
+ scsi_pointer->ptr += 2 * data_count;
+ scsi_pointer->this_residual -= 2 * data_count;
DATA_LEN += 2 * data_count;
}
- if (CURRENT_SC->SCp.this_residual == 0 &&
- !sg_is_last(CURRENT_SC->SCp.buffer)) {
+ if (scsi_pointer->this_residual == 0 &&
+ !sg_is_last(scsi_pointer->buffer)) {
/* advance to next buffer */
- CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
- CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
- CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
+ scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
+ scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
}
}
} else if (fifodata > 0) {
static void datao_run(struct Scsi_Host *shpnt)
{
+ struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
unsigned long the_time;
int data_count;
/* until phase changes or all data sent */
- while(TESTLO(DMASTAT, INTSTAT) && CURRENT_SC->SCp.this_residual>0) {
+ while (TESTLO(DMASTAT, INTSTAT) && scsi_pointer->this_residual > 0) {
data_count = 128;
- if(data_count > CURRENT_SC->SCp.this_residual)
- data_count=CURRENT_SC->SCp.this_residual;
+ if (data_count > scsi_pointer->this_residual)
+ data_count = scsi_pointer->this_residual;
if(TESTLO(DMASTAT, DFIFOEMP)) {
scmd_printk(KERN_ERR, CURRENT_SC,
if(data_count & 1) {
SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT);
- SETPORT(DATAPORT, *CURRENT_SC->SCp.ptr++);
- CURRENT_SC->SCp.this_residual--;
+ SETPORT(DATAPORT, *scsi_pointer->ptr++);
+ scsi_pointer->this_residual--;
CMD_INC_RESID(CURRENT_SC, -1);
SETPORT(DMACNTRL0,WRITE_READ|ENDMA);
}
if(data_count > 1) {
data_count >>= 1;
- outsw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
- CURRENT_SC->SCp.ptr += 2 * data_count;
- CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ outsw(DATAPORT, scsi_pointer->ptr, data_count);
+ scsi_pointer->ptr += 2 * data_count;
+ scsi_pointer->this_residual -= 2 * data_count;
CMD_INC_RESID(CURRENT_SC, -2 * data_count);
}
- if (CURRENT_SC->SCp.this_residual == 0 &&
- !sg_is_last(CURRENT_SC->SCp.buffer)) {
+ if (scsi_pointer->this_residual == 0 &&
+ !sg_is_last(scsi_pointer->buffer)) {
/* advance to next buffer */
- CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
- CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
- CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
+ scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
+ scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
}
the_time=jiffies + 100*HZ;
static void datao_end(struct Scsi_Host *shpnt)
{
+ struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+
if(TESTLO(DMASTAT, DFIFOEMP)) {
u32 datao_cnt = GETSTCNT();
int datao_out = DATA_LEN - scsi_get_resid(CURRENT_SC);
sg = sg_next(sg);
}
- CURRENT_SC->SCp.buffer = sg;
- CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) + done;
- CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length -
+ scsi_pointer->buffer = sg;
+ scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer) + done;
+ scsi_pointer->this_residual = scsi_pointer->buffer->length -
done;
}
*/
static int update_state(struct Scsi_Host *shpnt)
{
+ struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
int dataphase=0;
unsigned int stat0 = GETPORT(SSTAT0);
unsigned int stat1 = GETPORT(SSTAT1);
SETPORT(SSTAT1,SCSIRSTI);
} else if (stat0 & SELDI && PREVSTATE == busfree) {
STATE=seldi;
- } else if(stat0 & SELDO && CURRENT_SC && (CURRENT_SC->SCp.phase & selecting)) {
+ } else if (stat0 & SELDO && CURRENT_SC &&
+ (scsi_pointer->phase & selecting)) {
STATE=seldo;
} else if(stat1 & SELTO) {
STATE=selto;
SETPORT(SXFRCTL0, CH1);
SETPORT(DMACNTRL0, 0);
if(CURRENT_SC)
- CURRENT_SC->SCp.phase &= ~spiordy;
+ aha152x_scsi_pointer(CURRENT_SC)->phase &=
+ ~spiordy;
}
/*
SETPORT(DMACNTRL0, 0);
SETPORT(SXFRCTL0, CH1|SPIOEN);
if(CURRENT_SC)
- CURRENT_SC->SCp.phase |= spiordy;
+ aha152x_scsi_pointer(CURRENT_SC)->phase |=
+ spiordy;
}
/*
*/
static void show_command(struct scsi_cmnd *ptr)
{
+ const int phase = aha152x_scsi_pointer(ptr)->phase;
+
scsi_print_command(ptr);
scmd_printk(KERN_DEBUG, ptr,
"request_bufflen=%d; resid=%d; "
"phase |%s%s%s%s%s%s%s%s%s; next=0x%p",
scsi_bufflen(ptr), scsi_get_resid(ptr),
- (ptr->SCp.phase & not_issued) ? "not issued|" : "",
- (ptr->SCp.phase & selecting) ? "selecting|" : "",
- (ptr->SCp.phase & identified) ? "identified|" : "",
- (ptr->SCp.phase & disconnected) ? "disconnected|" : "",
- (ptr->SCp.phase & completed) ? "completed|" : "",
- (ptr->SCp.phase & spiordy) ? "spiordy|" : "",
- (ptr->SCp.phase & syncneg) ? "syncneg|" : "",
- (ptr->SCp.phase & aborted) ? "aborted|" : "",
- (ptr->SCp.phase & resetted) ? "resetted|" : "",
- (SCDATA(ptr)) ? SCNEXT(ptr) : NULL);
+ phase & not_issued ? "not issued|" : "",
+ phase & selecting ? "selecting|" : "",
+ phase & identified ? "identified|" : "",
+ phase & disconnected ? "disconnected|" : "",
+ phase & completed ? "completed|" : "",
+ phase & spiordy ? "spiordy|" : "",
+ phase & syncneg ? "syncneg|" : "",
+ phase & aborted ? "aborted|" : "",
+ phase & resetted ? "resetted|" : "",
+ SCDATA(ptr) ? SCNEXT(ptr) : NULL);
}
/*
static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
{
+ struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(ptr);
+ const int phase = scsi_pointer->phase;
int i;
seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ",
seq_printf(m, "0x%02x ", ptr->cmnd[i]);
seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
- scsi_get_resid(ptr), ptr->SCp.this_residual,
- sg_nents(ptr->SCp.buffer) - 1);
+ scsi_get_resid(ptr), scsi_pointer->this_residual,
+ sg_nents(scsi_pointer->buffer) - 1);
- if (ptr->SCp.phase & not_issued)
+ if (phase & not_issued)
seq_puts(m, "not issued|");
- if (ptr->SCp.phase & selecting)
+ if (phase & selecting)
seq_puts(m, "selecting|");
- if (ptr->SCp.phase & disconnected)
+ if (phase & disconnected)
seq_puts(m, "disconnected|");
- if (ptr->SCp.phase & aborted)
+ if (phase & aborted)
seq_puts(m, "aborted|");
- if (ptr->SCp.phase & identified)
+ if (phase & identified)
seq_puts(m, "identified|");
- if (ptr->SCp.phase & completed)
+ if (phase & completed)
seq_puts(m, "completed|");
- if (ptr->SCp.phase & spiordy)
+ if (phase & spiordy)
seq_puts(m, "spiordy|");
- if (ptr->SCp.phase & syncneg)
+ if (phase & syncneg)
seq_puts(m, "syncneg|");
seq_printf(m, "; next=0x%p\n", SCNEXT(ptr));
}
.sg_tablesize = SG_ALL,
.dma_boundary = PAGE_SIZE - 1,
.slave_alloc = aha152x_adjust_queue,
+ .cmd_size = sizeof(struct aha152x_cmd_priv),
};
#if !defined(AHA152X_PCMCIA)
setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1;
setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT;
setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0;
- if (ints[0] > 8) { /*}*/
+ if (ints[0] > 8)
printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
"[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n");
- } else {
+ else
setup_count++;
- return 0;
- }
return 1;
}
static int aha1542_test_port(struct Scsi_Host *sh)
{
- u8 inquiry_result[4];
int i;
/* Quick and dirty test for presence of the card. */
for (i = 0; i < 4; i++) {
if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0))
return 0;
- inquiry_result[i] = inb(DATA(sh->io_port));
+ (void)inb(DATA(sh->io_port));
}
/* Reading port should reset DF */
#include <asm/dma.h>
#include <asm/io.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "aha1740.h"
/* IF YOU ARE HAVING PROBLEMS WITH THIS DRIVER, AND WANT TO WATCH
/*
* Descend the tree of scopes and insert/emit
* patches as appropriate. We perform a depth first
- * tranversal, recursively handling each scope.
+ * traversal, recursively handling each scope.
*/
/* start at the root scope */
dump_scope(SLIST_FIRST(&scope_stack));
/* ---------- TMFs ---------- */
int asd_abort_task(struct sas_task *);
int asd_abort_task_set(struct domain_device *, u8 *lun);
-int asd_clear_aca(struct domain_device *, u8 *lun);
int asd_clear_task_set(struct domain_device *, u8 *lun);
int asd_lu_reset(struct domain_device *, u8 *lun);
int asd_I_T_nexus_reset(struct domain_device *dev);
.lldd_abort_task = asd_abort_task,
.lldd_abort_task_set = asd_abort_task_set,
- .lldd_clear_aca = asd_clear_aca,
.lldd_clear_task_set = asd_clear_task_set,
.lldd_I_T_nexus_reset = asd_I_T_nexus_reset,
.lldd_lu_reset = asd_lu_reset,
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
struct completion *completion = ascb->completion;
struct sas_task *t = task;
struct asd_ascb *ascb = NULL, *a;
struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
- unsigned long flags;
res = asd_can_queue(asd_ha, 1);
if (res)
}
if (res)
goto out_err_unmap;
-
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&t->task_state_lock, flags);
}
list_del_init(&alist);
if (a == b)
break;
t = a->uldd_task;
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&t->task_state_lock, flags);
switch (t->task_proto) {
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
fh = edb->vaddr + 16;
ru = edb->vaddr + 16 + sizeof(*fh);
res = ru->status;
- if (ru->datapres == 1) /* Response data present */
+ if (ru->datapres == SAS_DATAPRES_RESPONSE_DATA)
res = ru->resp_data[3];
#if 0
ascb->tag = fh->tag;
return res;
}
-int asd_clear_aca(struct domain_device *dev, u8 *lun)
-{
- int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
-
- if (res == TMF_RESP_FUNC_COMPLETE)
- asd_clear_nexus_I_T_L(dev, lun);
- return res;
-}
-
int asd_clear_task_set(struct domain_device *dev, u8 *lun)
{
int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
#include <asm/ecard.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_spi.h>
#include "acornscsi.h"
#include "msgqueue.h"
-#include "scsi.h"
+#include "arm_scsi.h"
#include <scsi/scsicam.h>
*/
host->scsi.phase = PHASE_CONNECTING;
host->SCpnt = SCpnt;
- host->scsi.SCp = SCpnt->SCp;
+ host->scsi.SCp = *arm_scsi_pointer(SCpnt);
host->dma.xfer_setup = 0;
host->dma.xfer_required = 0;
host->dma.xfer_done = 0;
static
void acornscsi_message(AS_Host *host)
{
+ struct scsi_pointer *scsi_pointer;
unsigned char message[16];
unsigned int msgidx = 0, msglen = 1;
* the saved data pointer for the current I/O process.
*/
acornscsi_dma_cleanup(host);
- host->SCpnt->SCp = host->scsi.SCp;
- host->SCpnt->SCp.sent_command = 0;
+ scsi_pointer = arm_scsi_pointer(host->SCpnt);
+ *scsi_pointer = host->scsi.SCp;
+ scsi_pointer->sent_command = 0;
host->scsi.phase = PHASE_MSGIN;
break;
* the present command and status areas.'
*/
acornscsi_dma_cleanup(host);
- host->scsi.SCp = host->SCpnt->SCp;
+ host->scsi.SCp = *arm_scsi_pointer(host->SCpnt);
host->scsi.phase = PHASE_MSGIN;
break;
/*
* Restore data pointer from SAVED pointers.
*/
- host->scsi.SCp = host->SCpnt->SCp;
+ host->scsi.SCp = *arm_scsi_pointer(host->SCpnt);
#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
printk(", data pointers: [%p, %X]",
host->scsi.SCp.ptr, host->scsi.SCp.this_residual);
*/
static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt)
{
+ struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
void (*done)(struct scsi_cmnd *) = scsi_done;
AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
SCpnt->host_scribble = NULL;
SCpnt->result = 0;
- SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
- SCpnt->SCp.sent_command = 0;
- SCpnt->SCp.scsi_xferred = 0;
+ scsi_pointer->phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
+ scsi_pointer->sent_command = 0;
+ scsi_pointer->scsi_xferred = 0;
init_SCp(SCpnt);
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
.proc_name = "acornscsi",
+ .cmd_size = sizeof(struct arm_cmd_priv),
};
static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2002 Russell King
+ *
+ * Commonly used functions by the ARM SCSI-II drivers.
+ */
+
+#include <linux/scatterlist.h>
+
+#define BELT_AND_BRACES
+
+struct arm_cmd_priv {
+ struct scsi_pointer scsi_pointer;
+};
+
+static inline struct scsi_pointer *arm_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ struct arm_cmd_priv *acmd = scsi_cmd_priv(cmd);
+
+ return &acmd->scsi_pointer;
+}
+
+/*
+ * The scatter-gather list handling. This contains all
+ * the yucky stuff that needs to be fixed properly.
+ */
+
+/*
+ * copy_SCp_to_sg() Assumes contiguous allocation at @sg of at-most @max
+ * entries of uninitialized memory. SCp is from scsi-ml and has a valid
+ * (possibly chained) sg-list
+ */
+static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max)
+{
+ int bufs = SCp->buffers_residual;
+
+ /* FIXME: It should be easy for drivers to loop on copy_SCp_to_sg()
+ * and to remove this BUG_ON; use min() in its place.
+ */
+ BUG_ON(bufs + 1 > max);
+
+ sg_set_buf(sg, SCp->ptr, SCp->this_residual);
+
+ if (bufs) {
+ struct scatterlist *src_sg;
+ unsigned i;
+
+ for_each_sg(sg_next(SCp->buffer), src_sg, bufs, i)
+ *(++sg) = *src_sg;
+ sg_mark_end(sg);
+ }
+
+ return bufs + 1;
+}
+
+static inline int next_SCp(struct scsi_pointer *SCp)
+{
+ int ret = SCp->buffers_residual;
+ if (ret) {
+ SCp->buffer = sg_next(SCp->buffer);
+ SCp->buffers_residual--;
+ SCp->ptr = sg_virt(SCp->buffer);
+ SCp->this_residual = SCp->buffer->length;
+ } else {
+ SCp->ptr = NULL;
+ SCp->this_residual = 0;
+ }
+ return ret;
+}
+
+static inline unsigned char get_next_SCp_byte(struct scsi_pointer *SCp)
+{
+ char c = *SCp->ptr;
+
+ SCp->ptr += 1;
+ SCp->this_residual -= 1;
+
+ return c;
+}
+
+static inline void put_next_SCp_byte(struct scsi_pointer *SCp, unsigned char c)
+{
+ *SCp->ptr = c;
+ SCp->ptr += 1;
+ SCp->this_residual -= 1;
+}
+
+static inline void init_SCp(struct scsi_cmnd *SCpnt)
+{
+ struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
+
+ memset(scsi_pointer, 0, sizeof(struct scsi_pointer));
+
+ if (scsi_bufflen(SCpnt)) {
+ unsigned long len = 0;
+
+ scsi_pointer->buffer = scsi_sglist(SCpnt);
+ scsi_pointer->buffers_residual = scsi_sg_count(SCpnt) - 1;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ scsi_pointer->phase = scsi_bufflen(SCpnt);
+
+#ifdef BELT_AND_BRACES
+ { /*
+ * Calculate correct buffer length. Some commands
+ * come in with the wrong scsi_bufflen.
+ */
+ struct scatterlist *sg;
+ unsigned i, sg_count = scsi_sg_count(SCpnt);
+
+ scsi_for_each_sg(SCpnt, sg, sg_count, i)
+ len += sg->length;
+
+ if (scsi_bufflen(SCpnt) != len) {
+ printk(KERN_WARNING
+ "scsi%d.%c: bad request buffer "
+ "length %d, should be %ld\n",
+ SCpnt->device->host->host_no,
+ '0' + SCpnt->device->id,
+ scsi_bufflen(SCpnt), len);
+ /*
+ * FIXME: Totally naive fixup. We should abort
+ * with error
+ */
+ scsi_pointer->phase =
+ min_t(unsigned long, len,
+ scsi_bufflen(SCpnt));
+ }
+ }
+#endif
+ } else {
+ scsi_pointer->ptr = NULL;
+ scsi_pointer->this_residual = 0;
+ scsi_pointer->phase = 0;
+ }
+}
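The helpers above keep the traditional SCp-walking idiom available to the ARM drivers after the move into private command data. As a hedged sketch (example_pio_out and data_port are illustrative only, not part of the patch), a PIO write path would consume them like this:

static void example_pio_out(struct scsi_cmnd *SCpnt, void __iomem *data_port)
{
	struct scsi_pointer *SCp = arm_scsi_pointer(SCpnt);

	init_SCp(SCpnt);
	do {
		/* drain the current scatterlist segment byte by byte */
		while (SCp->this_residual)
			writeb(get_next_SCp_byte(SCp), data_port);
		/* next_SCp() returns 0 once no further segments remain */
	} while (next_SCp(SCp));
}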
#include <asm/io.h>
#include <asm/ecard.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
struct arxescsi_info {
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.proc_name = "CumanaSCSI-1",
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
.max_sectors = 128,
.dma_boundary = PAGE_SIZE - 1,
};
#include <asm/ecard.h>
#include <asm/io.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
#include <scsi/scsicam.h>
#include <asm/dma.h>
#include <asm/ecard.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
#include <scsi/scsicam.h>
#include <asm/irq.h>
#include <asm/ecard.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
/* NOTE: SCSI2 Synchronous transfers *require* DMA according to
* the data sheet. This restriction is crazy, especially when
fas216_log(info, LOG_ERROR, "null buffer passed to "
"fas216_starttransfer");
print_SCp(&info->scsi.SCp, "SCp: ", "\n");
- print_SCp(&info->SCpnt->SCp, "Cmnd SCp: ", "\n");
+ print_SCp(arm_scsi_pointer(info->SCpnt), "Cmnd SCp: ", "\n");
return;
}
/*
* Restore data pointer from SAVED data pointer
*/
- info->scsi.SCp = info->SCpnt->SCp;
+ info->scsi.SCp = *arm_scsi_pointer(info->SCpnt);
fas216_log(info, LOG_CONNECT, "data pointers: [%p, %X]",
info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
static void fas216_parse_message(FAS216_Info *info, unsigned char *message, int msglen)
{
+ struct scsi_pointer *scsi_pointer;
int i;
switch (message[0]) {
* as required by the SCSI II standard. These always
* point to the start of their respective areas.
*/
- info->SCpnt->SCp = info->scsi.SCp;
- info->SCpnt->SCp.sent_command = 0;
+ scsi_pointer = arm_scsi_pointer(info->SCpnt);
+ *scsi_pointer = info->scsi.SCp;
+ scsi_pointer->sent_command = 0;
fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER,
"save data pointers: [%p, %X]",
info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
/*
* Restore current data pointer from SAVED data pointer
*/
- info->scsi.SCp = info->SCpnt->SCp;
+ info->scsi.SCp = *arm_scsi_pointer(info->SCpnt);
fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER,
"restore data pointers: [%p, 0x%x]",
info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
* claim host busy
*/
info->scsi.phase = PHASE_SELECTION;
- info->scsi.SCp = SCpnt->SCp;
+ info->scsi.SCp = *arm_scsi_pointer(SCpnt);
info->SCpnt = SCpnt;
info->dma.transfer_type = fasdma_none;
* claim host busy
*/
info->scsi.phase = PHASE_SELECTION;
- info->scsi.SCp = SCpnt->SCp;
+ info->scsi.SCp = *arm_scsi_pointer(SCpnt);
info->SCpnt = SCpnt;
info->dma.transfer_type = fasdma_none;
static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
unsigned int result)
{
+ struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
+
fas216_log_target(info, LOG_CONNECT, SCpnt->device->id,
"request sense complete, result=0x%04x%02x%02x",
- result, SCpnt->SCp.Message, SCpnt->SCp.Status);
+ result, scsi_pointer->Message, scsi_pointer->Status);
- if (result != DID_OK || SCpnt->SCp.Status != SAM_STAT_GOOD)
+ if (result != DID_OK || scsi_pointer->Status != SAM_STAT_GOOD)
/*
* Something went wrong. Make sure that we don't
* have valid data in the sense buffer that could
static void
fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
{
+ struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
+
info->stats.fins += 1;
set_host_byte(SCpnt, result);
fas216_log_target(info, LOG_CONNECT, SCpnt->device->id,
"requesting sense");
init_SCp(SCpnt);
- SCpnt->SCp.Message = 0;
- SCpnt->SCp.Status = 0;
+ scsi_pointer->Message = 0;
+ scsi_pointer->Status = 0;
SCpnt->host_scribble = (void *)fas216_rq_sns_done;
/*
/* driver-private data per SCSI command. */
struct fas216_cmd_priv {
+ /*
+ * @scsi_pointer must be the first member. See also arm_scsi_pointer().
+ */
+ struct scsi_pointer scsi_pointer;
void (*scsi_done)(struct scsi_cmnd *cmd);
};
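Since arm_scsi_pointer() casts the private area to struct arm_cmd_priv and takes the address of its first member, the comment above is a hard layout requirement. A compile-time guard along these lines (hypothetical, not part of the patch) would enforce it:

/* Assumption for illustration: keep fas216_cmd_priv layout-compatible
 * with arm_scsi_pointer(). */
static_assert(offsetof(struct fas216_cmd_priv, scsi_pointer) == 0);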
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
.proc_name = "oakscsi",
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
.max_sectors = 128,
};
#include <asm/ecard.h>
#include <asm/io.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
#include <scsi/scsicam.h>
#include <linux/list.h>
#include <linux/init.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#define DEBUG
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/drivers/acorn/scsi/scsi.h
- *
- * Copyright (C) 2002 Russell King
- *
- * Commonly used scsi driver functions.
- */
-
-#include <linux/scatterlist.h>
-
-#define BELT_AND_BRACES
-
-/*
- * The scatter-gather list handling. This contains all
- * the yucky stuff that needs to be fixed properly.
- */
-
-/*
- * copy_SCp_to_sg() Assumes contiguous allocation at @sg of at-most @max
- * entries of uninitialized memory. SCp is from scsi-ml and has a valid
- * (possibly chained) sg-list
- */
-static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max)
-{
- int bufs = SCp->buffers_residual;
-
- /* FIXME: It should be easy for drivers to loop on copy_SCp_to_sg().
- * and to remove this BUG_ON. Use min() in-its-place
- */
- BUG_ON(bufs + 1 > max);
-
- sg_set_buf(sg, SCp->ptr, SCp->this_residual);
-
- if (bufs) {
- struct scatterlist *src_sg;
- unsigned i;
-
- for_each_sg(sg_next(SCp->buffer), src_sg, bufs, i)
- *(++sg) = *src_sg;
- sg_mark_end(sg);
- }
-
- return bufs + 1;
-}
-
-static inline int next_SCp(struct scsi_pointer *SCp)
-{
- int ret = SCp->buffers_residual;
- if (ret) {
- SCp->buffer = sg_next(SCp->buffer);
- SCp->buffers_residual--;
- SCp->ptr = sg_virt(SCp->buffer);
- SCp->this_residual = SCp->buffer->length;
- } else {
- SCp->ptr = NULL;
- SCp->this_residual = 0;
- }
- return ret;
-}
-
-static inline unsigned char get_next_SCp_byte(struct scsi_pointer *SCp)
-{
- char c = *SCp->ptr;
-
- SCp->ptr += 1;
- SCp->this_residual -= 1;
-
- return c;
-}
-
-static inline void put_next_SCp_byte(struct scsi_pointer *SCp, unsigned char c)
-{
- *SCp->ptr = c;
- SCp->ptr += 1;
- SCp->this_residual -= 1;
-}
-
-static inline void init_SCp(struct scsi_cmnd *SCpnt)
-{
- memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer));
-
- if (scsi_bufflen(SCpnt)) {
- unsigned long len = 0;
-
- SCpnt->SCp.buffer = scsi_sglist(SCpnt);
- SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
- SCpnt->SCp.ptr = sg_virt(SCpnt->SCp.buffer);
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
- SCpnt->SCp.phase = scsi_bufflen(SCpnt);
-
-#ifdef BELT_AND_BRACES
- { /*
- * Calculate correct buffer length. Some commands
- * come in with the wrong scsi_bufflen.
- */
- struct scatterlist *sg;
- unsigned i, sg_count = scsi_sg_count(SCpnt);
-
- scsi_for_each_sg(SCpnt, sg, sg_count, i)
- len += sg->length;
-
- if (scsi_bufflen(SCpnt) != len) {
- printk(KERN_WARNING
- "scsi%d.%c: bad request buffer "
- "length %d, should be %ld\n",
- SCpnt->device->host->host_no,
- '0' + SCpnt->device->id,
- scsi_bufflen(SCpnt), len);
- /*
- * FIXME: Totaly naive fixup. We should abort
- * with error
- */
- SCpnt->SCp.phase =
- min_t(unsigned long, len,
- scsi_bufflen(SCpnt));
- }
- }
-#endif
- } else {
- SCpnt->SCp.ptr = NULL;
- SCpnt->SCp.this_residual = 0;
- SCpnt->SCp.phase = 0;
- }
-}
static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- int wanted_len = cmd->SCp.this_residual;
+ int wanted_len = NCR5380_to_ncmd(cmd)->this_residual;
int possible_len, limit;
if (wanted_len < DMA_MIN_SIZE)
}
/* Last step: apply the hard limit on DMA transfers */
- limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(cmd->SCp.ptr))) ?
+ limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(NCR5380_to_ncmd(cmd)->ptr))) ?
STRAM_BUFFER_SIZE : 255*512;
if (possible_len > limit)
possible_len = limit;
.this_id = 7,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
};
static int __init atari_scsi_probe(struct platform_device *pdev)
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
- struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
+ struct iscsi_task *abrt_task = iscsi_cmd(sc)->task;
struct iscsi_cls_session *cls_session;
struct beiscsi_io_task *abrt_io_task;
struct beiscsi_conn *beiscsi_conn;
.cmd_per_lun = BEISCSI_CMD_PER_LUN,
.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
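For the iSCSI offload drivers the private area is the library-owned struct iscsi_cmd, which is why several host templates below gain the same .cmd_size line. A minimal sketch of the accessor, assuming libiscsi's wrapper is a plain scsi_cmd_priv() cast, which matches how iscsi_cmd(sc)->task is used above:

static inline struct iscsi_cmd *iscsi_cmd(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}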
static struct scsi_transport_template *beiscsi_scsi_transport;
char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
- return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
+ return sysfs_emit(buf, "%s\n", serial_num);
}
static ssize_t
char model[BFA_ADAPTER_MODEL_NAME_LEN];
bfa_get_adapter_model(&bfad->bfa, model);
- return snprintf(buf, PAGE_SIZE, "%s\n", model);
+ return sysfs_emit(buf, "%s\n", model);
}
static ssize_t
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Invalid Model");
- return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
+ return sysfs_emit(buf, "%s\n", model_descr);
}
static ssize_t
u64 nwwn;
nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
- return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
+ return sysfs_emit(buf, "0x%llx\n", cpu_to_be64(nwwn));
}
static ssize_t
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
strlcpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
- return snprintf(buf, PAGE_SIZE, "%s\n", symname);
+ return sysfs_emit(buf, "%s\n", symname);
}
static ssize_t
char hw_ver[BFA_VERSION_LEN];
bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
- return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
+ return sysfs_emit(buf, "%s\n", hw_ver);
}
static ssize_t
bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION);
+ return sysfs_emit(buf, "%s\n", BFAD_DRIVER_VERSION);
}
static ssize_t
char optrom_ver[BFA_VERSION_LEN];
bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
- return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
+ return sysfs_emit(buf, "%s\n", optrom_ver);
}
static ssize_t
char fw_ver[BFA_VERSION_LEN];
bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
- return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
+ return sysfs_emit(buf, "%s\n", fw_ver);
}
static ssize_t
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
bfa_get_nports(&bfad->bfa));
}
bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME);
+ return sysfs_emit(buf, "%s\n", BFAD_DRIVER_NAME);
}
static ssize_t
rports = kcalloc(nrports, sizeof(struct bfa_rport_qualifier_s),
GFP_ATOMIC);
if (rports == NULL)
- return snprintf(buf, PAGE_SIZE, "Failed\n");
+ return sysfs_emit(buf, "Failed\n");
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
kfree(rports);
- return snprintf(buf, PAGE_SIZE, "%d\n", nrports);
+ return sysfs_emit(buf, "%d\n", nrports);
}
static DEVICE_ATTR(serial_number, S_IRUGO,
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk;
wait_queue_head_t *wq;
- cmnd->SCp.Status |= tsk_status << 1;
- set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status);
- wq = (wait_queue_head_t *) cmnd->SCp.ptr;
- cmnd->SCp.ptr = NULL;
+ bfad_priv(cmnd)->status |= tsk_status << 1;
+ set_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status);
+ wq = bfad_priv(cmnd)->wq;
+ bfad_priv(cmnd)->wq = NULL;
if (wq)
wake_up(wq);
* happens.
*/
cmnd->host_scribble = NULL;
- cmnd->SCp.Status = 0;
+ bfad_priv(cmnd)->status = 0;
bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
/*
* bfa_itnim can be NULL if the port gets disconnected and the bfa
* if happens.
*/
cmnd->host_scribble = NULL;
- cmnd->SCp.ptr = (char *)&wq;
- cmnd->SCp.Status = 0;
+ bfad_priv(cmnd)->wq = &wq;
+ bfad_priv(cmnd)->status = 0;
bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
/*
* bfa_itnim can be NULL if the port gets disconnected and the bfa
FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- wait_event(wq, test_bit(IO_DONE_BIT,
- (unsigned long *)&cmnd->SCp.Status));
+ wait_event(wq, test_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status));
- task_status = cmnd->SCp.Status >> 1;
+ task_status = bfad_priv(cmnd)->status >> 1;
if (task_status != BFI_TSKIM_STS_OK) {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"LUN reset failure, status: %d\n", task_status);
spin_lock_irqsave(&bfad->bfad_lock, flags);
itnim = bfad_get_itnim(im_port, starget->id);
if (itnim) {
- cmnd->SCp.ptr = (char *)&wq;
+ bfad_priv(cmnd)->wq = &wq;
rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
if (rc == BFA_STATUS_OK) {
/* wait target reset to complete */
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
wait_event(wq, test_bit(IO_DONE_BIT,
- (unsigned long *)&cmnd->SCp.Status));
+ &bfad_priv(cmnd)->status));
spin_lock_irqsave(&bfad->bfad_lock, flags);
- task_status = cmnd->SCp.Status >> 1;
+ task_status = bfad_priv(cmnd)->status >> 1;
if (task_status != BFI_TSKIM_STS_OK)
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"target reset failure,"
.name = BFAD_DRIVER_NAME,
.info = bfad_im_info,
.queuecommand = bfad_im_queuecommand,
+ .cmd_size = sizeof(struct bfad_cmd_priv),
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = bfad_im_abort_handler,
.eh_device_reset_handler = bfad_im_reset_lun_handler,
.name = BFAD_DRIVER_NAME,
.info = bfad_im_info,
.queuecommand = bfad_im_queuecommand,
+ .cmd_size = sizeof(struct bfad_cmd_priv),
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = bfad_im_abort_handler,
.eh_device_reset_handler = bfad_im_reset_lun_handler,
*/
#define IO_DONE_BIT 0
+/**
+ * struct bfad_cmd_priv - private data per SCSI command.
+ * @status: Lowest bit represents IO_DONE. The next seven bits hold a value of
+ * type enum bfi_tskim_status.
+ * @wq: Wait queue used to wait for completion of an operation.
+ */
+struct bfad_cmd_priv {
+ unsigned long status;
+ wait_queue_head_t *wq;
+};
+
+static inline struct bfad_cmd_priv *bfad_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
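The bit layout documented above is easy to misread, so here is a hedged sketch of matching pack/unpack helpers (hypothetical names; the driver open-codes these operations at the call sites shown earlier):

static inline void bfad_tm_done(struct bfad_cmd_priv *priv, u8 tsk_status)
{
	priv->status |= (unsigned long)tsk_status << 1;	/* bits 1..7 */
	set_bit(IO_DONE_BIT, &priv->status);		/* bit 0 wakes the waiter */
}

static inline u8 bfad_tm_status(const struct bfad_cmd_priv *priv)
{
	return priv->status >> 1;
}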
struct bfad_itnim_data_s {
struct bfad_itnim_s *itnim;
};
#define BNX2FC_FW_TIMEOUT (3 * HZ)
#define PORT_MAX 2
-#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
-
/* FC FCP Status */
#define FC_GOOD 0
struct work_struct unsol_els_work;
};
+struct bnx2fc_priv {
+ struct bnx2fc_cmd *io_req;
+};
+static inline struct bnx2fc_priv *bnx2fc_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
bg = &bnx2fc_global;
skb_queue_head_init(&bg->fcoe_rx_list);
- l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
- (void *)bg,
- "bnx2fc_l2_thread");
+ l2_thread = kthread_run(bnx2fc_l2_rcv_thread,
+ (void *)bg,
+ "bnx2fc_l2_thread");
if (IS_ERR(l2_thread)) {
rc = PTR_ERR(l2_thread);
goto free_wq;
}
- wake_up_process(l2_thread);
spin_lock_bh(&bg->fcoe_rx_list.lock);
bg->kthread = l2_thread;
spin_unlock_bh(&bg->fcoe_rx_list.lock);
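The explicit wake_up_process() can be dropped because kthread_run() is defined in <linux/kthread.h> as kthread_create() followed by an immediate wakeup, roughly:

#define kthread_run(threadfn, data, namefmt, ...)			   \
({									   \
	struct task_struct *__k						   \
		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
	if (!IS_ERR(__k))						   \
		wake_up_process(__k);					   \
	__k;								   \
})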
.track_queue_depth = 1,
.slave_configure = bnx2fc_slave_configure,
.shost_groups = bnx2fc_host_groups,
+ .cmd_size = sizeof(struct bnx2fc_priv),
};
static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
sc_cmd->allowed);
scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
- sc_cmd->SCp.ptr = NULL;
+ bnx2fc_priv(sc_cmd)->io_req = NULL;
scsi_done(sc_cmd);
}
task = &(task_page[index]);
bnx2fc_init_mp_task(io_req, task);
- sc_cmd->SCp.ptr = (char *)io_req;
+ bnx2fc_priv(sc_cmd)->io_req = io_req;
/* Obtain free SQ entry */
spin_lock_bh(&tgt->tgt_lock);
BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
spin_lock_bh(&tgt->tgt_lock);
- io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
+ io_req = bnx2fc_priv(sc_cmd)->io_req;
if (!io_req) {
/* Command might have just completed */
printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
fc_hdr->fh_r_ctl);
}
- if (!sc_cmd->SCp.ptr) {
- printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
+ if (!bnx2fc_priv(sc_cmd)->io_req) {
+ printk(KERN_ERR PFX "tm_compl: io_req is NULL\n");
return;
}
switch (io_req->fcp_status) {
return;
}
- sc_cmd->SCp.ptr = NULL;
+ bnx2fc_priv(sc_cmd)->io_req = NULL;
scsi_done(sc_cmd);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
io_req->fcp_resid = fcp_rsp->fcp_resid;
io_req->scsi_comp_flags = rsp_flags;
- CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
- fcp_rsp->scsi_status_code;
+ io_req->cdb_status = fcp_rsp->scsi_status_code;
/* Fetch fcp_rsp_info and fcp_sns_info if available */
if (num_rq) {
/* parse fcp_rsp and obtain sense data from RQ if available */
bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data);
- if (!sc_cmd->SCp.ptr) {
- printk(KERN_ERR PFX "SCp.ptr is NULL\n");
+ if (!bnx2fc_priv(sc_cmd)->io_req) {
+ printk(KERN_ERR PFX "io_req is NULL\n");
return;
}
io_req->fcp_status);
break;
}
- sc_cmd->SCp.ptr = NULL;
+ bnx2fc_priv(sc_cmd)->io_req = NULL;
scsi_done(sc_cmd);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
io_req->port = port;
io_req->tgt = tgt;
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
- sc_cmd->SCp.ptr = (char *)io_req;
+ bnx2fc_priv(sc_cmd)->io_req = io_req;
stats = per_cpu_ptr(lport->stats, get_cpu());
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
}
/*
- * Offlaod process is protected with hba mutex.
+ * Offload process is protected with hba mutex.
* Use the same mutex_lock for upload process too
*/
mutex_lock(&hba->hba_mutex);
.sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
.shost_groups = bnx2i_dev_groups,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
struct iscsi_transport bnx2i_iscsi_transport = {
struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
/* Check for Task Management */
- if (likely(scmnd->SCp.Message == 0)) {
+ if (likely(csio_priv(scmnd)->fc_tm_flags == 0)) {
int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
fcp_cmnd->fc_tm_flags = 0;
fcp_cmnd->fc_cmdref = 0;
} else {
memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
- fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
+ fcp_cmnd->fc_tm_flags = csio_priv(scmnd)->fc_tm_flags;
}
}
/* Needed during abort */
cmnd->host_scribble = (unsigned char *)ioreq;
- cmnd->SCp.Message = 0;
+ csio_priv(cmnd)->fc_tm_flags = 0;
/* Kick off SCSI IO SM on the ioreq */
spin_lock_irqsave(&hw->lock, flags);
req, req->wr_status);
/* Cache FW return status */
- cmnd->SCp.Status = req->wr_status;
+ csio_priv(cmnd)->wr_status = req->wr_status;
/* Special handling based on FCP response */
/* Modify return status if flags indicate success */
if (flags & FCP_RSP_LEN_VAL)
if (rsp_info->rsp_code == FCP_TMF_CMPL)
- cmnd->SCp.Status = FW_SUCCESS;
+ csio_priv(cmnd)->wr_status = FW_SUCCESS;
csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
}
csio_scsi_cmnd(ioreq) = cmnd;
cmnd->host_scribble = (unsigned char *)ioreq;
- cmnd->SCp.Status = 0;
+ csio_priv(cmnd)->wr_status = 0;
- cmnd->SCp.Message = FCP_TMF_LUN_RESET;
+ csio_priv(cmnd)->fc_tm_flags = FCP_TMF_LUN_RESET;
ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;
/*
}
/* LUN reset returned, check cached status */
- if (cmnd->SCp.Status != FW_SUCCESS) {
+ if (csio_priv(cmnd)->wr_status != FW_SUCCESS) {
csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n",
- cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
+ cmnd->device->id, cmnd->device->lun,
+ csio_priv(cmnd)->wr_status);
goto fail;
}
.name = CSIO_DRV_DESC,
.proc_name = KBUILD_MODNAME,
.queuecommand = csio_queuecommand,
+ .cmd_size = sizeof(struct csio_cmd_priv),
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = csio_eh_abort_handler,
.eh_device_reset_handler = csio_eh_lun_reset_handler,
uint64_t oslun;
};
+struct csio_cmd_priv {
+ uint8_t fc_tm_flags; /* task management flags */
+ uint16_t wr_status;
+};
+
+static inline struct csio_cmd_priv *csio_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static inline struct csio_ioreq *
csio_get_scsi_ioreq(struct csio_scsim *scm)
{
.dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static struct iscsi_transport cxgb3i_iscsi_transport = {
.dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static struct iscsi_transport cxgb4i_iscsi_transport = {
struct device *dev = &cfg->dev->dev;
const u32 s = ilog2(sdev->sector_size) - 9;
const u32 to = sdev->request_queue->rq_timeout;
- const u32 ws_limit = blk_queue_get_max_sectors(sdev->request_queue,
- REQ_OP_WRITE_SAME) >> s;
+ const u32 ws_limit =
+ sdev->request_queue->limits.max_write_zeroes_sectors >> s;
cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
/* Here is the info for Doug Gilbert's sg3 ... */
scsi_set_resid(cmd, srb->total_xfer_length);
- /* This may be interpreted by sb. or not ... */
- cmd->SCp.this_residual = srb->total_xfer_length;
- cmd->SCp.buffers_residual = 0;
if (debug_enabled(DBG_KG)) {
if (srb->total_xfer_length)
dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
};
static int dmx3191d_probe_one(struct pci_dev *pdev,
sli4->qinfo.count_mask[q]);
}
- /* single, continguous DMA allocations will be called for each queue
+ /* single, contiguous DMA allocations will be called for each queue
* of size (max_qentries * queue entry size); since these can be large,
* check against the OS max DMA allocation size
*/
.sg_tablesize = SG_ALL,
.max_sectors = 0xffff,
.skip_settle_delay = 1,
+ .cmd_size = sizeof(struct esp_cmd_priv),
};
EXPORT_SYMBOL(scsi_esp_template);
static int __init esp_init(void)
{
- BUILD_BUG_ON(sizeof(struct scsi_pointer) <
- sizeof(struct esp_cmd_priv));
-
esp_transport_template = spi_attach_transport(&esp_transport_ops);
if (!esp_transport_template)
return -ENODEV;
struct scatterlist *cur_sg;
int tot_residue;
};
-#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp))
+
+#define ESP_CMD_PRIV(cmd) ((struct esp_cmd_priv *)scsi_cmd_priv(cmd))
/* NOTE: this enum is ordered based on chip features! */
enum esp_rev {
.sg_tablesize = SG_ALL,
.max_sectors = 0xffff,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct libfc_cmd_priv),
};
/**
struct work_struct work;
};
+static struct scsi_pointer *fdomain_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static inline void fdomain_make_bus_idle(struct fdomain *fd)
{
outb(0, fd->base + REG_BCTL);
struct Scsi_Host *sh = container_of((void *)fd, struct Scsi_Host,
hostdata);
struct scsi_cmnd *cmd = fd->cur_cmd;
+ struct scsi_pointer *scsi_pointer = fdomain_scsi_pointer(cmd);
unsigned long flags;
int status;
int done = 0;
spin_lock_irqsave(sh->host_lock, flags);
- if (cmd->SCp.phase & in_arbitration) {
+ if (scsi_pointer->phase & in_arbitration) {
status = inb(fd->base + REG_ASTAT);
if (!(status & ASTAT_ARB)) {
set_host_byte(cmd, DID_BUS_BUSY);
fdomain_finish_cmd(fd);
goto out;
}
- cmd->SCp.phase = in_selection;
+ scsi_pointer->phase = in_selection;
outb(ICTL_SEL | FIFO_COUNT, fd->base + REG_ICTL);
outb(BCTL_BUSEN | BCTL_SEL, fd->base + REG_BCTL);
/* Stop arbitration and enable parity */
outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
goto out;
- } else if (cmd->SCp.phase & in_selection) {
+ } else if (scsi_pointer->phase & in_selection) {
status = inb(fd->base + REG_BSTAT);
if (!(status & BSTAT_BSY)) {
/* Try again, for slow devices */
/* Stop arbitration and enable parity */
outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
}
- cmd->SCp.phase = in_other;
+ scsi_pointer->phase = in_other;
outb(ICTL_FIFO | ICTL_REQ | FIFO_COUNT, fd->base + REG_ICTL);
outb(BCTL_BUSEN, fd->base + REG_BCTL);
goto out;
}
- /* cur_cmd->SCp.phase == in_other: this is the body of the routine */
+ /* fdomain_scsi_pointer(cur_cmd)->phase == in_other: this is the body of the routine */
status = inb(fd->base + REG_BSTAT);
if (status & BSTAT_REQ) {
switch (status & (BSTAT_MSG | BSTAT_CMD | BSTAT_IO)) {
case BSTAT_CMD: /* COMMAND OUT */
- outb(cmd->cmnd[cmd->SCp.sent_command++],
+ outb(cmd->cmnd[scsi_pointer->sent_command++],
fd->base + REG_SCSI_DATA);
break;
case 0: /* DATA OUT -- tmc18c50/tmc18c30 only */
- if (fd->chip != tmc1800 && !cmd->SCp.have_data_in) {
- cmd->SCp.have_data_in = -1;
+ if (fd->chip != tmc1800 && !scsi_pointer->have_data_in) {
+ scsi_pointer->have_data_in = -1;
outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN |
PARITY_MASK, fd->base + REG_ACTL);
}
break;
case BSTAT_IO: /* DATA IN -- tmc18c50/tmc18c30 only */
- if (fd->chip != tmc1800 && !cmd->SCp.have_data_in) {
- cmd->SCp.have_data_in = 1;
+ if (fd->chip != tmc1800 && !scsi_pointer->have_data_in) {
+ scsi_pointer->have_data_in = 1;
outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK,
fd->base + REG_ACTL);
}
break;
case BSTAT_CMD | BSTAT_IO: /* STATUS IN */
- cmd->SCp.Status = inb(fd->base + REG_SCSI_DATA);
+ scsi_pointer->Status = inb(fd->base + REG_SCSI_DATA);
break;
case BSTAT_MSG | BSTAT_CMD: /* MESSAGE OUT */
outb(MESSAGE_REJECT, fd->base + REG_SCSI_DATA);
break;
case BSTAT_MSG | BSTAT_CMD | BSTAT_IO: /* MESSAGE IN */
- cmd->SCp.Message = inb(fd->base + REG_SCSI_DATA);
- if (cmd->SCp.Message == COMMAND_COMPLETE)
+ scsi_pointer->Message = inb(fd->base + REG_SCSI_DATA);
+ if (scsi_pointer->Message == COMMAND_COMPLETE)
++done;
break;
}
}
- if (fd->chip == tmc1800 && !cmd->SCp.have_data_in &&
- cmd->SCp.sent_command >= cmd->cmd_len) {
+ if (fd->chip == tmc1800 && !scsi_pointer->have_data_in &&
+ scsi_pointer->sent_command >= cmd->cmd_len) {
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
- cmd->SCp.have_data_in = -1;
+ scsi_pointer->have_data_in = -1;
outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN |
PARITY_MASK, fd->base + REG_ACTL);
} else {
- cmd->SCp.have_data_in = 1;
+ scsi_pointer->have_data_in = 1;
outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK,
fd->base + REG_ACTL);
}
}
- if (cmd->SCp.have_data_in == -1) /* DATA OUT */
+ if (scsi_pointer->have_data_in == -1) /* DATA OUT */
fdomain_write_data(cmd);
- if (cmd->SCp.have_data_in == 1) /* DATA IN */
+ if (scsi_pointer->have_data_in == 1) /* DATA IN */
fdomain_read_data(cmd);
if (done) {
- set_status_byte(cmd, cmd->SCp.Status);
+ set_status_byte(cmd, scsi_pointer->Status);
set_host_byte(cmd, DID_OK);
- scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+ scsi_msg_to_host_byte(cmd, scsi_pointer->Message);
fdomain_finish_cmd(fd);
} else {
- if (cmd->SCp.phase & disconnect) {
+ if (scsi_pointer->phase & disconnect) {
outb(ICTL_FIFO | ICTL_SEL | ICTL_REQ | FIFO_COUNT,
fd->base + REG_ICTL);
outb(0, fd->base + REG_BCTL);
static int fdomain_queue(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
+ struct scsi_pointer *scsi_pointer = fdomain_scsi_pointer(cmd);
struct fdomain *fd = shost_priv(cmd->device->host);
unsigned long flags;
- cmd->SCp.Status = 0;
- cmd->SCp.Message = 0;
- cmd->SCp.have_data_in = 0;
- cmd->SCp.sent_command = 0;
- cmd->SCp.phase = in_arbitration;
+ scsi_pointer->Status = 0;
+ scsi_pointer->Message = 0;
+ scsi_pointer->have_data_in = 0;
+ scsi_pointer->sent_command = 0;
+ scsi_pointer->phase = in_arbitration;
scsi_set_resid(cmd, scsi_bufflen(cmd));
spin_lock_irqsave(sh->host_lock, flags);
spin_lock_irqsave(sh->host_lock, flags);
fdomain_make_bus_idle(fd);
- fd->cur_cmd->SCp.phase |= aborted;
+ fdomain_scsi_pointer(fd->cur_cmd)->phase |= aborted;
/* Aborts are not done well. . . */
set_host_byte(fd->cur_cmd, DID_ABORT);
.this_id = 7,
.sg_tablesize = 64,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
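
fdomain takes the variant where the legacy struct scsi_pointer itself moves into the private area: .cmd_size reserves a full scsi_pointer per command and fdomain_scsi_pointer() is the typed accessor over scsi_cmd_priv(). A sketch of the idiom, again with illustrative foo_* names:

    #include <scsi/scsi_cmnd.h>

    /* Host the legacy scsi_pointer in the per-command private area;
     * the accessor keeps call sites type-safe. */
    static inline struct scsi_pointer *foo_scsi_pointer(struct scsi_cmnd *cmd)
    {
            return scsi_cmd_priv(cmd);
    }

    static void foo_start_command(struct scsi_cmnd *cmd)
    {
            struct scsi_pointer *scsi_pointer = foo_scsi_pointer(cmd);

            scsi_pointer->phase = 1;        /* was: cmd->SCp.phase = 1 */
            scsi_pointer->sent_command = 0; /* was: cmd->SCp.sent_command = 0 */
    }
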
struct Scsi_Host *fdomain_create(int base, int irq, int this_id,
#define FNIC_DEV_RST_ABTS_PENDING BIT(21)
/*
- * Usage of the scsi_cmnd scratchpad.
+ * fnic private data per SCSI command.
* These fields are locked by the hashed io_req_lock.
*/
-#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
-#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase)
-#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
-#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
-#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
-#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status)
+struct fnic_cmd_priv {
+ struct fnic_io_req *io_req;
+ enum fnic_ioreq_state state;
+ u32 flags;
+ u16 abts_status;
+ u16 lr_status;
+};
+
+static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
+static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd)
+{
+ struct fnic_cmd_priv *fcmd = fnic_priv(cmd);
+
+ return ((u64)fcmd->flags << 32) | fcmd->state;
+}
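
fnic_flags_and_state() exists for the FNIC_TRACE call sites: it packs the 32-bit flags into the upper half of a u64 and the ioreq state into the lower half, replacing the open-coded (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)) expressions; note that one call site below even had the shift direction wrong (>> 32), which the helper silently fixes. For clarity, a pair of illustrative decoders (not part of the patch) that undo the packing:

    /* Illustrative decoders for the packed trace word. */
    static inline u32 fnic_trace_flags(u64 packed)
    {
            return (u32)(packed >> 32);     /* upper half: fnic_priv(sc)->flags */
    }

    static inline u32 fnic_trace_state(u64 packed)
    {
            return (u32)packed;             /* lower half: fnic_priv(sc)->state */
    }
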
#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
.max_sectors = 0xffff,
.shost_groups = fnic_host_groups,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct fnic_cmd_priv),
};
static void
* caller disabling them.
*/
spin_unlock(lp->host->host_lock);
- CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
- CMD_FLAGS(sc) = FNIC_NO_FLAGS;
+ fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
+ fnic_priv(sc)->flags = FNIC_NO_FLAGS;
/* Get a new io_req for this SCSI IO */
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
sg_count = scsi_dma_map(sc);
if (sg_count < 0) {
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- tag, sc, 0, sc->cmnd[0], sg_count, CMD_STATE(sc));
+ tag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
mempool_free(io_req, fnic->io_req_pool);
goto out;
}
io_lock_acquired = 1;
io_req->port_id = rport->port_id;
io_req->start_time = jiffies;
- CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
- CMD_SP(sc) = (char *)io_req;
- CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
+ fnic_priv(sc)->io_req = io_req;
+ fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED;
/* create copy wq desc and enqueue it */
wq = &fnic->wq_copy[0];
* refetch the pointer under the lock.
*/
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- tag, sc, 0, 0, 0,
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
- io_req = (struct fnic_io_req *)CMD_SP(sc);
- CMD_SP(sc) = NULL;
- CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ tag, sc, 0, 0, 0, fnic_flags_and_state(sc));
+ io_req = fnic_priv(sc)->io_req;
+ fnic_priv(sc)->io_req = NULL;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
spin_unlock_irqrestore(io_lock, flags);
if (io_req) {
fnic_release_ioreq_buf(fnic, io_req, sc);
atomic64_read(&fnic_stats->io_stats.active_ios));
/* REVISIT: Use per IO lock in the final code */
- CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_IO_ISSUED;
}
out:
cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
sc->cmnd[5]);
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- tag, sc, io_req, sg_count, cmd_trace,
- (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
+ tag, sc, io_req, sg_count, cmd_trace,
+ fnic_flags_and_state(sc));
/* only if we issued the IO will we have the io lock */
if (io_lock_acquired)
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
WARN_ON_ONCE(!io_req);
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
- CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(io_lock, flags);
shost_printk(KERN_ERR, fnic->lport->host,
"icmnd_cmpl io_req is null - "
* if SCSI-ML has already issued abort on this command,
* set completion of the IO. The abts path will clean it up
*/
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
/*
* set the FNIC_IO_DONE so that this doesn't get
* flagged as 'out of order' if it was not aborted
*/
- CMD_FLAGS(sc) |= FNIC_IO_DONE;
- CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
+ fnic_priv(sc)->flags |= FNIC_IO_DONE;
+ fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING;
spin_unlock_irqrestore(io_lock, flags);
if(FCPIO_ABORTED == hdr_status)
- CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
+ fnic_priv(sc)->flags |= FNIC_IO_ABORTED;
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"icmnd_cmpl abts pending "
}
/* Mark the IO as complete */
- CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
icmnd_cmpl = &desc->u.icmnd_cmpl;
}
/* Break link with the SCSI command */
- CMD_SP(sc) = NULL;
- CMD_FLAGS(sc) |= FNIC_IO_DONE;
+ fnic_priv(sc)->io_req = NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_DONE;
if (hdr_status != FCPIO_SUCCESS) {
atomic64_inc(&fnic_stats->io_stats.io_failures);
((u64)icmnd_cmpl->_resvd0[1] << 56 |
(u64)icmnd_cmpl->_resvd0[0] << 48 |
jiffies_to_msecs(jiffies - start_time)),
- desc, cmd_trace,
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ desc, cmd_trace, fnic_flags_and_state(sc));
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
fnic->lport->host_stats.fcp_input_requests++;
}
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
WARN_ON_ONCE(!io_req);
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset abts cmpl recd. id %x status %s\n",
id, fnic_fcpio_status_to_str(hdr_status));
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
- CMD_ABTS_STATUS(sc) = hdr_status;
- CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
+ fnic_priv(sc)->abts_status = hdr_status;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
if (io_req->abts_done)
complete(io_req->abts_done);
spin_unlock_irqrestore(io_lock, flags);
case FCPIO_SUCCESS:
break;
case FCPIO_TIMEOUT:
- if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_fw_timeouts);
else
atomic64_inc(
(int)(id & FNIC_TAG_MASK));
break;
case FCPIO_IO_NOT_FOUND:
- if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_io_not_found);
else
atomic64_inc(
&term_stats->terminate_io_not_found);
break;
default:
- if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_failures);
else
atomic64_inc(
&term_stats->terminate_failures);
break;
}
- if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
+ if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */
spin_unlock_irqrestore(io_lock, flags);
return;
}
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
- CMD_ABTS_STATUS(sc) = hdr_status;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
+ fnic_priv(sc)->abts_status = hdr_status;
/* If the status is IO not found consider it as success */
if (hdr_status == FCPIO_IO_NOT_FOUND)
- CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;
+ fnic_priv(sc)->abts_status = FCPIO_SUCCESS;
- if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
+ if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
} else {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl, completing IO\n");
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
sc->result = (DID_ERROR << 16);
spin_unlock_irqrestore(io_lock, flags);
(u64)sc->cmnd[2] << 24 |
(u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) |
- CMD_STATE(sc)));
+ fnic_flags_and_state(sc));
scsi_done(sc);
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
}
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
- CMD_LR_STATUS(sc) = hdr_status;
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ fnic_priv(sc)->lr_status = hdr_status;
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING;
FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
- desc, 0,
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ desc, 0, fnic_flags_and_state(sc));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Terminate pending "
"dev reset cmpl recd. id %d status %s\n",
fnic_fcpio_status_to_str(hdr_status));
return;
}
- if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
+ if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) {
/* Need to wait for terminate completion */
spin_unlock_irqrestore(io_lock, flags);
FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
- desc, 0,
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ desc, 0, fnic_flags_and_state(sc));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset cmpl recd after time out. "
"id %d status %s\n",
fnic_fcpio_status_to_str(hdr_status));
return;
}
- CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
- CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
} else {
shost_printk(KERN_ERR, fnic->lport->host,
"Unexpected itmf io state %s tag %x\n",
- fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
+ fnic_ioreq_state_to_str(fnic_priv(sc)->state), id);
spin_unlock_irqrestore(io_lock, flags);
}
io_lock = fnic_io_lock_tag(fnic, tag);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
- if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
- !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ io_req = fnic_priv(sc)->io_req;
+ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
+ !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
/*
* We will be here only when FW completes reset
* without sending completions for outstanding ios.
*/
- CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
if (io_req && io_req->dr_done)
complete(io_req->dr_done);
else if (io_req && io_req->abts_done)
complete(io_req->abts_done);
spin_unlock_irqrestore(io_lock, flags);
return true;
- } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
goto cleanup_scsi_cmd;
}
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
atomic64_inc(&fnic_stats->io_stats.io_completions);
/* Complete the command to SCSI */
- if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
+ if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED))
shost_printk(KERN_ERR, fnic->lport->host,
"Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
tag, sc);
(u64)sc->cmnd[2] << 24 |
(u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ fnic_flags_and_state(sc));
scsi_done(sc);
spin_lock_irqsave(io_lock, flags);
/* Get the IO context which this desc refers to */
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
/* fnic interrupts are turned off by now */
goto wq_copy_cleanup_scsi_cmd;
}
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ fnic_flags_and_state(sc));
scsi_done(sc);
}
io_lock = fnic_io_lock_tag(fnic, abt_tag);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req || io_req->port_id != iter_data->port_id) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
- if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
- (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
+ !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
sc);
* Found IO that is still pending with firmware and
* belongs to rport that went away
*/
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
shost_printk(KERN_ERR, fnic->lport->host,
"fnic_rport_exch_reset: io_req->abts_done is set "
"state is %s\n",
- fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ fnic_ioreq_state_to_str(fnic_priv(sc)->state));
}
- if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) {
shost_printk(KERN_ERR, fnic->lport->host,
"rport_exch_reset "
"IO not yet issued %p tag 0x%x flags "
"%x state %d\n",
- sc, abt_tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state);
}
- old_ioreq_state = CMD_STATE(sc);
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
- CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ old_ioreq_state = fnic_priv(sc)->state;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
+ fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
+ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag |= FNIC_TAG_DEV_RST;
}
* lun reset
*/
spin_lock_irqsave(io_lock, flags);
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
- CMD_STATE(sc) = old_ioreq_state;
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
+ fnic_priv(sc)->state = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
} else {
spin_lock_irqsave(io_lock, flags);
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
- CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
else
- CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
atomic64_inc(&term_stats->terminates);
iter_data->term_cnt++;
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
- rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
+ rport->port_id, sc->device->lun, tag, fnic_priv(sc)->flags);
- CMD_FLAGS(sc) = FNIC_NO_FLAGS;
+ fnic_priv(sc)->flags = FNIC_NO_FLAGS;
if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
ret = FAILED;
* happened, the completion won't actually complete the command
* and it will be considered as an aborted command
*
- * The CMD_SP will not be cleared except while holding io_req_lock.
+ * .io_req will not be cleared except while holding io_req_lock.
*/
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
goto fnic_abort_cmd_end;
io_req->abts_done = &tm_done;
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
goto wait_pending;
}
* the completion won't be done till the mid-layer, since abort
* has already started.
*/
- old_ioreq_state = CMD_STATE(sc);
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
- CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ old_ioreq_state = fnic_priv(sc)->state;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
+ fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(io_lock, flags);
if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun,
io_req)) {
spin_lock_irqsave(io_lock, flags);
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
- CMD_STATE(sc) = old_ioreq_state;
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
+ fnic_priv(sc)->state = old_ioreq_state;
+ io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->abts_done = NULL;
spin_unlock_irqrestore(io_lock, flags);
goto fnic_abort_cmd_end;
}
if (task_req == FCPIO_ITMF_ABT_TASK) {
- CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED;
atomic64_inc(&fnic_stats->abts_stats.aborts);
} else {
- CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED;
atomic64_inc(&fnic_stats->term_stats.terminates);
}
/* Check the abort status */
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED;
goto fnic_abort_cmd_end;
}
io_req->abts_done = NULL;
/* fw did not complete abort, timed out */
- if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
+ if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
if (task_req == FCPIO_ITMF_ABT_TASK) {
atomic64_inc(&abts_stats->abort_drv_timeouts);
} else {
atomic64_inc(&term_stats->terminate_drv_timeouts);
}
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED;
goto fnic_abort_cmd_end;
}
/* IO out of order */
- if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
+ if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issuing Host reset due to out of order IO\n");
goto fnic_abort_cmd_end;
}
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
start_time = io_req->start_time;
/*
* free the io_req if successful. If abort fails,
* Device reset will clean the I/O.
*/
- if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
- CMD_SP(sc) = NULL;
- else {
+ if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS) {
+ fnic_priv(sc)->io_req = NULL;
+ } else {
ret = FAILED;
spin_unlock_irqrestore(io_lock, flags);
goto fnic_abort_cmd_end;
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ fnic_flags_and_state(sc));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from abort cmd type %x %s\n", task_req,
io_lock = fnic_io_lock_tag(fnic, abt_tag);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
return true;
*/
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Found IO in %s on lun\n",
- fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ fnic_ioreq_state_to_str(fnic_priv(sc)->state));
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
- if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
- (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
+ (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"%s dev rst not pending sc 0x%p\n", __func__,
sc);
if (io_req->abts_done)
shost_printk(KERN_ERR, fnic->lport->host,
"%s: io_req->abts_done is set state is %s\n",
- __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
- old_ioreq_state = CMD_STATE(sc);
+ __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
+ old_ioreq_state = fnic_priv(sc)->state;
/*
* Any pending IO issued prior to reset is expected to be
* in abts pending state, if not we need to set
* When IO is completed, the IO will be handed over and
* handled in this function.
*/
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
BUG_ON(io_req->abts_done);
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
abt_tag |= FNIC_TAG_DEV_RST;
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"%s: dev rst sc 0x%p\n", __func__, sc);
}
- CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
io_req->abts_done = &tm_done;
spin_unlock_irqrestore(io_lock, flags);
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->abts_done = NULL;
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
- CMD_STATE(sc) = old_ioreq_state;
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
+ fnic_priv(sc)->state = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
iter_data->ret = FAILED;
return false;
} else {
spin_lock_irqsave(io_lock, flags);
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
- CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
}
- CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
wait_for_completion_timeout(&tm_done, msecs_to_jiffies
(fnic->config.ed_tov));
/* Recheck cmd state to check if it is now aborted */
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
return true;
}
io_req->abts_done = NULL;
/* if abort is still pending with fw, fail */
- if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
+ if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
iter_data->ret = FAILED;
return false;
}
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
/* original sc used for lr is handled by dev reset code */
if (sc != iter_data->lr_sc)
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
/* original sc used for lr is handled by dev reset code */
goto fnic_device_reset_end;
}
- CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
+ fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
/* Allocate tag if not present */
if (unlikely(tag < 0)) {
}
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
/*
* If there is a io_req attached to this command, then use it,
}
memset(io_req, 0, sizeof(*io_req));
io_req->port_id = rport->port_id;
- CMD_SP(sc) = (char *)io_req;
+ fnic_priv(sc)->io_req = io_req;
}
io_req->dr_done = &tm_done;
- CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
- CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
+ fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
*/
if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->dr_done = NULL;
goto fnic_device_reset_clean;
}
spin_lock_irqsave(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
/*
msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
}
io_req->dr_done = NULL;
- status = CMD_LR_STATUS(sc);
+ status = fnic_priv(sc)->lr_status;
/*
* If lun reset not completed, bail out with failed. io_req
atomic64_inc(&reset_stats->device_reset_timeouts);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset timed out\n");
- CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
spin_unlock_irqrestore(io_lock, flags);
int_to_scsilun(sc->device->lun, &fc_lun);
/*
*/
while (1) {
spin_lock_irqsave(io_lock, flags);
- if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
+ if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) {
spin_unlock_irqrestore(io_lock, flags);
break;
}
msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
} else {
spin_lock_irqsave(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
io_req->abts_done = &tm_done;
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
}
while (1) {
spin_lock_irqsave(io_lock, flags);
- if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
spin_unlock_irqrestore(io_lock, flags);
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
break;
} else {
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
io_req->abts_done = NULL;
goto fnic_device_reset_clean;
}
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"Device reset completed - failed\n");
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
goto fnic_device_reset_clean;
}
*/
if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset failed"
" since could not abort all IOs\n");
/* Clean lun reset command */
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (io_req)
/* Completed, and successful */
ret = SUCCESS;
fnic_device_reset_clean:
if (io_req)
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ fnic_flags_and_state(sc));
/* free tag if it is allocated */
if (unlikely(tag_gen_flag))
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
return true;
*/
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"Found IO in %s on lun\n",
- fnic_ioreq_state_to_str(CMD_STATE(sc)));
- cmd_state = CMD_STATE(sc);
+ fnic_ioreq_state_to_str(fnic_priv(sc)->state));
+ cmd_state = fnic_priv(sc)->state;
spin_unlock_irqrestore(io_lock, flags);
if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
iter_data->ret = 1;
static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- int transfersize = cmd->SCp.this_residual;
+ int transfersize = NCR5380_to_ncmd(cmd)->this_residual;
if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
return 0;
/* Limit PDMA send to 512 B to avoid random corruption on DTC3181E */
if (hostdata->board == BOARD_DTC3181E &&
cmd->sc_data_direction == DMA_TO_DEVICE)
- transfersize = min(cmd->SCp.this_residual, 512);
+ transfersize = min(transfersize, 512);
return min(transfersize, DMA_MAX_SIZE);
}
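
The same private-data idiom once more: NCR5380_to_ncmd() is the typed accessor for struct NCR5380_cmd (the second hunk also stops re-reading the residual it already cached in transfersize). The accessor is presumably defined in NCR5380.h along these lines:

    /* Sketch of the accessor this hunk relies on: the NCR5380 per-command
     * state lives in the midlayer-reserved private area. */
    static inline struct NCR5380_cmd *NCR5380_to_ncmd(struct scsi_cmnd *cmd)
    {
            return scsi_cmd_priv(cmd);
    }
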
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
.max_sectors = 128,
};
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
#include "gvp11.h"
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct Scsi_Host *instance = cmd->device->host;
struct gvp11_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct gvp11_scsiregs *regs = hdata->regs;
unsigned short cntr = GVP11_DMAC_INT_ENABLE;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ unsigned long addr = virt_to_bus(scsi_pointer->ptr);
int bank_mask;
static int scsi_alloc_out_of_range = 0;
/* use bounce buffer if the physical address is bad */
if (addr & wh->dma_xfer_mask) {
- wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
if (!scsi_alloc_out_of_range) {
wh->dma_bounce_buffer =
if (!dir_in) {
/* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
- cmd->SCp.this_residual);
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
}
}
if (dir_in) {
/* invalidate any cache */
- cache_clear(addr, cmd->SCp.this_residual);
+ cache_clear(addr, scsi_pointer->this_residual);
} else {
/* push any dirty cache */
- cache_push(addr, cmd->SCp.this_residual);
+ cache_push(addr, scsi_pointer->this_residual);
}
bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
struct gvp11_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct gvp11_scsiregs *regs = hdata->regs;
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (wh->dma_dir && SCpnt)
- memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
+ memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer,
+ scsi_pointer->this_residual);
if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
kfree(wh->dma_bounce_buffer);
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static int check_wd33c93(struct gvp11_scsiregs *regs)
#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)
-#define HISI_SAS_WAIT_PHYUP_TIMEOUT (20 * HZ)
+#define HISI_SAS_WAIT_PHYUP_TIMEOUT (30 * HZ)
#define HISI_SAS_CLEAR_ITCT_TIMEOUT (20 * HZ)
struct hisi_hba;
bool done;
};
-struct hisi_sas_internal_abort {
- unsigned int flag;
- unsigned int tag;
-};
-
#define HISI_SAS_RST_WORK_INIT(r, c) \
{ .hisi_hba = hisi_hba, \
.completion = &c, \
spinlock_t lock; /* For protecting slots */
};
-struct hisi_sas_tmf_task {
- int force_phy;
- int phy_id;
- u8 tmf;
- u16 tag_of_task_to_be_managed;
-};
-
struct hisi_sas_slot {
struct list_head entry;
struct list_head delivery;
dma_addr_t cmd_hdr_dma;
struct timer_list internal_abort_timer;
bool is_internal;
- struct hisi_sas_tmf_task *tmf;
+ struct sas_tmf_task *tmf;
/* Do not reorder/change members after here */
void *buf;
dma_addr_t buf_dma;
void (*prep_stp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
void (*prep_abort)(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot,
- int device_id, int abort_flag, int tag_to_abort);
+ struct hisi_sas_slot *slot);
void (*phys_init)(struct hisi_hba *hisi_hba);
void (*phy_start)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
#define DEV_IS_GONE(dev) \
((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
-static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
- u8 *lun, struct hisi_sas_tmf_task *tmf);
-static int
-hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
- struct domain_device *device,
- int abort_flag, int tag, bool rst_to_recover);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);
+struct hisi_sas_internal_abort_data {
+ bool rst_ha_timeout; /* reset the HA for timeout */
+};
+
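
With the driver-private abort descriptor gone, the only per-driver state left is this small cookie. It is handed to libsas when the internal abort is queued and comes back through the driver's abort-timeout hook; a hedged sketch of the round trip, using the libsas entry point that appears later in this patch:

    /* Sketch: the cookie rides with the internal abort and returns via the
     * driver's abort-timeout handler (hisi_sas_internal_abort_timeout()),
     * where rst_ha_timeout selects a full controller reset on timeout. */
    static int example_abort_dev_on_queue(struct domain_device *device,
                                          unsigned int qid)
    {
            struct hisi_sas_internal_abort_data data = { .rst_ha_timeout = true };

            return sas_execute_internal_abort_dev(device, qid, &data);
    }
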
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
switch (fis->command) {
}
static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
- struct hisi_sas_internal_abort *abort,
- struct hisi_sas_slot *slot, int device_id)
+ struct hisi_sas_slot *slot)
{
- hisi_hba->hw->prep_abort(hisi_hba, slot,
- device_id, abort->flag, abort->tag);
+ hisi_hba->hw->prep_abort(hisi_hba, slot);
}
static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
struct hisi_sas_dq *dq,
- struct hisi_sas_device *sas_dev,
- struct hisi_sas_internal_abort *abort)
+ struct hisi_sas_device *sas_dev)
{
struct hisi_sas_cmd_hdr *cmd_hdr_base;
int dlvry_queue_slot, dlvry_queue;
struct sas_task *task = slot->task;
- unsigned long flags;
int wr_q_index;
spin_lock(&dq->lock);
break;
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
- case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_STP_ALL:
hisi_sas_task_prep_ata(hisi_hba, slot);
break;
- case SAS_PROTOCOL_NONE:
- if (abort) {
- hisi_sas_task_prep_abort(hisi_hba, abort, slot, sas_dev->device_id);
- break;
- }
+ case SAS_PROTOCOL_INTERNAL_ABORT:
+ hisi_sas_task_prep_abort(hisi_hba, slot);
+ break;
- fallthrough;
default:
- dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
- task->task_proto);
- break;
+ return;
}
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
WRITE_ONCE(slot->ready, 1);
spin_lock(&dq->lock);
spin_unlock(&dq->lock);
}
-static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
- struct hisi_sas_tmf_task *tmf)
+static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
struct domain_device *device = task->dev;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_device *sas_dev = device->lldd_dev;
+ bool internal_abort = sas_is_internal_abort(task);
struct scsi_cmnd *scmd = NULL;
struct hisi_sas_dq *dq = NULL;
struct hisi_sas_port *port;
* libsas will use dev->port, should
* not call task_done for sata
*/
- if (device->dev_type != SAS_SATA_DEV)
+ if (device->dev_type != SAS_SATA_DEV && !internal_abort)
task->task_done(task);
return -ECOMM;
}
hisi_hba = dev_to_hisi_hba(device);
dev = hisi_hba->dev;
- if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
- if (!gfpflags_allow_blocking(gfp_flags))
- return -EINVAL;
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ case SAS_PROTOCOL_SMP:
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_STP_ALL:
+ if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
+ if (!gfpflags_allow_blocking(gfp_flags))
+ return -EINVAL;
- down(&hisi_hba->sem);
- up(&hisi_hba->sem);
- }
+ down(&hisi_hba->sem);
+ up(&hisi_hba->sem);
+ }
- if (DEV_IS_GONE(sas_dev)) {
- if (sas_dev)
- dev_info(dev, "task prep: device %d not ready\n",
- sas_dev->device_id);
- else
- dev_info(dev, "task prep: device %016llx not ready\n",
- SAS_ADDR(device->sas_addr));
+ if (DEV_IS_GONE(sas_dev)) {
+ if (sas_dev)
+ dev_info(dev, "task prep: device %d not ready\n",
+ sas_dev->device_id);
+ else
+ dev_info(dev, "task prep: device %016llx not ready\n",
+ SAS_ADDR(device->sas_addr));
- return -ECOMM;
- }
+ return -ECOMM;
+ }
- if (task->uldd_task) {
- struct ata_queued_cmd *qc;
+ port = to_hisi_sas_port(sas_port);
+ if (!port->port_attached) {
+ dev_info(dev, "task prep: %s port%d not attach device\n",
+ dev_is_sata(device) ? "SATA/STP" : "SAS",
+ device->port->id);
- if (dev_is_sata(device)) {
- qc = task->uldd_task;
- scmd = qc->scsicmd;
- } else {
- scmd = task->uldd_task;
+ return -ECOMM;
}
- }
- if (scmd) {
- unsigned int dq_index;
- u32 blk_tag;
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
- blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
- dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
- dq = &hisi_hba->dq[dq_index];
- } else {
- struct Scsi_Host *shost = hisi_hba->shost;
- struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- int queue = qmap->mq_map[raw_smp_processor_id()];
+ if (dev_is_sata(device)) {
+ qc = task->uldd_task;
+ scmd = qc->scsicmd;
+ } else {
+ scmd = task->uldd_task;
+ }
+ }
- dq = &hisi_hba->dq[queue];
- }
+ if (scmd) {
+ unsigned int dq_index;
+ u32 blk_tag;
- port = to_hisi_sas_port(sas_port);
- if (port && !port->port_attached) {
- dev_info(dev, "task prep: %s port%d not attach device\n",
- (dev_is_sata(device)) ?
- "SATA/STP" : "SAS",
- device->port->id);
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
+ dq = &hisi_hba->dq[dq_index];
+ } else {
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ int queue = qmap->mq_map[raw_smp_processor_id()];
- return -ECOMM;
+ dq = &hisi_hba->dq[queue];
+ }
+ break;
+ case SAS_PROTOCOL_INTERNAL_ABORT:
+ if (!hisi_hba->hw->prep_abort)
+ return TMF_RESP_FUNC_FAILED;
+
+ if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
+ return -EIO;
+
+ hisi_hba = dev_to_hisi_hba(device);
+
+ if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
+ return -EINVAL;
+
+ port = to_hisi_sas_port(sas_port);
+ dq = &hisi_hba->dq[task->abort_task.qid];
+ break;
+ default:
+ dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
+ task->task_proto);
+ return -EINVAL;
}
rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
goto err_out_dma_unmap;
}
- if (hisi_hba->hw->slot_index_alloc)
+ if (!internal_abort && hisi_hba->hw->slot_index_alloc)
rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
else
rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);
slot->task = task;
slot->port = port;
- slot->tmf = tmf;
- slot->is_internal = tmf;
+ slot->tmf = task->tmf;
+ slot->is_internal = !!task->tmf || internal_abort;
/* protect task_prep and start_delivery sequence */
- hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL);
+ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev);
return 0;
return sas_dev;
}
+static void hisi_sas_tmf_aborted(struct sas_task *task)
+{
+ struct hisi_sas_slot *slot = task->lldd_task;
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
+
+ if (slot) {
+ struct hisi_sas_cq *cq =
+ &hisi_hba->cq[slot->dlvry_queue];
+ /*
+ * sync irq to avoid free'ing task
+ * before using task in IO completion
+ */
+ synchronize_irq(cq->irq_no);
+ slot->task = NULL;
+ }
+}
+
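
hisi_sas_tmf_aborted() runs when libsas gives up on a TMF: it synchronizes the completion-queue interrupt before clearing slot->task, so a still-in-flight completion cannot dereference a freed task. Presumably it is wired up through the callbacks this series adds to sas_domain_function_template, roughly as below (only the new hooks are shown; treat the field names as an assumption of this sketch):

    /* Sketch, assuming the two TMF hooks added by this series; the rest of
     * the template is elided. */
    static struct sas_domain_function_template foo_transport_ops = {
            /* ... the existing lldd_* handlers ... */
            .lldd_tmf_aborted   = hisi_sas_tmf_aborted,
            .lldd_abort_timeout = hisi_sas_internal_abort_timeout,
    };
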
#define HISI_SAS_DISK_RECOVER_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
int rc = TMF_RESP_FUNC_COMPLETE;
struct scsi_lun lun;
- struct hisi_sas_tmf_task tmf_task;
int retry = HISI_SAS_DISK_RECOVER_CNT;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
case SAS_END_DEVICE:
int_to_scsilun(0, &lun);
- tmf_task.tmf = TMF_CLEAR_TASK_SET;
while (retry-- > 0) {
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
- &tmf_task);
+ rc = sas_clear_task_set(device, lun.scsi_lun);
if (rc == TMF_RESP_FUNC_COMPLETE) {
hisi_sas_release_task(hisi_hba, device);
break;
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
hisi_hba->hw->dereg_device(hisi_hba, device);
}
+static int
+hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev,
+ bool rst_ha_timeout)
+{
+ struct hisi_sas_internal_abort_data data = { rst_ha_timeout };
+ struct domain_device *device = sas_dev->sas_device;
+ struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
+ int i, rc;
+
+ for (i = 0; i < hisi_hba->cq_nvecs; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+ const struct cpumask *mask = cq->irq_mask;
+
+ if (mask && !cpumask_intersects(cpu_online_mask, mask))
+ continue;
+ rc = sas_execute_internal_abort_dev(device, i, &data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
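
The device-scoped variant above has to execute the abort once per completion queue whose IRQ can still fire (queues whose affinity mask has no online CPU are skipped). The single-command scope, used by hisi_sas_abort_task() further down, instead targets only the delivery queue of the command; an illustrative wrapper:

    /* Sketch: single-command scope; tag and qid come from the driver's
     * slot bookkeeping, as in hisi_sas_abort_task() below. */
    static int example_abort_one_command(struct domain_device *device,
                                         u16 tag, int qid)
    {
            struct hisi_sas_internal_abort_data data = { .rst_ha_timeout = false };

            return sas_execute_internal_abort_single(device, tag, qid, &data);
    }
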
static void hisi_sas_dev_gone(struct domain_device *device)
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
down(&hisi_hba->sem);
if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
- hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0, true);
+ hisi_sas_internal_task_abort_dev(sas_dev, true);
hisi_sas_dereg_device(hisi_hba, device);
up(&hisi_hba->sem);
}
-static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
-{
- return hisi_sas_task_exec(task, gfp_flags, NULL);
-}
-
static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *r)
{
goto out;
}
- if (sts && !wait_for_completion_timeout(&completion, 2 * HZ)) {
+ if (sts && !wait_for_completion_timeout(&completion,
+ HISI_SAS_WAIT_PHYUP_TIMEOUT)) {
dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
phy_no, func);
if (phy->in_reset)
return ret;
}
-static void hisi_sas_task_done(struct sas_task *task)
-{
- del_timer_sync(&task->slow_task->timer);
- complete(&task->slow_task->completion);
-}
-
-static void hisi_sas_tmf_timedout(struct timer_list *t)
-{
- struct sas_task_slow *slow = from_timer(slow, t, timer);
- struct sas_task *task = slow->task;
- unsigned long flags;
- bool is_completed = true;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- is_completed = false;
- }
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- if (!is_completed)
- complete(&task->slow_task->completion);
-}
-
-#define TASK_TIMEOUT (20 * HZ)
-#define TASK_RETRY 3
-#define INTERNAL_ABORT_TIMEOUT (6 * HZ)
-static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
- void *parameter, u32 para_len,
- struct hisi_sas_tmf_task *tmf)
-{
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
- struct device *dev = hisi_hba->dev;
- struct sas_task *task;
- int res, retry;
-
- for (retry = 0; retry < TASK_RETRY; retry++) {
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = device;
- task->task_proto = device->tproto;
-
- if (dev_is_sata(device)) {
- task->ata_task.device_control_reg_update = 1;
- memcpy(&task->ata_task.fis, parameter, para_len);
- } else {
- memcpy(&task->ssp_task, parameter, para_len);
- }
- task->task_done = hisi_sas_task_done;
-
- task->slow_task->timer.function = hisi_sas_tmf_timedout;
- task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
- add_timer(&task->slow_task->timer);
-
- res = hisi_sas_task_exec(task, GFP_KERNEL, tmf);
- if (res) {
- del_timer_sync(&task->slow_task->timer);
- dev_err(dev, "abort tmf: executing internal task failed: %d\n",
- res);
- goto ex_err;
- }
-
- wait_for_completion(&task->slow_task->completion);
- res = TMF_RESP_FUNC_FAILED;
- /* Even TMF timed out, return direct. */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- struct hisi_sas_slot *slot = task->lldd_task;
-
- dev_err(dev, "abort tmf: TMF task timeout and not done\n");
- if (slot) {
- struct hisi_sas_cq *cq =
- &hisi_hba->cq[slot->dlvry_queue];
- /*
- * sync irq to avoid free'ing task
- * before using task in IO completion
- */
- synchronize_irq(cq->irq_no);
- slot->task = NULL;
- }
-
- goto ex_err;
- } else
- dev_err(dev, "abort tmf: TMF task timeout\n");
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
- res = TMF_RESP_FUNC_COMPLETE;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == TMF_RESP_FUNC_SUCC) {
- res = TMF_RESP_FUNC_SUCC;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_UNDERRUN) {
- /* no error, but return the number of bytes of
- * underrun
- */
- dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
- SAS_ADDR(device->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- res = task->task_status.residual;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_OVERRUN) {
- dev_warn(dev, "abort tmf: blocked task error\n");
- res = -EMSGSIZE;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_OPEN_REJECT) {
- dev_warn(dev, "abort tmf: open reject failed\n");
- res = -EIO;
- } else {
- dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
- SAS_ADDR(device->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- }
- sas_free_task(task);
- task = NULL;
- }
-ex_err:
- if (retry == TASK_RETRY)
- dev_warn(dev, "abort tmf: executing internal task failed!\n");
- sas_free_task(task);
- return res;
-}
-
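
With hisi_sas_exec_internal_tmf_task() deleted, the retry loop, the slow-task timer, and all of the response decoding live in libsas; the driver only calls the generic helpers seen throughout the rest of this patch (sas_execute_ata_cmd(), sas_abort_task(), sas_abort_task_set(), sas_lu_reset(), sas_clear_task_set(), sas_query_task()). As a sketch of how small an LLDD handler becomes under these helpers, a hypothetical LUN-reset handler:

    /* Hypothetical post-conversion handler; foo_* names are illustrative. */
    static void foo_release_pending_tasks(struct domain_device *device)
    {
            /* hypothetical: complete whatever tasks the LLDD still tracks */
    }

    static int foo_lu_reset(struct domain_device *device, u8 *lun)
    {
            int rc = sas_lu_reset(device, lun); /* libsas owns retry/timeout */

            if (rc == TMF_RESP_FUNC_COMPLETE)
                    foo_release_pending_tasks(device);
            return rc;
    }
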
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
bool reset, int pmp, u8 *fis)
{
int rc = TMF_RESP_FUNC_FAILED;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
- int s = sizeof(struct host_to_dev_fis);
- struct hisi_sas_tmf_task tmf = {};
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
- rc = hisi_sas_exec_internal_tmf_task(device, fis, s, &tmf);
+ rc = sas_execute_ata_cmd(device, fis, -1);
if (rc != TMF_RESP_FUNC_COMPLETE)
break;
}
int pmp = sata_srst_pmp(link);
hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
- rc = hisi_sas_exec_internal_tmf_task(device, fis,
- s, &tmf);
+ rc = sas_execute_ata_cmd(device, fis, -1);
if (rc != TMF_RESP_FUNC_COMPLETE)
dev_err(dev, "ata disk %016llx de-reset failed\n",
SAS_ADDR(device->sas_addr));
return rc;
}
-static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
- u8 *lun, struct hisi_sas_tmf_task *tmf)
-{
- struct sas_ssp_task ssp_task;
-
- if (!(device->tproto & SAS_PROTOCOL_SSP))
- return TMF_RESP_FUNC_ESUPP;
-
- memcpy(ssp_task.LUN, lun, 8);
-
- return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
- sizeof(ssp_task), tmf);
-}
-
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
struct asd_sas_port *sas_port,
struct domain_device *device)
{
- struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
struct ata_port *ap = device->sata_dev.ap;
struct device *dev = hisi_hba->dev;
- int s = sizeof(struct host_to_dev_fis);
int rc = TMF_RESP_FUNC_FAILED;
struct ata_link *link;
u8 fis[20] = {0};
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
- tmf_task.phy_id = i;
hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
- rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
- &tmf_task);
+ rc = sas_execute_ata_cmd(device, fis, i);
if (rc != TMF_RESP_FUNC_COMPLETE) {
dev_err(dev, "phy%d ata reset failed rc=%d\n",
i, rc);
if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
continue;
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0,
- false);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0)
dev_err(dev, "STP reject: abort dev failed %d\n", rc);
}
static int hisi_sas_abort_task(struct sas_task *task)
{
- struct scsi_lun lun;
- struct hisi_sas_tmf_task tmf_task;
+ struct hisi_sas_internal_abort_data internal_abort_data = { false };
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba;
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd *cmnd = task->uldd_task;
struct hisi_sas_slot *slot = task->lldd_task;
u16 tag = slot->idx;
int rc2;
- int_to_scsilun(cmnd->device->lun, &lun);
- tmf_task.tmf = TMF_ABORT_TASK;
- tmf_task.tag_of_task_to_be_managed = tag;
-
- rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
- &tmf_task);
-
- rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_CMD, tag,
- false);
+ rc = sas_abort_task(task, tag);
+ rc2 = sas_execute_internal_abort_single(device, tag,
+ slot->dlvry_queue, &internal_abort_data);
if (rc2 < 0) {
dev_err(dev, "abort task: internal abort (%d)\n", rc2);
return TMF_RESP_FUNC_FAILED;
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (task->dev->dev_type == SAS_SATA_DEV) {
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV,
- 0, false);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0) {
dev_err(dev, "abort task: internal abort failed\n");
goto out;
u32 tag = slot->idx;
struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_CMD, tag,
- false);
+ rc = sas_execute_internal_abort_single(device,
+ tag, slot->dlvry_queue,
+ &internal_abort_data);
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
task->lldd_task) {
/*
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
- struct hisi_sas_tmf_task tmf_task;
int rc;
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0, false);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0) {
dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
return TMF_RESP_FUNC_FAILED;
}
hisi_sas_dereg_device(hisi_hba, device);
- tmf_task.tmf = TMF_ABORT_TASK_SET;
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
-
+ rc = sas_abort_task_set(device, lun);
if (rc == TMF_RESP_FUNC_COMPLETE)
hisi_sas_release_task(hisi_hba, device);
return rc;
}
-static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
-{
- struct hisi_sas_tmf_task tmf_task;
- int rc;
-
- tmf_task.tmf = TMF_CLEAR_ACA;
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
-
- return rc;
-}
-
-#define I_T_NEXUS_RESET_PHYUP_TIMEOUT (2 * HZ)
-
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
struct sas_phy *local_phy = sas_get_local_phy(device);
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
int rc;
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0, false);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0) {
dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
return TMF_RESP_FUNC_FAILED;
int rc = TMF_RESP_FUNC_FAILED;
/* Clear internal IO and then lu reset */
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0, false);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0) {
dev_err(dev, "lu_reset: internal abort failed\n");
goto out;
hisi_sas_release_task(hisi_hba, device);
sas_put_local_phy(phy);
} else {
- struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
-
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
+ rc = sas_lu_reset(device, lun);
if (rc == TMF_RESP_FUNC_COMPLETE)
hisi_sas_release_task(hisi_hba, device);
}
static int hisi_sas_query_task(struct sas_task *task)
{
- struct scsi_lun lun;
- struct hisi_sas_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd *cmnd = task->uldd_task;
- struct domain_device *device = task->dev;
struct hisi_sas_slot *slot = task->lldd_task;
u32 tag = slot->idx;
- int_to_scsilun(cmnd->device->lun, &lun);
- tmf_task.tmf = TMF_QUERY_TASK;
- tmf_task.tag_of_task_to_be_managed = tag;
-
- rc = hisi_sas_debug_issue_ssp_tmf(device,
- lun.scsi_lun,
- &tmf_task);
+ rc = sas_query_task(task, tag);
switch (rc) {
/* The task is still in Lun, release it then */
case TMF_RESP_FUNC_SUCC:
return rc;
}
-static int
-hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
- struct hisi_sas_internal_abort *abort,
- struct sas_task *task,
- struct hisi_sas_dq *dq)
+static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
+ void *data)
{
struct domain_device *device = task->dev;
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct device *dev = hisi_hba->dev;
- struct hisi_sas_port *port;
- struct asd_sas_port *sas_port = device->port;
- struct hisi_sas_slot *slot;
- int slot_idx;
-
- if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
- return -EINVAL;
-
- if (!device->port)
- return -1;
-
- port = to_hisi_sas_port(sas_port);
-
- /* simply get a slot and send abort command */
- slot_idx = hisi_sas_slot_index_alloc(hisi_hba, NULL);
- if (slot_idx < 0)
- goto err_out;
-
- slot = &hisi_hba->slot_info[slot_idx];
- slot->n_elem = 0;
- slot->task = task;
- slot->port = port;
- slot->is_internal = true;
-
- hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort);
-
- return 0;
-
-err_out:
- dev_err(dev, "internal abort task prep: failed[%d]!\n", slot_idx);
-
- return slot_idx;
-}
-
-/**
- * _hisi_sas_internal_task_abort -- execute an internal
- * abort command for single IO command or a device
- * @hisi_hba: host controller struct
- * @device: domain device
- * @abort_flag: mode of operation, device or single IO
- * @tag: tag of IO to be aborted (only relevant to single
- * IO mode)
- * @dq: delivery queue for this internal abort command
- * @rst_to_recover: If rst_to_recover set, queue a controller
- * reset if an internal abort times out.
- */
-static int
-_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
- struct domain_device *device, int abort_flag,
- int tag, struct hisi_sas_dq *dq, bool rst_to_recover)
-{
- struct sas_task *task;
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct hisi_sas_internal_abort abort = {
- .flag = abort_flag,
- .tag = tag,
- };
- struct device *dev = hisi_hba->dev;
- int res;
- /*
- * The interface is not realized means this HW don't support internal
- * abort, or don't need to do internal abort. Then here, we return
- * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
- * the internal abort has been executed and returned CQ.
- */
- if (!hisi_hba->hw->prep_abort)
- return TMF_RESP_FUNC_FAILED;
-
- if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
- return -EIO;
-
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = device;
- task->task_proto = SAS_PROTOCOL_NONE;
- task->task_done = hisi_sas_task_done;
- task->slow_task->timer.function = hisi_sas_tmf_timedout;
- task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT;
- add_timer(&task->slow_task->timer);
-
- res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
- &abort, task, dq);
- if (res) {
- del_timer_sync(&task->slow_task->timer);
- dev_err(dev, "internal task abort: executing internal task failed: %d\n",
- res);
- goto exit;
- }
- wait_for_completion(&task->slow_task->completion);
- res = TMF_RESP_FUNC_FAILED;
-
- /* Internal abort timed out */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
- queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
-
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- struct hisi_sas_slot *slot = task->lldd_task;
-
- set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
-
- if (slot) {
- struct hisi_sas_cq *cq =
- &hisi_hba->cq[slot->dlvry_queue];
- /*
- * sync irq to avoid free'ing task
- * before using task in IO completion
- */
- synchronize_irq(cq->irq_no);
- slot->task = NULL;
- }
-
- if (rst_to_recover) {
- dev_err(dev, "internal task abort: timeout and not done. Queuing reset.\n");
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- } else {
- dev_err(dev, "internal task abort: timeout and not done.\n");
- }
-
- res = -EIO;
- goto exit;
- } else
- dev_err(dev, "internal task abort: timeout.\n");
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
- res = TMF_RESP_FUNC_COMPLETE;
- goto exit;
- }
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct hisi_sas_internal_abort_data *timeout = data;
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == TMF_RESP_FUNC_SUCC) {
- res = TMF_RESP_FUNC_SUCC;
- goto exit;
- }
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
+ queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
-exit:
- dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
- SAS_ADDR(device->sas_addr), task,
- task->task_status.resp, /* 0 is complete, -1 is undelivered */
- task->task_status.stat);
- sas_free_task(task);
+ if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ pr_err("Internal abort: timeout %016llx\n",
+ SAS_ADDR(device->sas_addr));
+ } else {
+ struct hisi_sas_slot *slot = task->lldd_task;
- return res;
-}
+ set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
-static int
-hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
- struct domain_device *device,
- int abort_flag, int tag, bool rst_to_recover)
-{
- struct hisi_sas_slot *slot;
- struct device *dev = hisi_hba->dev;
- struct hisi_sas_dq *dq;
- int i, rc;
+ if (slot) {
+ struct hisi_sas_cq *cq =
+ &hisi_hba->cq[slot->dlvry_queue];
+ /*
+ * sync irq to avoid freeing the task
+ * before it is used in I/O completion
+ */
+ synchronize_irq(cq->irq_no);
+ slot->task = NULL;
+ }
- switch (abort_flag) {
- case HISI_SAS_INT_ABT_CMD:
- slot = &hisi_hba->slot_info[tag];
- dq = &hisi_hba->dq[slot->dlvry_queue];
- return _hisi_sas_internal_task_abort(hisi_hba, device,
- abort_flag, tag, dq,
- rst_to_recover);
- case HISI_SAS_INT_ABT_DEV:
- for (i = 0; i < hisi_hba->cq_nvecs; i++) {
- struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- const struct cpumask *mask = cq->irq_mask;
-
- if (mask && !cpumask_intersects(cpu_online_mask, mask))
- continue;
- dq = &hisi_hba->dq[i];
- rc = _hisi_sas_internal_task_abort(hisi_hba, device,
- abort_flag, tag,
- dq, rst_to_recover);
- if (rc)
- return rc;
+ if (timeout->rst_ha_timeout) {
+ pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n",
+ SAS_ADDR(device->sas_addr));
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ } else {
+ pr_err("Internal abort: timeout and not done %016llx.\n",
+ SAS_ADDR(device->sas_addr));
}
- break;
- default:
- dev_err(dev, "Unrecognised internal abort flag (%d)\n",
- abort_flag);
- return -EINVAL;
+
+ return true;
}
- return 0;
+ return false;
}
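
For LLDD writers, the contract of the new hook is worth spelling out: libsas calls lldd_abort_timeout() when an internal abort times out, and a true return makes libsas give up while false lets it retry the abort (see sas_execute_internal_abort() later in this section). A minimal sketch, with the per-call data struct and all names purely illustrative:

struct my_abort_data {
	bool retry_on_timeout;
};

static bool my_internal_abort_timeout(struct sas_task *task, void *data)
{
	struct my_abort_data *timeout = data;

	/* true: libsas stops retrying the internal abort; false: retry it */
	return !timeout->retry_on_timeout;
}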
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
.lldd_control_phy = hisi_sas_control_phy,
.lldd_abort_task = hisi_sas_abort_task,
.lldd_abort_task_set = hisi_sas_abort_task_set,
- .lldd_clear_aca = hisi_sas_clear_aca,
.lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
.lldd_lu_reset = hisi_sas_lu_reset,
.lldd_query_task = hisi_sas_query_task,
.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
.lldd_port_formed = hisi_sas_port_formed,
.lldd_write_gpio = hisi_sas_write_gpio,
+ .lldd_tmf_aborted = hisi_sas_tmf_aborted,
+ .lldd_abort_timeout = hisi_sas_internal_abort_timeout,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
- struct hisi_sas_tmf_task *tmf = slot->tmf;
+ struct sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
u8 *buf_cmd, fburst = 0;
u32 dw1, dw2;
sas_dev = device->lldd_dev;
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
idx = i * HISI_SAS_PHY_INT_NR;
for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
irq = platform_get_irq(pdev, idx);
- if (irq < 0) {
- dev_err(dev, "irq init: fail map phy interrupt %d\n",
- idx);
+ if (irq < 0)
return irq;
- }
rc = devm_request_irq(dev, irq, phy_interrupts[j], 0,
DRV_NAME " phy", phy);
idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR;
for (i = 0; i < hisi_hba->queue_count; i++, idx++) {
irq = platform_get_irq(pdev, idx);
- if (irq < 0) {
- dev_err(dev, "irq init: could not map cq interrupt %d\n",
- idx);
+ if (irq < 0)
return irq;
- }
rc = devm_request_irq(dev, irq, cq_interrupt_v1_hw, 0,
DRV_NAME " cq", &hisi_hba->cq[i]);
idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count;
for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) {
irq = platform_get_irq(pdev, idx);
- if (irq < 0) {
- dev_err(dev, "irq init: could not map fatal interrupt %d\n",
- idx);
+ if (irq < 0)
return irq;
- }
rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
DRV_NAME " fatal", hisi_hba);
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
- struct hisi_sas_tmf_task *tmf = slot->tmf;
+ struct sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
u8 *buf_cmd;
u32 dw1 = 0, dw2 = 0;
sas_dev = device->lldd_dev;
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
spin_unlock_irqrestore(&task->task_state_lock, flags);
memset(ts, 0, sizeof(*ts));
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
- struct hisi_sas_tmf_task *tmf = slot->tmf;
+ struct sas_ata_task *ata_task = &task->ata_task;
+ struct sas_tmf_task *tmf = slot->tmf;
u8 *buf_cmd;
int has_data = 0, hdr_tag = 0;
u32 dw0, dw1 = 0, dw2 = 0;
else
dw0 |= 4 << CMD_HDR_CMD_OFF;
- if (tmf && tmf->force_phy) {
+ if (tmf && ata_task->force_phy) {
dw0 |= CMD_HDR_FORCE_PHY_MSK;
- dw0 |= (1 << tmf->phy_id) << CMD_HDR_PHY_ID_OFF;
+ dw0 |= (1 << ata_task->force_phy_id) << CMD_HDR_PHY_ID_OFF;
}
hdr->dw0 = cpu_to_le32(dw0);
}
static void prep_abort_v2_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot,
- int device_id, int abort_flag, int tag_to_abort)
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
+ struct sas_internal_abort_task *abort = &task->abort_task;
struct domain_device *dev = task->dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct hisi_sas_port *port = slot->port;
struct timer_list *timer = &slot->internal_abort_timer;
+ struct hisi_sas_device *sas_dev = dev->lldd_dev;
/* setup the quirk timer */
timer_setup(timer, hisi_sas_internal_abort_quirk_timeout, 0);
(port->id << CMD_HDR_PORT_OFF) |
(dev_is_sata(dev) <<
CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
- (abort_flag << CMD_HDR_ABORT_FLAG_OFF));
+ (abort->type << CMD_HDR_ABORT_FLAG_OFF));
/* dw1 */
- hdr->dw1 = cpu_to_le32(device_id << CMD_HDR_DEV_ID_OFF);
+ hdr->dw1 = cpu_to_le32(sas_dev->device_id << CMD_HDR_DEV_ID_OFF);
/* dw7 */
- hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
+ hdr->dw7 = cpu_to_le32(abort->tag << CMD_HDR_ABORT_IPTT_OFF);
hdr->transfer_tags = cpu_to_le32(slot->idx);
}
#define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF)
#define CMPLT_HDR_RSPNS_XFRD_OFF 10
#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
+#define CMPLT_HDR_RSPNS_GOOD_OFF 11
+#define CMPLT_HDR_RSPNS_GOOD_MSK (0x1 << CMPLT_HDR_RSPNS_GOOD_OFF)
#define CMPLT_HDR_ERX_OFF 12
#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
#define CMPLT_HDR_ABORT_STAT_OFF 13
/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask;
-module_param(prot_mask, int, 0);
+module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
static void debugfs_work_handler_v3_hw(struct work_struct *work);
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
- struct hisi_sas_tmf_task *tmf = slot->tmf;
+ struct sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
unsigned char prot_op;
u8 *buf_cmd;
}
static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot,
- int device_id, int abort_flag, int tag_to_abort)
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
+ struct sas_internal_abort_task *abort = &task->abort_task;
struct domain_device *dev = task->dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct hisi_sas_port *port = slot->port;
+ struct hisi_sas_device *sas_dev = dev->lldd_dev;
+ bool sata = dev_is_sata(dev);
/* dw0 */
- hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /*abort*/
+ hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /* abort */
(port->id << CMD_HDR_PORT_OFF) |
- (dev_is_sata(dev)
- << CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
- (abort_flag
- << CMD_HDR_ABORT_FLAG_OFF));
+ (sata << CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
+ (abort->type << CMD_HDR_ABORT_FLAG_OFF));
/* dw1 */
- hdr->dw1 = cpu_to_le32(device_id
+ hdr->dw1 = cpu_to_le32(sas_dev->device_id
<< CMD_HDR_DEV_ID_OFF);
/* dw7 */
- hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
+ hdr->dw7 = cpu_to_le32(abort->tag << CMD_HDR_ABORT_IPTT_OFF);
hdr->transfer_tags = cpu_to_le32(slot->idx);
}
return IRQ_HANDLED;
}
-static void
+static bool
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+ /*
+ * If the returned response frame is incorrect because of data underflow,
+ * but the I/O information has been written to host memory, examine the
+ * response IU.
+ */
+ if (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) &&
+ (complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))
+ return false;
+
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
default:
break;
}
+ return true;
}
static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
sas_dev = device->lldd_dev;
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
spin_unlock_irqrestore(&task->task_state_lock, flags);
memset(ts, 0, sizeof(*ts));
if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
- slot_err_v3_hw(hisi_hba, task, slot);
- if (ts->stat != SAS_DATA_UNDERRUN)
- dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d addr=%016llx CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
- slot->idx, task, sas_dev->device_id,
- SAS_ADDR(device->sas_addr),
- dw0, dw1, complete_hdr->act, dw3,
- error_info[0], error_info[1],
- error_info[2], error_info[3]);
- if (unlikely(slot->abort)) {
- sas_task_abort(task);
- return;
+ if (slot_err_v3_hw(hisi_hba, task, slot)) {
+ if (ts->stat != SAS_DATA_UNDERRUN)
+ dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d addr=%016llx CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task, sas_dev->device_id,
+ SAS_ADDR(device->sas_addr),
+ dw0, dw1, complete_hdr->act, dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
+ if (unlikely(slot->abort)) {
+ sas_task_abort(task);
+ return;
+ }
+ goto out;
}
- goto out;
}
switch (task->task_proto) {
return IRQ_WAKE_THREAD;
}
+static void hisi_sas_v3_free_vectors(void *data)
+{
+ struct pci_dev *pdev = data;
+
+ pci_free_irq_vectors(pdev);
+}
+
static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
{
int vectors;
int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
struct Scsi_Host *shost = hisi_hba->shost;
+ struct pci_dev *pdev = hisi_hba->pci_dev;
struct irq_affinity desc = {
.pre_vectors = BASE_VECTORS_V3_HW,
};
min_msi = MIN_AFFINE_VECTORS_V3_HW;
- vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
+ vectors = pci_alloc_irq_vectors_affinity(pdev,
min_msi, max_msi,
PCI_IRQ_MSI |
PCI_IRQ_AFFINITY,
hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
shost->nr_hw_queues = hisi_hba->cq_nvecs;
+ devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
return 0;
}
.owner = THIS_MODULE,
};
+static ssize_t debugfs_bist_cnt_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ unsigned int cnt;
+ int val;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ val = kstrtouint_from_user(buf, count, 0, &cnt);
+ if (val)
+ return val;
+
+ if (cnt)
+ return -EINVAL;
+
+ hisi_hba->debugfs_bist_cnt = 0;
+ return count;
+}
+
+static int debugfs_bist_cnt_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+
+ seq_printf(s, "%u\n", hisi_hba->debugfs_bist_cnt);
+
+ return 0;
+}
+
+static int debugfs_bist_cnt_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_bist_cnt_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_bist_cnt_v3_hw_ops = {
+ .open = debugfs_bist_cnt_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_bist_cnt_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static const struct {
int value;
char *name;
debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
hisi_hba, &debugfs_bist_phy_v3_hw_fops);
- debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry,
- &hisi_hba->debugfs_bist_cnt);
+ debugfs_create_file("cnt", 0600, hisi_hba->debugfs_bist_dentry,
+ hisi_hba, &debugfs_bist_cnt_v3_hw_ops);
debugfs_create_file("loopback_mode", 0600,
hisi_hba->debugfs_bist_dentry,
if (!hisi_hba->regs) {
dev_err(dev, "cannot map register\n");
rc = -ENOMEM;
- goto err_out_ha;
+ goto err_out_free_host;
}
phy_nr = port_nr = hisi_hba->n_phy;
arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
if (!arr_phy || !arr_port) {
rc = -ENOMEM;
- goto err_out_ha;
+ goto err_out_free_host;
}
sha->sas_phy = arr_phy;
rc = interrupt_preinit_v3_hw(hisi_hba);
if (rc)
- goto err_out_debugfs;
+ goto err_out_undo_debugfs;
rc = scsi_add_host(shost, dev);
if (rc)
- goto err_out_free_irq_vectors;
+ goto err_out_undo_debugfs;
rc = sas_register_ha(sha);
if (rc)
- goto err_out_register_ha;
+ goto err_out_remove_host;
rc = hisi_sas_v3_init(hisi_hba);
if (rc)
- goto err_out_hw_init;
+ goto err_out_unregister_ha;
scsi_scan_host(shost);
return 0;
-err_out_hw_init:
+err_out_unregister_ha:
sas_unregister_ha(sha);
-err_out_register_ha:
+err_out_remove_host:
scsi_remove_host(shost);
-err_out_free_irq_vectors:
- pci_free_irq_vectors(pdev);
-err_out_debugfs:
+err_out_undo_debugfs:
debugfs_exit_v3_hw(hisi_hba);
-err_out_ha:
+err_out_free_host:
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
err_out:
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq);
}
- pci_free_irq_vectors(pdev);
}
static void hisi_sas_v3_remove(struct pci_dev *pdev)
.slave_configure = hptiop_slave_config,
.this_id = -1,
.change_queue_depth = hptiop_adjust_disk_queue_depth,
+ .cmd_size = sizeof(struct hpt_cmd_priv),
};
static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
int index;
};
-struct hpt_scsi_pointer {
+struct hpt_cmd_priv {
int mapped;
int sgcnt;
dma_addr_t dma_handle;
};
-#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)
+#define HPT_SCP(scp) ((struct hpt_cmd_priv *)scsi_cmd_priv(scp))
enum hptiop_family {
UNKNOWN_BASED_IOP,
*/
static void ibmvscsis_send_messages(struct scsi_info *vscsi)
{
- u64 msg_hi = 0;
- /* note do not attempt to access the IU_data_ptr with this pointer
- * it is not valid
- */
- struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
+ struct viosrp_crq empty_crq = { };
+ struct viosrp_crq *crq = &empty_crq;
struct ibmvscsis_cmd *cmd, *nxt;
long rc = ADAPT_SUCCESS;
bool retry = false;
crq->IU_length = cpu_to_be16(cmd->rsp.len);
rc = h_send_crq(vscsi->dma_dev->unit_address,
- be64_to_cpu(msg_hi),
+ be64_to_cpu(crq->high),
be64_to_cpu(cmd->rsp.tag));
dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
{
dev->base = dev->dev->port->base;
if (dev->cur_cmd)
- dev->cur_cmd->SCp.phase = 1;
+ imm_scsi_pointer(dev->cur_cmd)->phase = 1;
else
wake_up(dev->waiting);
}
* The driver appears to remain stable if we speed up the parallel port
* i/o in this function, but not elsewhere.
*/
-static int imm_completion(struct scsi_cmnd *cmd)
+static int imm_completion(struct scsi_cmnd *const cmd)
{
/* Return codes:
* -1 Error
* 0 Told to schedule
* 1 Finished data transfer
*/
+ struct scsi_pointer *scsi_pointer = imm_scsi_pointer(cmd);
imm_struct *dev = imm_dev(cmd->device->host);
unsigned short ppb = dev->base;
unsigned long start_jiffies = jiffies;
* a) Drive status is screwy (!ready && !present)
* b) Drive is requesting/sending more data than expected
*/
- if (((r & 0x88) != 0x88) || (cmd->SCp.this_residual <= 0)) {
+ if ((r & 0x88) != 0x88 || scsi_pointer->this_residual <= 0) {
imm_fail(dev, DID_ERROR);
return -1; /* ERROR_RETURN */
}
/* determine if we should use burst I/O */
if (dev->rd == 0) {
- fast = (bulk
- && (cmd->SCp.this_residual >=
- IMM_BURST_SIZE)) ? IMM_BURST_SIZE : 2;
- status = imm_out(dev, cmd->SCp.ptr, fast);
+ fast = bulk && scsi_pointer->this_residual >=
+ IMM_BURST_SIZE ? IMM_BURST_SIZE : 2;
+ status = imm_out(dev, scsi_pointer->ptr, fast);
} else {
- fast = (bulk
- && (cmd->SCp.this_residual >=
- IMM_BURST_SIZE)) ? IMM_BURST_SIZE : 1;
- status = imm_in(dev, cmd->SCp.ptr, fast);
+ fast = bulk && scsi_pointer->this_residual >=
+ IMM_BURST_SIZE ? IMM_BURST_SIZE : 1;
+ status = imm_in(dev, scsi_pointer->ptr, fast);
}
- cmd->SCp.ptr += fast;
- cmd->SCp.this_residual -= fast;
+ scsi_pointer->ptr += fast;
+ scsi_pointer->this_residual -= fast;
if (!status) {
imm_fail(dev, DID_BUS_BUSY);
return -1; /* ERROR_RETURN */
}
- if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
+ if (scsi_pointer->buffer && !scsi_pointer->this_residual) {
/* if scatter/gather, advance to the next segment */
- if (cmd->SCp.buffers_residual--) {
- cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
- cmd->SCp.this_residual =
- cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ if (scsi_pointer->buffers_residual--) {
+ scsi_pointer->buffer =
+ sg_next(scsi_pointer->buffer);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
/*
* Make sure that we transfer even number of bytes
* otherwise it makes imm_byte_out() messy.
*/
- if (cmd->SCp.this_residual & 0x01)
- cmd->SCp.this_residual++;
+ if (scsi_pointer->this_residual & 0x01)
+ scsi_pointer->this_residual++;
}
}
/* Now check to see if the drive is ready to communicate */
}
#endif
- if (cmd->SCp.phase > 1)
+ if (imm_scsi_pointer(cmd)->phase > 1)
imm_disconnect(dev);
imm_pb_dismiss(dev);
return;
}
-static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
+static int imm_engine(imm_struct *dev, struct scsi_cmnd *const cmd)
{
+ struct scsi_pointer *scsi_pointer = imm_scsi_pointer(cmd);
unsigned short ppb = dev->base;
unsigned char l = 0, h = 0;
int retv, x;
if (dev->failed)
return 0;
- switch (cmd->SCp.phase) {
+ switch (scsi_pointer->phase) {
case 0: /* Phase 0 - Waiting for parport */
if (time_after(jiffies, dev->jstart + HZ)) {
/*
case 1: /* Phase 1 - Connected */
imm_connect(dev, CONNECT_EPP_MAYBE);
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 2: /* Phase 2 - We are now talking to the scsi bus */
imm_fail(dev, DID_NO_CONNECT);
return 0;
}
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 3: /* Phase 3 - Ready to accept a command */
if (!imm_send_command(cmd))
return 0;
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ scsi_pointer->buffer = scsi_sglist(cmd);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
} else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.this_residual = 0;
- cmd->SCp.ptr = NULL;
+ scsi_pointer->buffer = NULL;
+ scsi_pointer->this_residual = 0;
+ scsi_pointer->ptr = NULL;
}
- cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
- cmd->SCp.phase++;
- if (cmd->SCp.this_residual & 0x01)
- cmd->SCp.this_residual++;
+ scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1;
+ scsi_pointer->phase++;
+ if (scsi_pointer->this_residual & 0x01)
+ scsi_pointer->this_residual++;
fallthrough;
case 5: /* Phase 5 - Pre-Data transfer stage */
if ((dev->dp) && (dev->rd))
if (imm_negotiate(dev))
return 0;
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 6: /* Phase 6 - Data transfer stage */
if (retv == 0)
return 1;
}
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 7: /* Phase 7 - Post data transfer stage */
w_ctr(ppb, 0x4);
}
}
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 8: /* Phase 8 - Read status/message */
dev->jstart = jiffies;
dev->cur_cmd = cmd;
cmd->result = DID_ERROR << 16; /* default return code */
- cmd->SCp.phase = 0; /* bus free */
+ imm_scsi_pointer(cmd)->phase = 0; /* bus free */
schedule_delayed_work(&dev->imm_tq, 0);
* have tied the SCSI_MESSAGE line high in the interface
*/
- switch (cmd->SCp.phase) {
+ switch (imm_scsi_pointer(cmd)->phase) {
case 0: /* Do not have access to parport */
case 1: /* Have not connected to interface */
dev->cur_cmd = NULL; /* Forget the problem */
{
imm_struct *dev = imm_dev(cmd->device->host);
- if (cmd->SCp.phase)
+ if (imm_scsi_pointer(cmd)->phase)
imm_disconnect(dev);
dev->cur_cmd = NULL; /* Forget the problem */
.sg_tablesize = SG_ALL,
.can_queue = 1,
.slave_alloc = imm_adjust_queue,
+ .cmd_size = sizeof(struct scsi_pointer),
};
/***************************************************************************
#define w_ctr(x,y) outb(y, (x)+2)
#endif
+static inline struct scsi_pointer *imm_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static int imm_engine(imm_struct *, struct scsi_cmnd *);
#endif /* _IMM_H */
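
The SCp conversion seen here recurs throughout this section: per-command driver state moves out of scsi_cmnd::SCp into private data that the midlayer reserves via the host template's cmd_size and that the driver fetches with scsi_cmd_priv(). A minimal sketch of the pattern, using a hypothetical driver "foo":

struct foo_cmd_priv {
	int phase;			/* was cmd->SCp.phase */
};

static inline struct foo_cmd_priv *foo_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}

static struct scsi_host_template foo_template = {
	/* ... */
	.cmd_size = sizeof(struct foo_cmd_priv),
};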
SENSE_SIZE, DMA_FROM_DEVICE);
cblk->senseptr = (u32)dma_addr;
cblk->senselen = SENSE_SIZE;
- cmnd->SCp.ptr = (char *)(unsigned long)dma_addr;
+ initio_priv(cmnd)->sense_dma_addr = dma_addr;
cblk->cdblen = cmnd->cmd_len;
/* Clear the returned status */
sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
cblk->bufptr = (u32)dma_addr;
- cmnd->SCp.dma_handle = dma_addr;
+ initio_priv(cmnd)->sglist_dma_addr = dma_addr;
cblk->sglen = nseg;
static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
{
/* auto sense buffer */
- if (cmnd->SCp.ptr) {
+ if (initio_priv(cmnd)->sense_dma_addr) {
dma_unmap_single(&pci_dev->dev,
- (dma_addr_t)((unsigned long)cmnd->SCp.ptr),
+ initio_priv(cmnd)->sense_dma_addr,
SENSE_SIZE, DMA_FROM_DEVICE);
- cmnd->SCp.ptr = NULL;
+ initio_priv(cmnd)->sense_dma_addr = 0;
}
/* request buffer */
if (scsi_sg_count(cmnd)) {
- dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
+ dma_unmap_single(&pci_dev->dev,
+ initio_priv(cmnd)->sglist_dma_addr,
sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
.can_queue = MAX_TARGETS * i91u_MAXQUEUE,
.this_id = 1,
.sg_tablesize = SG_ALL,
+ .cmd_size = sizeof(struct initio_cmd_priv),
};
static int initio_probe_one(struct pci_dev *pdev,
#define SCSI_RESET_HOST_RESET 0x200
#define SCSI_RESET_ACTION 0xff
+struct initio_cmd_priv {
+ dma_addr_t sense_dma_addr;
+ dma_addr_t sglist_dma_addr;
+};
+
+static inline struct initio_cmd_priv *initio_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
#include <linux/types.h>
#include <linux/dma-mapping.h>
-#include <scsi/sg.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/sg.h>
#include "ips.h"
/* Remove a driver */
/* */
/****************************************************************************/
-static int
-ips_release(struct Scsi_Host *sh)
+static void ips_release(struct Scsi_Host *sh)
{
ips_scb_t *scb;
ips_ha_t *ha;
printk(KERN_WARNING
"(%s) release, invalid Scsi_Host pointer.\n", ips_name);
BUG();
- return (FALSE);
}
ha = IPS_HA(sh);
if (!ha)
- return (FALSE);
+ return;
/* flush the cache on the controller */
scb = &ha->scbs[ha->max_cmds - 1];
scsi_host_put(sh);
ips_released_controllers++;
-
- return (FALSE);
}
/****************************************************************************/
scsi_done(scsi_cmd);
}
- ha->active = FALSE;
+ ha->active = false;
return (FAILED);
}
scsi_done(scsi_cmd);
}
- ha->active = FALSE;
+ ha->active = false;
return (FAILED);
}
return 0;
}
- while (TRUE) {
+ while (true) {
sp = &ha->sp;
intrstatus = (*ha->func.isintr) (ha);
return 0;
}
- while (TRUE) {
+ while (true) {
sp = &ha->sp;
intrstatus = (*ha->func.isintr) (ha);
METHOD_TRACE("ipsintr_blocking", 2);
ips_freescb(ha, scb);
- if ((ha->waitflag == TRUE) && (ha->cmd_in_progress == scb->cdb[0])) {
- ha->waitflag = FALSE;
+ if (ha->waitflag && ha->cmd_in_progress == scb->cdb[0]) {
+ ha->waitflag = false;
return;
}
METHOD_TRACE("ips_send_wait", 1);
if (intr != IPS_FFDC) { /* Won't be Waiting if this is a Time Stamp */
- ha->waitflag = TRUE;
+ ha->waitflag = true;
ha->cmd_in_progress = scb->cdb[0];
}
scb->callback = ipsintr_blocking;
if (scb->bus > 0) {
/* Controller commands can't be issued */
/* to real devices -- fail them */
- if ((ha->waitflag == TRUE) &&
- (ha->cmd_in_progress == scb->cdb[0])) {
- ha->waitflag = FALSE;
- }
+ if (ha->waitflag && ha->cmd_in_progress == scb->cdb[0])
+ ha->waitflag = false;
return (1);
}
{
IPS_STATUS cstatus;
- while (TRUE) {
+ while (true) {
cstatus.value = (*ha->func.statupd) (ha);
if (cstatus.value == 0xffffffff) /* If No Interrupt to process */
METHOD_TRACE("ips_wait", 1);
ret = IPS_FAILURE;
- done = FALSE;
+ done = false;
time *= IPS_ONE_SEC; /* convert seconds */
while ((time > 0) && (!done)) {
if (intr == IPS_INTR_ON) {
- if (ha->waitflag == FALSE) {
+ if (!ha->waitflag) {
ret = IPS_SUCCESS;
- done = TRUE;
+ done = true;
break;
}
} else if (intr == IPS_INTR_IORL) {
- if (ha->waitflag == FALSE) {
+ if (!ha->waitflag) {
/*
* controller generated an interrupt to
* acknowledge completion of the command
* and ips_intr() has serviced the interrupt.
*/
ret = IPS_SUCCESS;
- done = TRUE;
+ done = true;
break;
}
{
METHOD_TRACE("ips_write_driver_status", 1);
- if (!ips_readwrite_page5(ha, FALSE, intr)) {
+ if (!ips_readwrite_page5(ha, false, intr)) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
"unable to read NVRAM page 5.\n");
ha->nvram->versioning = 0; /* Indicate the Driver Does Not Support Versioning */
/* now update the page */
- if (!ips_readwrite_page5(ha, TRUE, intr)) {
+ if (!ips_readwrite_page5(ha, true, intr)) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
"unable to write NVRAM page 5.\n");
/* Task Management Functions. Must be called from process context. */
.lldd_abort_task = isci_task_abort_task,
.lldd_abort_task_set = isci_task_abort_task_set,
- .lldd_clear_aca = isci_task_clear_aca,
.lldd_clear_task_set = isci_task_clear_task_set,
.lldd_I_T_nexus_reset = isci_task_I_T_nexus_reset,
.lldd_lu_reset = isci_task_lu_reset,
resp_iu = &ireq->ssp.rsp;
datapres = resp_iu->datapres;
- if (datapres == 1 || datapres == 2) {
+ if (datapres == SAS_DATAPRES_RESPONSE_DATA ||
+ datapres == SAS_DATAPRES_SENSE_DATA) {
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
} else {
resp_iu = &ireq->ssp.rsp;
- if (resp_iu->datapres == 0x01 ||
- resp_iu->datapres == 0x02) {
+ if (resp_iu->datapres == SAS_DATAPRES_RESPONSE_DATA ||
+ resp_iu->datapres == SAS_DATAPRES_SENSE_DATA) {
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
} else {
if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
/* Normal notification (task_done) */
task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
}
spin_unlock_irqrestore(&task->task_state_lock, task_flags);
return ireq;
}
-static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
- struct sas_task *task,
- u16 tag)
+struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+ struct sas_task *task,
+ u16 tag)
{
struct isci_request *ireq;
}
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
- struct sas_task *task, u16 tag)
+ struct sas_task *task, struct isci_request *ireq)
{
enum sci_status status;
- struct isci_request *ireq;
unsigned long flags;
int ret = 0;
- /* do common allocation and init of request object. */
- ireq = isci_io_request_from_tag(ihost, task, tag);
-
status = isci_io_request_build(ihost, ireq, idev);
if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev,
struct isci_tmf *isci_tmf,
u16 tag);
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
- struct sas_task *task, u16 tag);
+ struct sas_task *task, struct isci_request *ireq);
+struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+ struct sas_task *task,
+ u16 tag);
enum sci_status
sci_task_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
/* Normal notification (task_done) */
task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->lldd_task = NULL;
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_TASK_UNDELIVERED,
SAS_SAM_STAT_TASK_ABORTED);
} else {
- task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+ struct isci_request *ireq;
+
+ /* do common allocation and init of request object. */
+ ireq = isci_io_request_from_tag(ihost, task, tag);
spin_unlock_irqrestore(&task->task_state_lock, flags);
/* build and send the request. */
- status = isci_request_execute(ihost, idev, task, tag);
+ /* do common allocation and init of request object. */
+ status = isci_request_execute(ihost, idev, task, ireq);
if (status != SCI_SUCCESS) {
- spin_lock_irqsave(&task->task_state_lock, flags);
- /* Did not really start this command. */
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
if (test_bit(IDEV_GONE, &idev->flags)) {
/* Indicate that the device
* is gone.
/* If task is already done, the request isn't valid */
if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
- (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
old_request) {
idev = isci_get_device(task->dev->lldd_dev);
target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
*/
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
spin_unlock_irqrestore(&task->task_state_lock, flags);
ret = TMF_RESP_FUNC_COMPLETE;
test_bit(IDEV_GONE, &idev->flags));
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
}
-/**
- * isci_task_clear_aca() - This function is one of the SAS Domain Template
- * functions. This is one of the Task Management functoins called by libsas.
- * @d_device: This parameter specifies the domain device associated with this
- * request.
- * @lun: This parameter specifies the lun associated with this request.
- *
- * status, zero indicates success.
- */
-int isci_task_clear_aca(
- struct domain_device *d_device,
- u8 *lun)
-{
- return TMF_RESP_FUNC_FAILED;
-}
-
-
-
/**
* isci_task_clear_task_set() - This function is one of the SAS Domain Template
* functions. This is one of the Task Management functions called by libsas.
struct domain_device *d_device,
u8 *lun);
-int isci_task_clear_aca(
- struct domain_device *d_device,
- u8 *lun);
-
int isci_task_clear_task_set(
struct domain_device *d_device,
u8 *lun);
.proc_name = "iscsi_tcp",
.this_id = -1,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static struct iscsi_transport iscsi_sw_tcp_transport = {
&entry->type);
put_unaligned_be16(len, &entry->len);
put_unaligned_be64(lport->wwnn,
- (__be64 *)&entry->value[0]);
+ (__be64 *)&entry->value);
/* Manufacturer */
entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
if (cancel_delayed_work_sync(&ep->timeout_work)) {
FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
fc_exch_release(ep); /* release from pending timer hold */
+ return;
}
spin_lock_bh(&ep->ex_lock);
#define FC_SRB_READ (1 << 1)
#define FC_SRB_WRITE (1 << 0)
-/*
- * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
- */
-#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
-#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
-#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
-#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
-#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
+static struct libfc_cmd_priv *libfc_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
/**
* struct fc_fcp_internal - FCP layer internal data
unsigned long flags;
int rc;
- fsp->cmd->SCp.ptr = (char *)fsp;
+ libfc_priv(fsp->cmd)->fsp = fsp;
fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
if (unlikely(rc)) {
spin_lock_irqsave(&si->scsi_queue_lock, flags);
- fsp->cmd->SCp.ptr = NULL;
+ libfc_priv(fsp->cmd)->fsp = NULL;
list_del(&fsp->list);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}
fc_fcp_can_queue_ramp_up(lport);
sc_cmd = fsp->cmd;
- CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
+ libfc_priv(sc_cmd)->status = fsp->cdb_status;
switch (fsp->status_code) {
case FC_COMPLETE:
if (fsp->cdb_status == 0) {
*/
sc_cmd->result = DID_OK << 16;
if (fsp->scsi_resid)
- CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
+ libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid;
} else {
/*
* transport level I/O was ok but scsi
*/
FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
"due to FC_DATA_UNDRUN (scsi)\n");
- CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
+ libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid;
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
}
break;
spin_lock_irqsave(&si->scsi_queue_lock, flags);
list_del(&fsp->list);
- sc_cmd->SCp.ptr = NULL;
+ libfc_priv(sc_cmd)->fsp = NULL;
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
scsi_done(sc_cmd);
si = fc_get_scsi_internal(lport);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
- fsp = CMD_SP(sc_cmd);
+ fsp = libfc_priv(sc_cmd)->fsp;
if (!fsp) {
/* command completed while scsi eh was setting up */
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
if (sc) {
/* SCSI eh reuses commands to verify us */
- sc->SCp.ptr = NULL;
+ iscsi_cmd(sc)->task = NULL;
/*
* queue command may call this to free the task, so
* it will decide how to return sc to scsi-ml.
if (!task || !task->sc)
return NULL;
- if (task->sc->SCp.phase != conn->session->age) {
+ if (iscsi_cmd(task->sc)->age != conn->session->age) {
iscsi_session_printk(KERN_ERR, conn->session,
"task's session age %d, expected %d\n",
- task->sc->SCp.phase, conn->session->age);
+ iscsi_cmd(task->sc)->age, conn->session->age);
return NULL;
}
(void *) &task, sizeof(void *)))
return NULL;
- sc->SCp.phase = conn->session->age;
- sc->SCp.ptr = (char *) task;
+ iscsi_cmd(sc)->age = conn->session->age;
+ iscsi_cmd(sc)->task = task;
refcount_set(&task->refcount, 1);
task->state = ISCSI_TASK_PENDING;
struct iscsi_task *task = NULL;
sc->result = 0;
- sc->SCp.ptr = NULL;
+ iscsi_cmd(sc)->task = NULL;
ihost = shost_priv(host);
spin_lock_bh(&session->frwd_lock);
spin_lock(&session->back_lock);
- task = (struct iscsi_task *)sc->SCp.ptr;
+ task = iscsi_cmd(sc)->task;
if (!task) {
/*
* Raced with completion. Blk layer has taken ownership
* if session was ISCSI_STATE_IN_RECOVERY then we may not have
* got the command.
*/
- if (!sc->SCp.ptr) {
+ if (!iscsi_cmd(sc)->task) {
ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
"it completed.\n");
spin_unlock_bh(&session->frwd_lock);
* then let the host reset code handle this
*/
if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
- sc->SCp.phase != session->age) {
+ iscsi_cmd(sc)->age != session->age) {
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
ISCSI_DBG_EH(session, "failing abort due to dropped "
}
spin_lock(&session->back_lock);
- task = (struct iscsi_task *)sc->SCp.ptr;
+ task = iscsi_cmd(sc)->task;
if (!task || !task->sc) {
/* task completed before time out */
ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
ihost = shost_priv(shost);
if (xmit_can_sleep) {
- snprintf(ihost->workq_name, sizeof(ihost->workq_name),
- "iscsi_q_%d", shost->host_no);
- ihost->workq = alloc_workqueue("%s",
+ ihost->workq = alloc_workqueue("iscsi_q_%d",
WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 1, ihost->workq_name);
+ 1, shost->host_no);
if (!ihost->workq)
goto free_host;
}
struct iscsi_conn *conn;
struct iscsi_cls_conn *cls_conn;
char *data;
+ int err;
- cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+ cls_conn = iscsi_alloc_conn(cls_session, sizeof(*conn) + dd_size,
conn_idx);
if (!cls_conn)
return NULL;
goto login_task_data_alloc_fail;
conn->login_task->data = conn->data = data;
+ err = iscsi_add_conn(cls_conn);
+ if (err)
+ goto login_task_add_dev_fail;
+
return cls_conn;
+login_task_add_dev_fail:
+ free_pages((unsigned long) conn->data,
+ get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
+
login_task_data_alloc_fail:
kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
login_task_alloc_fail:
- iscsi_destroy_conn(cls_conn);
+ iscsi_put_conn(cls_conn);
return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_conn_setup);
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- char *tmp_persistent_address = conn->persistent_address;
- char *tmp_local_ipaddr = conn->local_ipaddr;
+
+ iscsi_remove_conn(cls_conn);
del_timer_sync(&conn->transport_timer);
spin_lock_bh(&session->frwd_lock);
free_pages((unsigned long) conn->data,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
+ kfree(conn->persistent_address);
+ kfree(conn->local_ipaddr);
/* regular RX path uses back_lock */
spin_lock_bh(&session->back_lock);
kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
- iscsi_destroy_conn(cls_conn);
- kfree(tmp_persistent_address);
- kfree(tmp_local_ipaddr);
+ iscsi_put_conn(cls_conn);
}
EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
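
The conversion above splits connection setup into separate allocate and register steps, with remove/put as their counterparts. A sketch of the resulting lifecycle in a hypothetical transport (dd_size of 0, error handling reduced to the essentials):

static struct iscsi_cls_conn *my_conn_create(struct iscsi_cls_session *cls_session,
					     uint32_t conn_idx)
{
	struct iscsi_cls_conn *cls_conn;

	cls_conn = iscsi_alloc_conn(cls_session, 0, conn_idx);
	if (!cls_conn)
		return NULL;

	/* registers the device; on failure only a final put is needed */
	if (iscsi_add_conn(cls_conn)) {
		iscsi_put_conn(cls_conn);
		return NULL;
	}

	return cls_conn;
}

Teardown mirrors it: iscsi_remove_conn() first, then driver cleanup, then iscsi_put_conn() drops the last reference.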
case SAS_DATA_OVERRUN:
case SAS_QUEUE_FULL:
case SAS_DEVICE_UNKNOWN:
- case SAS_SG_ERR:
- return AC_ERR_INVALID;
case SAS_OPEN_TO:
case SAS_OPEN_REJECT:
pr_warn("%s: Saw error %d. What to do?\n",
task->task_proto = SAS_PROTOCOL_STP;
task->task_done = sas_ata_task_done;
- if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ ||
- qc->tf.command == ATA_CMD_FPDMA_RECV ||
- qc->tf.command == ATA_CMD_FPDMA_SEND ||
- qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
- /* Need to zero out the tag libata assigned us */
+ /* For NCQ commands, zero out the tag libata assigned us */
+ if (ata_is_ncq(qc->tf.protocol))
qc->tf.nsect = 0;
- }
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
task->uldd_task = qc;
task->total_xfer_len = qc->nbytes;
task->num_scatter = qc->n_elem;
task->data_dir = qc->dma_dir;
- } else if (qc->tf.protocol == ATA_PROT_NODATA) {
+ } else if (!ata_is_data(qc->tf.protocol)) {
task->data_dir = DMA_NONE;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
}
task->scatter = qc->sg;
task->ata_task.retry_count = 1;
- task->task_state_flags = SAS_TASK_STATE_PENDING;
qc->lldd_task = task;
task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol);
sas_enable_revalidation(sas_ha);
}
-void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
- struct list_head *done_q)
+void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q)
{
struct scsi_cmnd *cmd, *n;
struct domain_device *eh_dev;
ap = dev->sata_dev.ap;
ata_port_wait_eh(ap);
}
+
+int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id)
+{
+ struct sas_tmf_task tmf_task = {};
+ return sas_execute_tmf(device, fis, sizeof(struct host_to_dev_fis),
+ force_phy_id, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_execute_ata_cmd);
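
sas_execute_ata_cmd() gives LLDDs a generic way to push a raw FIS through the slow-task TMF path. A sketch of issuing a SATA soft reset with it; the FIS values follow the standard register host-to-device layout and are illustrative rather than lifted from any one driver:

static int my_ata_soft_reset(struct domain_device *device)
{
	struct host_to_dev_fis fis = {
		.fis_type = 0x27,	/* Register - Host to Device */
		.control = 1 << 2,	/* SRST */
	};

	/* a negative force_phy_id means: don't force a specific phy */
	return sas_execute_ata_cmd(device, (u8 *)&fis, -1);
}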
}
}
-int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
+void sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
{
struct sas_discovery *disc;
if (!port)
- return 0;
+ return;
disc = &port->disc;
BUG_ON(ev >= DISC_NUM_EVENTS);
sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);
-
- return 0;
}
/**
#include <scsi/scsi_host.h>
#include "sas_internal.h"
-int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
+bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
- /* it's added to the defer_q when draining so return succeed */
- int rc = 1;
-
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
- return 0;
+ return false;
if (test_bit(SAS_HA_DRAINING, &ha->state)) {
/* add it to the defer list, if not already pending */
if (list_empty(&sw->drain_node))
list_add_tail(&sw->drain_node, &ha->defer_q);
- } else
- rc = queue_work(ha->event_q, &sw->work);
+ return true;
+ }
- return rc;
+ return queue_work(ha->event_q, &sw->work);
}
-static int sas_queue_event(int event, struct sas_work *work,
+static bool sas_queue_event(int event, struct sas_work *work,
struct sas_ha_struct *ha)
{
unsigned long flags;
- int rc;
+ bool rc;
spin_lock_irqsave(&ha->lock, flags);
rc = sas_queue_work(ha, work);
void sas_queue_deferred_work(struct sas_ha_struct *ha)
{
struct sas_work *sw, *_sw;
- int ret;
spin_lock_irq(&ha->lock);
list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
list_del_init(&sw->drain_node);
- ret = sas_queue_work(ha, sw);
- if (ret != 1) {
+
+ if (!sas_queue_work(ha, sw)) {
pm_runtime_put(ha->dev);
sas_free_event(to_asd_sas_event(&sw->work));
}
return deferred;
}
-int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
- gfp_t gfp_flags)
+void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
+ gfp_t gfp_flags)
{
struct sas_ha_struct *ha = phy->ha;
struct asd_sas_event *ev;
- int ret;
BUG_ON(event >= PORT_NUM_EVENTS);
ev = sas_alloc_event(phy, gfp_flags);
if (!ev)
- return -ENOMEM;
+ return;
/* Call pm_runtime_put() with pairs in sas_port_event_worker() */
pm_runtime_get_noresume(ha->dev);
INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
if (sas_defer_event(phy, ev))
- return 0;
+ return;
- ret = sas_queue_event(event, &ev->work, ha);
- if (ret != 1) {
+ if (!sas_queue_event(event, &ev->work, ha)) {
pm_runtime_put(ha->dev);
sas_free_event(ev);
}
-
- return ret;
}
EXPORT_SYMBOL_GPL(sas_notify_port_event);
-int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
- gfp_t gfp_flags)
+void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
+ gfp_t gfp_flags)
{
struct sas_ha_struct *ha = phy->ha;
struct asd_sas_event *ev;
- int ret;
BUG_ON(event >= PHY_NUM_EVENTS);
ev = sas_alloc_event(phy, gfp_flags);
if (!ev)
- return -ENOMEM;
+ return;
/* Call pm_runtime_put() with pairs in sas_phy_event_worker() */
pm_runtime_get_noresume(ha->dev);
INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
if (sas_defer_event(phy, ev))
- return 0;
+ return;
- ret = sas_queue_event(event, &ev->work, ha);
- if (ret != 1) {
+ if (!sas_queue_event(event, &ev->work, ha)) {
pm_runtime_put(ha->dev);
sas_free_event(ev);
}
-
- return ret;
}
EXPORT_SYMBOL_GPL(sas_notify_phy_event);
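
Now that the notifiers return void, callers fire and forget; the gfp_flags argument is what makes them safe from atomic context. A sketch of a phy-up hard-IRQ handler (handler and wiring hypothetical):

static irqreturn_t my_phy_up_isr(int irq, void *p)
{
	struct asd_sas_phy *sas_phy = p;

	/* must not sleep here, so pass GFP_ATOMIC for the event allocation */
	sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);

	return IRQ_HANDLED;
}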
/* ---------- SMP task management ---------- */
-static void smp_task_timedout(struct timer_list *t)
-{
- struct sas_task_slow *slow = from_timer(slow, t, timer);
- struct sas_task *task = slow->task;
- unsigned long flags;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- complete(&task->slow_task->completion);
- }
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-}
-
-static void smp_task_done(struct sas_task *task)
-{
- del_timer(&task->slow_task->timer);
- complete(&task->slow_task->completion);
-}
-
/* Give it some long enough timeout. In seconds. */
#define SMP_TIMEOUT 10
task->smp_task.smp_req = *req;
task->smp_task.smp_resp = *resp;
- task->task_done = smp_task_done;
+ task->task_done = sas_task_internal_done;
- task->slow_task->timer.function = smp_task_timedout;
+ task->slow_task->timer.function = sas_task_internal_timedout;
task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
void sas_porte_link_reset_err(struct work_struct *work);
void sas_porte_timer_event(struct work_struct *work);
void sas_porte_hard_reset(struct work_struct *work);
-int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
+bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
int sas_notify_lldd_dev_found(struct domain_device *);
void sas_notify_lldd_dev_gone(struct domain_device *);
enum phy_func phy_func, struct sas_phy_linkrates *);
int sas_smp_get_phy_events(struct sas_phy *phy);
-int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
- gfp_t flags);
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
extern const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS];
extern const work_func_t sas_port_event_fns[PORT_NUM_EVENTS];
+void sas_task_internal_done(struct sas_task *task);
+void sas_task_internal_timedout(struct timer_list *t);
+int sas_execute_tmf(struct domain_device *device, void *parameter,
+ int para_len, int force_phy_id,
+ struct sas_tmf_task *tmf);
+
#ifdef CONFIG_SCSI_SAS_HOST_SMP
extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost);
#else
sas_discover_event(port, DISCE_RESUME);
}
+static void sas_form_port_add_phy(struct asd_sas_port *port,
+ struct asd_sas_phy *phy, bool wideport)
+{
+ list_add_tail(&phy->port_phy_el, &port->phy_list);
+ sas_phy_set_target(phy, port->port_dev);
+ phy->port = port;
+ port->num_phys++;
+ port->phy_mask |= (1U << phy->id);
+
+ if (wideport)
+ pr_debug("phy%d matched wide port%d\n", phy->id,
+ port->id);
+ else
+ memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
+
+ if (*(u64 *)port->attached_sas_addr == 0) {
+ port->class = phy->class;
+ memcpy(port->attached_sas_addr, phy->attached_sas_addr,
+ SAS_ADDR_SIZE);
+ port->iproto = phy->iproto;
+ port->tproto = phy->tproto;
+ port->oob_mode = phy->oob_mode;
+ port->linkrate = phy->linkrate;
+ } else {
+ port->linkrate = max(port->linkrate, phy->linkrate);
+ }
+}
+
/**
* sas_form_port - add this phy to a port
* @phy: the phy of interest
int i;
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
- struct domain_device *port_dev;
+ struct domain_device *port_dev = NULL;
struct sas_internal *si =
to_sas_internal(sas_ha->core.shost->transportt);
unsigned long flags;
if (*(u64 *) port->sas_addr &&
phy_is_wideport_member(port, phy) && port->num_phys > 0) {
/* wide port */
- pr_debug("phy%d matched wide port%d\n", phy->id,
- port->id);
+ port_dev = port->port_dev;
+ sas_form_port_add_phy(port, phy, true);
+ spin_unlock(&port->phy_list_lock);
break;
}
spin_unlock(&port->phy_list_lock);
port = sas_ha->sas_port[i];
spin_lock(&port->phy_list_lock);
if (*(u64 *)port->sas_addr == 0
- && port->num_phys == 0) {
- memcpy(port->sas_addr, phy->sas_addr,
- SAS_ADDR_SIZE);
+ && port->num_phys == 0) {
+ port_dev = port->port_dev;
+ sas_form_port_add_phy(port, phy, false);
+ spin_unlock(&port->phy_list_lock);
break;
}
spin_unlock(&port->phy_list_lock);
}
- }
- if (i >= sas_ha->num_phys) {
- pr_err("%s: couldn't find a free port, bug?\n", __func__);
- spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
- return;
+ if (i >= sas_ha->num_phys) {
+ pr_err("%s: couldn't find a free port, bug?\n",
+ __func__);
+ spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
+ return;
+ }
}
-
- /* add the phy to the port */
- port_dev = port->port_dev;
- list_add_tail(&phy->port_phy_el, &port->phy_list);
- sas_phy_set_target(phy, port_dev);
- phy->port = port;
- port->num_phys++;
- port->phy_mask |= (1U << phy->id);
-
- if (*(u64 *)port->attached_sas_addr == 0) {
- port->class = phy->class;
- memcpy(port->attached_sas_addr, phy->attached_sas_addr,
- SAS_ADDR_SIZE);
- port->iproto = phy->iproto;
- port->tproto = phy->tproto;
- port->oob_mode = phy->oob_mode;
- port->linkrate = phy->linkrate;
- } else
- port->linkrate = max(port->linkrate, phy->linkrate);
- spin_unlock(&port->phy_list_lock);
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
if (!port->port) {
case SAS_DEVICE_UNKNOWN:
hs = DID_BAD_TARGET;
break;
- case SAS_SG_ERR:
- hs = DID_PARITY;
- break;
case SAS_OPEN_REJECT:
if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
hs = DID_SOFT_ERROR; /* retry */
pr_notice("%s: task 0x%p failed to abort\n",
__func__, task);
return TASK_ABORT_FAILED;
+ default:
+ pr_notice("%s: task 0x%p result code %d not handled\n",
+ __func__, task, res);
}
-
}
}
- return res;
+ return TASK_ABORT_FAILED;
}
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
* scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
* command we see here has no sas_task and is thus unknown to the HA.
*/
- sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q);
+ sas_ata_eh(shost, &eh_work_q);
if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
}
EXPORT_SYMBOL_GPL(sas_bios_param);
+void sas_task_internal_done(struct sas_task *task)
+{
+ del_timer(&task->slow_task->timer);
+ complete(&task->slow_task->completion);
+}
+
+void sas_task_internal_timedout(struct timer_list *t)
+{
+ struct sas_task_slow *slow = from_timer(slow, t, timer);
+ struct sas_task *task = slow->task;
+ bool is_completed = true;
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ is_completed = false;
+ }
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ if (!is_completed)
+ complete(&task->slow_task->completion);
+}
+
+#define TASK_TIMEOUT (20 * HZ)
+#define TASK_RETRY 3
+
+static int sas_execute_internal_abort(struct domain_device *device,
+ enum sas_internal_abort type, u16 tag,
+ unsigned int qid, void *data)
+{
+ struct sas_ha_struct *ha = device->port->ha;
+ struct sas_internal *i = to_sas_internal(ha->core.shost->transportt);
+ struct sas_task *task = NULL;
+ int res, retry;
+
+ for (retry = 0; retry < TASK_RETRY; retry++) {
+ task = sas_alloc_slow_task(GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = device;
+ task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
+ task->task_done = sas_task_internal_done;
+ task->slow_task->timer.function = sas_task_internal_timedout;
+ task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
+ add_timer(&task->slow_task->timer);
+
+ task->abort_task.tag = tag;
+ task->abort_task.type = type;
+ task->abort_task.qid = qid;
+
+ res = i->dft->lldd_execute_task(task, GFP_KERNEL);
+ if (res) {
+ del_timer_sync(&task->slow_task->timer);
+ pr_err("Executing internal abort failed %016llx (%d)\n",
+ SAS_ADDR(device->sas_addr), res);
+ break;
+ }
+
+ wait_for_completion(&task->slow_task->completion);
+ res = TMF_RESP_FUNC_FAILED;
+
+ /* Even if the internal abort timed out, return directly. */
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ bool quit = true;
+
+ if (i->dft->lldd_abort_timeout)
+ quit = i->dft->lldd_abort_timeout(task, data);
+ else
+ pr_err("Internal abort: timeout %016llx\n",
+ SAS_ADDR(device->sas_addr));
+ res = -EIO;
+ if (quit)
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_SAM_STAT_GOOD) {
+ res = TMF_RESP_FUNC_COMPLETE;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == TMF_RESP_FUNC_SUCC) {
+ res = TMF_RESP_FUNC_SUCC;
+ break;
+ }
+
+ pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n",
+ SAS_ADDR(device->sas_addr), task->task_status.resp,
+ task->task_status.stat);
+ sas_free_task(task);
+ task = NULL;
+ }
+ BUG_ON(retry == TASK_RETRY && task != NULL);
+ sas_free_task(task);
+ return res;
+}
+
+int sas_execute_internal_abort_single(struct domain_device *device, u16 tag,
+ unsigned int qid, void *data)
+{
+ return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE,
+ tag, qid, data);
+}
+EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single);
+
+int sas_execute_internal_abort_dev(struct domain_device *device,
+ unsigned int qid, void *data)
+{
+ return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV,
+ SCSI_NO_TAG, qid, data);
+}
+EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev);
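+
These helpers let an LLDD drop its private internal-abort machinery, as hisi_sas does above; the opaque data pointer is handed through unchanged to lldd_abort_timeout(). A sketch of a device-scope wrapper mirroring the hisi_sas usage (struct and names hypothetical):

struct my_abort_data {
	bool rst_ha_timeout;	/* reset the HA if the abort times out */
};

static int my_internal_task_abort_dev(struct domain_device *device,
				      bool rst_to_recover)
{
	struct my_abort_data data = {
		.rst_ha_timeout = rst_to_recover,
	};

	/* qid 0: this sketch aborts through the first delivery queue only */
	return sas_execute_internal_abort_dev(device, 0, &data);
}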
+
+int sas_execute_tmf(struct domain_device *device, void *parameter,
+ int para_len, int force_phy_id,
+ struct sas_tmf_task *tmf)
+{
+ struct sas_task *task;
+ struct sas_internal *i =
+ to_sas_internal(device->port->ha->core.shost->transportt);
+ int res, retry;
+
+ for (retry = 0; retry < TASK_RETRY; retry++) {
+ task = sas_alloc_slow_task(GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = device;
+ task->task_proto = device->tproto;
+
+ if (dev_is_sata(device)) {
+ task->ata_task.device_control_reg_update = 1;
+ if (force_phy_id >= 0) {
+ task->ata_task.force_phy = true;
+ task->ata_task.force_phy_id = force_phy_id;
+ }
+ memcpy(&task->ata_task.fis, parameter, para_len);
+ } else {
+ memcpy(&task->ssp_task, parameter, para_len);
+ }
+
+ task->task_done = sas_task_internal_done;
+ task->tmf = tmf;
+
+ task->slow_task->timer.function = sas_task_internal_timedout;
+ task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
+ add_timer(&task->slow_task->timer);
+
+ res = i->dft->lldd_execute_task(task, GFP_KERNEL);
+ if (res) {
+ del_timer_sync(&task->slow_task->timer);
+ pr_err("executing TMF task failed %016llx (%d)\n",
+ SAS_ADDR(device->sas_addr), res);
+ break;
+ }
+
+ wait_for_completion(&task->slow_task->completion);
+
+ if (i->dft->lldd_tmf_exec_complete)
+ i->dft->lldd_tmf_exec_complete(device);
+
+ res = TMF_RESP_FUNC_FAILED;
+
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ pr_err("TMF task timeout for %016llx and not done\n",
+ SAS_ADDR(device->sas_addr));
+ if (i->dft->lldd_tmf_aborted)
+ i->dft->lldd_tmf_aborted(task);
+ break;
+ }
+ pr_warn("TMF task timeout for %016llx and done\n",
+ SAS_ADDR(device->sas_addr));
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
+ res = TMF_RESP_FUNC_COMPLETE;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == TMF_RESP_FUNC_SUCC) {
+ res = TMF_RESP_FUNC_SUCC;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_UNDERRUN) {
+ /* no error, but return the number of bytes of
+ * underrun
+ */
+ pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
+ SAS_ADDR(device->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
+ res = task->task_status.residual;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_OVERRUN) {
+ pr_warn("TMF task blocked task error %016llx\n",
+ SAS_ADDR(device->sas_addr));
+ res = -EMSGSIZE;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_OPEN_REJECT) {
+ pr_warn("TMF task open reject failed %016llx\n",
+ SAS_ADDR(device->sas_addr));
+ res = -EIO;
+ } else {
+ pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n",
+ SAS_ADDR(device->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
+ }
+ sas_free_task(task);
+ task = NULL;
+ }
+
+ if (retry == TASK_RETRY)
+ pr_warn("executing TMF for %016llx failed after %d attempts!\n",
+ SAS_ADDR(device->sas_addr), TASK_RETRY);
+ sas_free_task(task);
+
+ return res;
+}
+
+static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun,
+ struct sas_tmf_task *tmf)
+{
+ struct sas_ssp_task ssp_task;
+
+ if (!(device->tproto & SAS_PROTOCOL_SSP))
+ return TMF_RESP_FUNC_ESUPP;
+
+ memcpy(ssp_task.LUN, lun, 8);
+
+ return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf);
+}
+
+int sas_abort_task_set(struct domain_device *dev, u8 *lun)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_ABORT_TASK_SET,
+ };
+
+ return sas_execute_ssp_tmf(dev, lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_abort_task_set);
+
+int sas_clear_task_set(struct domain_device *dev, u8 *lun)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_CLEAR_TASK_SET,
+ };
+
+ return sas_execute_ssp_tmf(dev, lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_clear_task_set);
+
+int sas_lu_reset(struct domain_device *dev, u8 *lun)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_LU_RESET,
+ };
+
+ return sas_execute_ssp_tmf(dev, lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_lu_reset);
+
+int sas_query_task(struct sas_task *task, u16 tag)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_QUERY_TASK,
+ .tag_of_task_to_be_managed = tag,
+ };
+ struct scsi_cmnd *cmnd = task->uldd_task;
+ struct domain_device *dev = task->dev;
+ struct scsi_lun lun;
+
+ int_to_scsilun(cmnd->device->lun, &lun);
+
+ return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_query_task);
+
+int sas_abort_task(struct sas_task *task, u16 tag)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_ABORT_TASK,
+ .tag_of_task_to_be_managed = tag,
+ };
+ struct scsi_cmnd *cmnd = task->uldd_task;
+ struct domain_device *dev = task->dev;
+ struct scsi_lun lun;
+
+ int_to_scsilun(cmnd->device->lun, &lun);
+
+ return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_abort_task);
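The exported TMF helpers above are intended to back an LLDD's error-handling
callbacks. A minimal sketch, assuming a hypothetical driver-side wrapper
(my_lldd_abort_stale_task is not a libsas symbol):

/* QUERY TASK first; only send ABORT TASK if the target still has the
 * task in its task set (TMF_RESP_FUNC_SUCC per SAM).
 */
static int my_lldd_abort_stale_task(struct sas_task *task, u16 tag)
{
	int res = sas_query_task(task, tag);

	if (res == TMF_RESP_FUNC_SUCC)
		res = sas_abort_task(task, tag);
	return res;
}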
+
/*
* Tell an upper layer that it needs to initiate an abort for a given task.
* This should only ever be called by an LLDD.
tstat->resp = SAS_TASK_COMPLETE;
- if (iu->datapres == 0)
+ switch (iu->datapres) {
+ case SAS_DATAPRES_NO_DATA:
tstat->stat = iu->status;
- else if (iu->datapres == 1)
+ break;
+ case SAS_DATAPRES_RESPONSE_DATA:
tstat->stat = iu->resp_data[3];
- else if (iu->datapres == 2) {
+ break;
+ case SAS_DATAPRES_SENSE_DATA:
tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
tstat->buf_valid_size =
min_t(int, SAS_STATUS_BUF_SIZE,
if (iu->status != SAM_STAT_CHECK_CONDITION)
dev_warn(dev, "dev %016llx sent sense data, but stat(0x%x) is not CHECK CONDITION\n",
SAS_ADDR(task->dev->sas_addr), iu->status);
- }
- else
+ break;
+ default:
/* when datapres contains corrupt/unknown value... */
tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
+ }
}
EXPORT_SYMBOL_GPL(sas_ssp_task_response);
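sas_ssp_task_response() gives LLDD completion paths one place to decode the
response IU's datapres field. A hedged sketch of a caller follows;
my_lldd_complete_ssp is hypothetical, and dev, task and iu are assumed to
come from the driver's completion context:

static void my_lldd_complete_ssp(struct device *dev,
				 struct sas_task *task,
				 struct ssp_response_iu *iu)
{
	/* Fills task->task_status: NO_DATA takes the status byte,
	 * RESPONSE_DATA takes resp_data[3], SENSE_DATA copies the
	 * sense buffer and reports CHECK CONDITION.
	 */
	sas_ssp_task_response(dev, task, iu);
}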
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
(struct lpfc_vport *vport,
struct lpfc_io_buf *lpfc_cmd,
uint8_t tmo);
+ int (*lpfc_scsi_prep_task_mgmt_cmd)
+ (struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ u64 lun, u8 task_mgmt_cmd);
/* IOCB interface function jump table entries */
int (*__lpfc_sli_issue_iocb)
void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
struct lpfc_iocbq *);
int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
- IOCB_t * (*lpfc_get_iocb_from_iocbq)
- (struct lpfc_iocbq *);
void (*lpfc_scsi_cmd_iocb_cmpl)
(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
int (*lpfc_bg_scsi_prep_dma_buf)
(struct lpfc_hba *, struct lpfc_io_buf *);
- /* Add new entries here */
+
+ /* Prep SLI WQE/IOCB jump table entries */
+ void (*__lpfc_sli_prep_els_req_rsp)(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp,
+ u16 cmd_size, u32 did, u32 elscmd,
+ u8 tmo, u8 expect_rsp);
+ void (*__lpfc_sli_prep_gen_req)(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi,
+ u32 num_entry, u8 tmo);
+ void (*__lpfc_sli_prep_xmit_seq64)(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi,
+ u16 ox_id, u32 num_entry, u8 rctl,
+ u8 last_seq, u8 cr_cx_cmd);
+ void (*__lpfc_sli_prep_abort_xri)(struct lpfc_iocbq *cmdiocbq,
+ u16 ulp_context, u16 iotag,
+ u8 ulp_class, u16 cqid, bool ia);
/* expedite pool */
struct lpfc_epd_pool epd_pool;
uint32_t cfg_nvme_seg_cnt;
uint32_t cfg_scsi_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
- uint64_t cfg_soft_wwnn;
- uint64_t cfg_soft_wwpn;
uint32_t cfg_hba_queue_depth;
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
#define VPD_PORT 0x8 /* valid vpd port data */
#define VPD_MASK 0xf /* mask for any vpd data */
- uint8_t soft_wwn_enable;
struct timer_list fcp_poll_timer;
struct timer_list eratt_poll;
{
return phba->cfg_vmid_app_header || phba->cfg_vmid_priority_tagging;
}
+
+static inline
+u8 get_job_ulpstatus(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
+ else
+ return iocbq->iocb.ulpStatus;
+}
+
+static inline
+u32 get_job_word4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return iocbq->wcqe_cmpl.parameter;
+ else
+ return iocbq->iocb.un.ulpWord[4];
+}
+
+static inline
+u8 get_job_cmnd(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_cmnd, &iocbq->wqe.generic.wqe_com);
+ else
+ return iocbq->iocb.ulpCommand;
+}
+
+static inline
+u16 get_job_ulpcontext(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_ctxt_tag, &iocbq->wqe.generic.wqe_com);
+ else
+ return iocbq->iocb.ulpContext;
+}
+
+static inline
+u16 get_job_rcvoxid(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_rcvoxid, &iocbq->wqe.generic.wqe_com);
+ else
+ return iocbq->iocb.unsli3.rcvsli3.ox_id;
+}
+
+static inline
+u32 get_job_data_placed(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return iocbq->wcqe_cmpl.total_data_placed;
+ else
+ return iocbq->iocb.un.genreq64.bdl.bdeSize;
+}
+
+static inline
+u32 get_job_abtsiotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return iocbq->wqe.abort_cmd.wqe_com.abort_tag;
+ else
+ return iocbq->iocb.un.acxri.abortIoTag;
+}
+
+static inline
+u32 get_job_els_rsp64_did(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_els_did, &iocbq->wqe.els_req.wqe_dest);
+ else
+ return iocbq->iocb.un.elsreq64.remoteID;
+}
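The get_job_* accessors above let shared completion code stay layout-agnostic
across SLI-3 IOCBs and SLI-4 WQEs/WCQEs. A minimal sketch of the intended
pattern (my_cmpl is hypothetical; the status test mirrors the converted
handlers later in this series):

static void my_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
		    struct lpfc_iocbq *rspiocbq)
{
	u32 ulp_status = get_job_ulpstatus(phba, rspiocbq);
	u32 ulp_word4 = get_job_word4(phba, rspiocbq);

	/* One test now covers both SLI revisions. */
	if (ulp_status == IOSTAT_LOCAL_REJECT &&
	    (ulp_word4 & IOERR_PARAM_MASK) == IOERR_SEQUENCE_TIMEOUT)
		return;	/* e.g. treat as a timeout */
}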
NULL);
static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);
-static char *lpfc_soft_wwn_key = "C99G71SL8032A";
#define WWN_SZ 8
/**
* lpfc_wwn_set - Convert string to the 8 byte WWN value.
}
return 0;
}
-/**
- * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: containing the string lpfc_soft_wwn_key.
- * @count: must be size of lpfc_soft_wwn_key.
- *
- * Returns:
- * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
- * length of buf indicates success
- **/
-static ssize_t
-lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- unsigned int cnt = count;
- uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
- u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
-
- /*
- * We're doing a simple sanity check for soft_wwpn setting.
- * We require that the user write a specific key to enable
- * the soft_wwpn attribute to be settable. Once the attribute
- * is written, the enable key resets. If further updates are
- * desired, the key must be written again to re-enable the
- * attribute.
- *
- * The "key" is not secret - it is a hardcoded string shown
- * here. The intent is to protect against the random user or
- * application that is just writing attributes.
- */
- if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0051 lpfc soft wwpn can not be enabled: "
- "fawwpn is enabled\n");
- return -EINVAL;
- }
-
- /* count may include a LF at end of string */
- if (buf[cnt-1] == '\n')
- cnt--;
-
- if ((cnt != strlen(lpfc_soft_wwn_key)) ||
- (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
- return -EINVAL;
-
- phba->soft_wwn_enable = 1;
-
- dev_printk(KERN_WARNING, &phba->pcidev->dev,
- "lpfc%d: soft_wwpn assignment has been enabled.\n",
- phba->brd_no);
- dev_printk(KERN_WARNING, &phba->pcidev->dev,
- " The soft_wwpn feature is not supported by Broadcom.");
-
- return count;
-}
-static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
-
-/**
- * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: on return contains the wwpn in hexadecimal.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
- (unsigned long long)phba->cfg_soft_wwpn);
-}
-
-/**
- * lpfc_soft_wwpn_store - Set the ww port name of the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: contains the wwpn in hexadecimal.
- * @count: number of wwpn bytes in buf
- *
- * Returns:
- * -EACCES hba reset not enabled, adapter over temp
- * -EINVAL soft wwn not enabled, count is invalid, invalid wwpn byte invalid
- * -EIO error taking adapter offline or online
- * value of count on success
- **/
-static ssize_t
-lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- struct completion online_compl;
- int stat1 = 0, stat2 = 0;
- unsigned int cnt = count;
- u8 wwpn[WWN_SZ];
- int rc;
-
- if (!phba->cfg_enable_hba_reset)
- return -EACCES;
- spin_lock_irq(&phba->hbalock);
- if (phba->over_temp_state == HBA_OVER_TEMP) {
- spin_unlock_irq(&phba->hbalock);
- return -EACCES;
- }
- spin_unlock_irq(&phba->hbalock);
- /* count may include a LF at end of string */
- if (buf[cnt-1] == '\n')
- cnt--;
-
- if (!phba->soft_wwn_enable)
- return -EINVAL;
-
- /* lock setting wwpn, wwnn down */
- phba->soft_wwn_enable = 0;
-
- rc = lpfc_wwn_set(buf, cnt, wwpn);
- if (rc) {
- /* not able to set wwpn, unlock it */
- phba->soft_wwn_enable = 1;
- return rc;
- }
-
- phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
- fc_host_port_name(shost) = phba->cfg_soft_wwpn;
- if (phba->cfg_soft_wwnn)
- fc_host_node_name(shost) = phba->cfg_soft_wwnn;
-
- dev_printk(KERN_NOTICE, &phba->pcidev->dev,
- "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
-
- stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
- if (stat1)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0463 lpfc_soft_wwpn attribute set failed to "
- "reinit adapter - %d\n", stat1);
- init_completion(&online_compl);
- rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
- LPFC_EVT_ONLINE);
- if (rc == 0)
- return -ENOMEM;
- wait_for_completion(&online_compl);
- if (stat2)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0464 lpfc_soft_wwpn attribute set failed to "
- "reinit adapter - %d\n", stat2);
- return (stat1 || stat2) ? -EIO : count;
-}
-static DEVICE_ATTR_RW(lpfc_soft_wwpn);
-
-/**
- * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: on return contains the wwnn in hexadecimal.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
- (unsigned long long)phba->cfg_soft_wwnn);
-}
-
-/**
- * lpfc_soft_wwnn_store - sets the ww node name of the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: contains the ww node name in hexadecimal.
- * @count: number of wwnn bytes in buf.
- *
- * Returns:
- * -EINVAL soft wwn not enabled, count is invalid, invalid wwnn byte invalid
- * value of count on success
- **/
-static ssize_t
-lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- unsigned int cnt = count;
- u8 wwnn[WWN_SZ];
- int rc;
-
- /* count may include a LF at end of string */
- if (buf[cnt-1] == '\n')
- cnt--;
-
- if (!phba->soft_wwn_enable)
- return -EINVAL;
-
- rc = lpfc_wwn_set(buf, cnt, wwnn);
- if (rc) {
- /* Allow wwnn to be set many times, as long as the enable
- * is set. However, once the wwpn is set, everything locks.
- */
- return rc;
- }
-
- phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
-
- dev_printk(KERN_NOTICE, &phba->pcidev->dev,
- "lpfc%d: soft_wwnn set. Value will take effect upon "
- "setting of the soft_wwpn\n", phba->brd_no);
-
- return count;
-}
-static DEVICE_ATTR_RW(lpfc_soft_wwnn);
/**
 * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
&dev_attr_lpfc_nvme_enable_fb.attr,
&dev_attr_lpfc_nvmet_fb_size.attr,
&dev_attr_lpfc_enable_bg.attr,
- &dev_attr_lpfc_soft_wwnn.attr,
- &dev_attr_lpfc_soft_wwpn.attr,
- &dev_attr_lpfc_soft_wwn_enable.attr,
&dev_attr_lpfc_enable_hba_reset.attr,
&dev_attr_lpfc_enable_hba_heartbeat.attr,
&dev_attr_lpfc_EnableXLane.attr,
phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_irq_chann = phba->cfg_hdw_queue;
- phba->cfg_soft_wwnn = 0L;
- phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_aer_support_init(phba, lpfc_aer_support);
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
struct bsg_job_data *dd_data;
struct bsg_job *job;
struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_nodelist *ndlp;
struct lpfc_bsg_iocb *iocb;
unsigned long flags;
- unsigned int rsp_size;
int rc = 0;
+ u32 ulp_status, ulp_word4, total_data_placed;
dd_data = cmdiocbq->context1;
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
- cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
iocb = &dd_data->context_un.iocb;
rmp = iocb->rmp;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
- rsp = &rspiocbq->iocb;
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
+ total_data_placed = get_job_data_placed(phba, rspiocbq);
/* Copy the completed data or set the error status */
if (job) {
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ if (ulp_status) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch (ulp_word4 & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
rc = -EACCES;
}
} else {
- rsp_size = rsp->un.genreq64.bdl.bdeSize;
bsg_reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
- rsp_size, 0);
+ total_data_placed, 0);
}
}
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
- struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = rdata->pnode;
struct fc_bsg_reply *bsg_reply = job->reply;
struct ulp_bde64 *bpl = NULL;
- uint32_t timeout;
struct lpfc_iocbq *cmdiocbq = NULL;
- IOCB_t *cmd;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
- int request_nseg;
- int reply_nseg;
+ int request_nseg, reply_nseg;
+ u32 num_entry;
struct bsg_job_data *dd_data;
unsigned long flags;
uint32_t creg_val;
int rc = 0;
int iocb_stat;
+ u16 ulp_context;
/* in case no data is transferred */
bsg_reply->reply_payload_rcv_len = 0;
goto free_dd;
}
- cmd = &cmdiocbq->iocb;
-
bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!bmp) {
rc = -ENOMEM;
goto free_cmp;
}
- cmd->un.genreq64.bdl.ulpIoTag32 = 0;
- cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.genreq64.bdl.bdeSize =
- (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
- cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
- cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- cmd->un.genreq64.w5.hcsw.Dfctl = 0;
- cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpContext = ndlp->nlp_rpi;
+ num_entry = request_nseg + reply_nseg;
+
if (phba->sli_rev == LPFC_SLI_REV4)
- cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- cmd->ulpOwner = OWN_CHIP;
+ ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ else
+ ulp_context = ndlp->nlp_rpi;
+
+ lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry,
+ phba->fc_ratov * 2);
+
+ cmdiocbq->num_bdes = num_entry;
cmdiocbq->vport = phba->pport;
+ cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- timeout = phba->fc_ratov * 2;
- cmd->ulpTimeout = timeout;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
+ cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
cmdiocbq->context1 = dd_data;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
if (iocb_stat == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed yet */
- if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
- cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
struct bsg_job_data *dd_data;
struct bsg_job *job;
struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
struct fc_bsg_ctels_reply *els_reply;
unsigned long flags;
unsigned int rsp_size;
int rc = 0;
+ u32 ulp_status, ulp_word4, total_data_placed;
dd_data = cmdiocbq->context1;
ndlp = dd_data->context_un.iocb.ndlp;
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
- cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
- rsp = &rspiocbq->iocb;
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
+ total_data_placed = get_job_data_placed(phba, rspiocbq);
pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
prsp = (struct lpfc_dmabuf *)pcmd->list.next;
*/
if (job) {
- if (rsp->ulpStatus == IOSTAT_SUCCESS) {
- rsp_size = rsp->un.elsreq64.bdl.bdeSize;
+ if (ulp_status == IOSTAT_SUCCESS) {
+ rsp_size = total_data_placed;
bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
prsp->virt,
rsp_size);
- } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+ } else if (ulp_status == IOSTAT_LS_RJT) {
bsg_reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
/* LS_RJT data returned in word 4 */
- rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+ rjt_data = (uint8_t *)&ulp_word4;
els_reply = &bsg_reply->reply_data.ctels_reply;
els_reply->status = FC_CTELS_STATUS_REJECT;
els_reply->rjt_data.action = rjt_data[3];
els_reply->rjt_data.reason_code = rjt_data[2];
els_reply->rjt_data.reason_explanation = rjt_data[1];
els_reply->rjt_data.vendor_unique = rjt_data[0];
+ } else if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SEQUENCE_TIMEOUT) {
+ rc = -ETIMEDOUT;
} else {
rc = -EIO;
}
 * we won't DMA into memory that is no longer allocated for the
 * request.
*/
-
cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
ndlp->nlp_DID, elscmd);
if (!cmdiocbq) {
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
- cmdsize);
+ job->request_payload.payload_len);
rpi = ndlp->nlp_rpi;
if (phba->sli_rev == LPFC_SLI_REV4)
- cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
+ bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com,
+ phba->sli4_hba.rpi_ids[rpi]);
else
cmdiocbq->iocb.ulpContext = rpi;
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->context1 = dd_data;
cmdiocbq->context_un.ndlp = ndlp;
- cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
+ cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
- if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
- cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
struct lpfc_bsg_event *evt;
struct event_data *evt_dat = NULL;
struct lpfc_iocbq *iocbq;
+ IOCB_t *iocb = NULL;
size_t offset = 0;
struct list_head head;
struct ulp_bde64 *bde;
int i;
struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
- struct lpfc_hbq_entry *hbqe;
struct lpfc_sli_ct_request *ct_req;
struct bsg_job *job = NULL;
struct fc_bsg_reply *bsg_reply;
struct bsg_job_data *dd_data = NULL;
unsigned long flags;
int size = 0;
+ u32 bde_count = 0;
INIT_LIST_HEAD(&head);
list_add_tail(&head, &piocbq->list);
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
/* take accumulated byte count from the last iocbq */
iocbq = list_entry(head.prev, typeof(*iocbq), list);
- evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ evt_dat->len = iocbq->wcqe_cmpl.total_data_placed;
+ else
+ evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
} else {
list_for_each_entry(iocbq, &head, list) {
- for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
+ iocb = &iocbq->iocb;
+			for (i = 0; i < iocb->ulpBdeCount; i++)
evt_dat->len +=
- iocbq->iocb.un.cont64[i].tus.f.bdeSize;
+ iocb->un.cont64[i].tus.f.bdeSize;
}
}
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
bdeBuf1 = iocbq->context2;
bdeBuf2 = iocbq->context3;
+
}
- for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = iocbq->wcqe_cmpl.word3;
+ else
+ bde_count = iocbq->iocb.ulpBdeCount;
+ for (i = 0; i < bde_count; i++) {
if (phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED) {
if (i == 0) {
- hbqe = (struct lpfc_hbq_entry *)
- &iocbq->iocb.un.ulpWord[0];
- size = hbqe->bde.tus.f.bdeSize;
+ size = iocbq->wqe.gen_req.bde.tus.f.bdeSize;
dmabuf = bdeBuf1;
} else if (i == 1) {
- hbqe = (struct lpfc_hbq_entry *)
- &iocbq->iocb.unsli3.
- sli3Words[4];
- size = hbqe->bde.tus.f.bdeSize;
+ size = iocbq->unsol_rcv_len;
dmabuf = bdeBuf2;
}
if ((offset + size) > evt_dat->len)
lpfc_in_buf_free(phba,
dmabuf);
} else {
- lpfc_post_buffer(phba,
- pring,
- 1);
+ lpfc_sli3_post_buffer(phba,
+ pring,
+ 1);
}
break;
default:
if (!(phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba,
- pring,
- 1);
+ lpfc_sli3_post_buffer(phba,
+ pring,
+ 1);
break;
}
}
phba->ct_ctx[
evt_dat->immed_dat].SID);
phba->ct_ctx[evt_dat->immed_dat].rxid =
- piocbq->iocb.ulpContext;
+ get_job_ulpcontext(phba, piocbq);
phba->ct_ctx[evt_dat->immed_dat].oxid =
- piocbq->iocb.unsli3.rcvsli3.ox_id;
+ get_job_rcvoxid(phba, piocbq);
phba->ct_ctx[evt_dat->immed_dat].SID =
- piocbq->iocb.un.rcvels.remoteID;
+ bf_get(wqe_els_did,
+ &piocbq->wqe.xmit_els_rsp.wqe_dest);
phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
} else
- evt_dat->immed_dat = piocbq->iocb.ulpContext;
+ evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq);
evt_dat->type = FC_REG_CT_EVENT;
list_add(&evt_dat->node, &evt->events_to_see);
struct bsg_job_data *dd_data;
struct bsg_job *job;
struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp;
struct lpfc_nodelist *ndlp;
unsigned long flags;
int rc = 0;
+ u32 ulp_status, ulp_word4;
dd_data = cmdiocbq->context1;
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
- cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
ndlp = dd_data->context_un.iocb.ndlp;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
- rsp = &rspiocbq->iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
/* Copy the completed job data or set the error status */
if (job) {
bsg_reply = job->reply;
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ if (ulp_status) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch (ulp_word4 & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
int num_entry)
{
- IOCB_t *icmd;
struct lpfc_iocbq *ctiocb = NULL;
int rc = 0;
struct lpfc_nodelist *ndlp = NULL;
struct bsg_job_data *dd_data;
unsigned long flags;
uint32_t creg_val;
+ u16 ulp_context, iotag;
ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
if (!ndlp) {
goto no_ctiocb;
}
- icmd = &ctiocb->iocb;
- icmd->un.xseq64.bdl.ulpIoTag32 = 0;
- icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
- icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
- icmd->un.xseq64.w5.hcsw.Dfctl = 0;
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
- icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- /* Fill in rest of iocb */
- icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
if (phba->sli_rev == LPFC_SLI_REV4) {
/* Do not issue unsol response if oxid not marked as valid */
if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
- icmd->ulpContext = phba->ct_ctx[tag].rxid;
- icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
- ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
- if (!ndlp) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
- "2721 ndlp null for oxid %x SID %x\n",
- icmd->ulpContext,
- phba->ct_ctx[tag].SID);
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
-
- /* get a refernece count so the ndlp doesn't go away while
- * we respond
- */
- if (!lpfc_nlp_get(ndlp)) {
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
- icmd->un.ulpWord[3] =
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
+ phba->ct_ctx[tag].oxid, num_entry,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
/* The exchange is done, mark the entry as invalid */
phba->ct_ctx[tag].valid = UNSOL_INVALID;
- } else
- icmd->ulpContext = (ushort) tag;
+ iotag = get_wqe_reqtag(ctiocb);
+ } else {
+ lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+ ctiocb->num_bdes = num_entry;
+ iotag = ctiocb->iocb.ulpIoTag;
+ }
- icmd->ulpTimeout = phba->fc_ratov * 2;
+ ulp_context = get_job_ulpcontext(phba, ctiocb);
/* Xmit CT response on exchange <xid> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
- icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
+ "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
+ ulp_context, iotag, tag, phba->link_state);
- ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
+ ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
ctiocb->vport = phba->pport;
ctiocb->context1 = dd_data;
ctiocb->context2 = cmp;
ctiocb->context3 = bmp;
ctiocb->context_un.ndlp = ndlp;
- ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
+ ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
- if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+ if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
- ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
{
struct lpfc_bsg_event *evt;
struct lpfc_iocbq *cmdiocbq, *rspiocbq;
- IOCB_t *cmd, *rsp;
struct lpfc_dmabuf *dmabuf;
struct ulp_bde64 *bpl = NULL;
struct lpfc_sli_ct_request *ctreq = NULL;
int time_left;
int iocb_stat = IOCB_SUCCESS;
unsigned long flags;
+ u32 status;
*txxri = 0;
*rxxri = 0;
goto err_get_xri_exit;
}
- cmd = &cmdiocbq->iocb;
- rsp = &rspiocbq->iocb;
-
memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
ctreq->CommandResponse.bits.Size = 0;
-
- cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
- cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
- cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
-
- cmd->un.xseq64.w5.hcsw.Fctl = LA;
- cmd->un.xseq64.w5.hcsw.Dfctl = 0;
- cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpContext = rpi;
-
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->context3 = dmabuf;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
- cmdiocbq->iocb_cmpl = NULL;
+ cmdiocbq->cmd_cmpl = NULL;
+
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1,
+ FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR);
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
- rspiocbq,
- (phba->fc_ratov * 2)
- + LPFC_DRVR_TIMEOUT);
- if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
+ rspiocbq, (phba->fc_ratov * 2)
+ + LPFC_DRVR_TIMEOUT);
+
+ status = get_job_ulpstatus(phba, rspiocbq);
+ if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) {
ret_val = -EIO;
goto err_get_xri_exit;
}
- *txxri = rsp->ulpContext;
+ *txxri = get_job_ulpcontext(phba, rspiocbq);
evt->waiting = 1;
evt->wait_time_stamp = jiffies;
}
/**
- * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
+ * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
* @phba: Pointer to HBA context object
* @rxxri: Receive exchange id
* @len: Number of data bytes
* This function allocates and posts a data buffer of sufficient size to receive
 * an unsolicited CT command.
**/
-static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
- size_t len)
+static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
+ size_t len)
{
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *cmdiocbq;
/* Queue buffers for the receive exchange */
num_bde = (uint32_t)rxbuffer->flag;
dmp = &rxbuffer->dma;
-
cmd = &cmdiocbq->iocb;
i = 0;
ret_val = -EIO;
goto err_post_rxbufs_exit;
}
-
cmd = &cmdiocbq->iocb;
i = 0;
}
size_t segment_len = 0, segment_offset = 0, current_offset = 0;
uint16_t rpi = 0;
struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
- IOCB_t *cmd, *rsp = NULL;
+ union lpfc_wqe128 *cmdwqe, *rspwqe;
struct lpfc_sli_ct_request *ctreq;
struct lpfc_dmabuf *txbmp;
struct ulp_bde64 *txbpl = NULL;
goto loopback_test_exit;
}
- rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+ rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size);
if (rc) {
lpfcdiag_loop_self_unreg(phba, rpi);
goto loopback_test_exit;
goto err_loopback_test_exit;
}
- cmd = &cmdiocbq->iocb;
- if (phba->sli_rev < LPFC_SLI_REV4)
- rsp = &rspiocbq->iocb;
+ cmdwqe = &cmdiocbq->wqe;
+ memset(cmdwqe, 0, sizeof(union lpfc_wqe));
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rspwqe = &rspiocbq->wqe;
+ memset(rspwqe, 0, sizeof(union lpfc_wqe));
+ }
INIT_LIST_HEAD(&head);
list_add_tail(&head, &txbuffer->dma.list);
/* Build the XMIT_SEQUENCE iocb */
num_bde = (uint32_t)txbuffer->flag;
- cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
- cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
- cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
-
- cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
- cmd->un.xseq64.w5.hcsw.Dfctl = 0;
- cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
+ cmdiocbq->num_bdes = num_bde;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
+ cmdiocbq->vport = phba->pport;
+ cmdiocbq->cmd_cmpl = NULL;
+ cmdiocbq->context3 = txbmp;
if (phba->sli_rev < LPFC_SLI_REV4) {
- cmd->ulpContext = txxri;
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri,
+ num_bde, FC_RCTL_DD_UNSOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+
} else {
- cmd->un.xseq64.bdl.ulpIoTag32 = 0;
- cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
- cmdiocbq->context3 = txbmp;
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp,
+ phba->sli4_hba.rpi_ids[rpi], 0xffff,
+ full_size, FC_RCTL_DD_UNSOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
cmdiocbq->sli4_xritag = NO_XRI;
- cmd->unsli3.rcvsli3.ox_id = 0xffff;
}
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
- cmdiocbq->vport = phba->pport;
- cmdiocbq->iocb_cmpl = NULL;
+
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT);
-
- if ((iocb_stat != IOCB_SUCCESS) ||
- ((phba->sli_rev < LPFC_SLI_REV4) &&
- (rsp->ulpStatus != IOSTAT_SUCCESS))) {
+ if (iocb_stat != IOCB_SUCCESS ||
+ (phba->sli_rev < LPFC_SLI_REV4 &&
+ (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3126 Failed loopback test issue iocb: "
"iocb_stat:x%x\n", iocb_stat);
cmd->ulpClass = CLASS3;
cmd->ulpOwner = OWN_CHIP;
cmd->ulpLe = 1; /* Limited Edition */
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
/* We want the firmware to timeout before we do */
cmd->ulpTimeout = MENLO_TIMEOUT - 5;
- cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
+ cmdiocbq->cmd_cmpl = lpfc_bsg_menlo_cmd_cmp;
cmdiocbq->context1 = dd_data;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O abort window is still open */
- if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
+ if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return -EAGAIN;
}
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
void lpfc_disc_start(struct lpfc_vport *);
void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
void lpfc_cleanup(struct lpfc_vport *);
+void lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd);
void lpfc_disc_timeout(struct timer_list *);
int lpfc_unregister_fcf_prep(struct lpfc_hba *);
struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
uint8_t, struct lpfc_nodelist *,
uint32_t, uint32_t);
+void lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job);
void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
int lpfc_hba_down_prep(struct lpfc_hba *);
int lpfc_hba_down_post(struct lpfc_hba *);
void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
-int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
+int lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt);
void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
int lpfc_online(struct lpfc_hba *);
void lpfc_unblock_mgmt_io(struct lpfc_hba *);
struct lpfc_iocbq *pwqe);
int lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb, void *cmpl);
+void lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
+ u32 elscmd, u8 tmo, u8 expect_rsp);
+void lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry,
+ u8 tmo);
+void lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 num_entry, u8 rctl, u8 last_seq,
+ u8 cr_cx_cmd);
+void lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
+ bool ia);
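The new prep helpers declared above are typically fed an SLI-rev-dependent
context tag by the caller. A short sketch of the pattern used at the call
sites converted in this series (variable names follow those call sites; this
is illustrative, not a new API):

	u16 ulp_context;

	if (phba->sli_rev == LPFC_SLI_REV4)
		ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	else
		ulp_context = ndlp->nlp_rpi;

	/* The helper then builds a WQE (SLI-4) or an IOCB (SLI-3). */
	lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry, tmo);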
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq);
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"0146 Ignoring unsolicited CT No HBQ "
"status = x%x\n",
- piocbq->iocb.ulpStatus);
+ get_job_ulpstatus(phba, piocbq));
}
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"0145 Ignoring unsolicted CT HBQ Size:%d "
"status = x%x\n",
- size, piocbq->iocb.ulpStatus);
+ size, get_job_ulpstatus(phba, piocbq));
}
static void
* lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands
* @ndlp: pointer to a node-list data structure.
* @ct_req: pointer to the CT request data structure.
- * @rx_id: rx_id of the received UNSOL CT command
+ * @ulp_context: context of received UNSOL CT command
* @ox_id: ox_id of the UNSOL CT command
*
* This routine is invoked by the lpfc_ct_handle_mibreq routine for sending
static void
lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
struct lpfc_sli_ct_request *ct_req,
- u16 rx_id, u16 ox_id)
+ u16 ulp_context, u16 ox_id)
{
struct lpfc_vport *vport = ndlp->vport;
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *bmp = NULL;
struct lpfc_dmabuf *mp = NULL;
struct ulp_bde64 *bpl;
- IOCB_t *icmd;
u8 rc = 0;
+ u32 tmo;
/* fill in BDEs for command */
mp = kmalloc(sizeof(*mp), GFP_KERNEL);
goto ct_free_bmpvirt;
}
- icmd = &cmdiocbq->iocb;
- icmd->un.genreq64.bdl.ulpIoTag32 = 0;
- icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.genreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
- icmd->un.genreq64.w5.hcsw.Fctl = (LS | LA);
- icmd->un.genreq64.w5.hcsw.Dfctl = 0;
- icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
- icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
- icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
+ ox_id, 1, FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
+ } else {
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp, 0, ulp_context, 1,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+ }
/* Save for completion so we can release these resources */
- cmdiocbq->context1 = lpfc_nlp_get(ndlp);
cmdiocbq->context2 = (uint8_t *)mp;
cmdiocbq->context3 = (uint8_t *)bmp;
- cmdiocbq->iocb_cmpl = lpfc_ct_unsol_cmpl;
- icmd->ulpContext = rx_id; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = ox_id;
- icmd->un.ulpWord[3] =
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- icmd->ulpTimeout = (3 * phba->fc_ratov);
+ cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl;
+ tmo = (3 * phba->fc_ratov);
cmdiocbq->retry = 0;
cmdiocbq->vport = vport;
cmdiocbq->context_un.ndlp = NULL;
- cmdiocbq->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+ cmdiocbq->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
+
+ cmdiocbq->context1 = lpfc_nlp_get(ndlp);
+ if (!cmdiocbq->context1)
+ goto ct_no_ndlp;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
- if (!rc)
- return;
+ if (rc) {
+ lpfc_nlp_put(ndlp);
+ goto ct_no_ndlp;
+ }
+ return;
+ct_no_ndlp:
rc = 6;
- lpfc_nlp_put(ndlp);
lpfc_sli_release_iocbq(phba, cmdiocbq);
ct_free_bmpvirt:
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
{
struct lpfc_sli_ct_request *ct_req;
struct lpfc_nodelist *ndlp = NULL;
- struct lpfc_vport *vport = NULL;
- IOCB_t *icmd = &ctiocbq->iocb;
- u32 mi_cmd, vpi;
- u32 did = 0;
-
- vpi = ctiocbq->iocb.unsli3.rcvsli3.vpi;
- vport = lpfc_find_vport_by_vpid(phba, vpi);
- if (!vport) {
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "6437 Unsol CT: VPORT NULL vpi : x%x\n",
- vpi);
- return;
- }
-
- did = ctiocbq->iocb.un.rcvels.remoteID;
- if (icmd->ulpStatus) {
+ struct lpfc_vport *vport = ctiocbq->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, ctiocbq);
+ u32 ulp_word4 = get_job_word4(phba, ctiocbq);
+ u32 did;
+ u32 mi_cmd;
+
+ did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp);
+ if (ulp_status) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6438 Unsol CT: status:x%x/x%x did : x%x\n",
- icmd->ulpStatus, icmd->un.ulpWord[4], did);
+ ulp_status, ulp_word4, did);
return;
}
ct_req = ((struct lpfc_sli_ct_request *)
(((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
-
mi_cmd = ct_req->CommandResponse.bits.CmdRsp;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6442 : MI Cmd : x%x Not Supported\n", mi_cmd);
lpfc_ct_reject_event(ndlp, ct_req,
- ctiocbq->iocb.ulpContext,
- ctiocbq->iocb.unsli3.rcvsli3.ox_id);
+ bf_get(wqe_ctxt_tag,
+ &ctiocbq->wqe.xmit_els_rsp.wqe_com),
+ bf_get(wqe_rcvoxid,
+ &ctiocbq->wqe.xmit_els_rsp.wqe_com));
}
/**
IOCB_t *icmd = &ctiocbq->iocb;
int i;
struct lpfc_iocbq *iocbq;
+ struct lpfc_iocbq *iocb;
dma_addr_t dma_addr;
uint32_t size;
struct list_head head;
struct lpfc_sli_ct_request *ct_req;
struct lpfc_dmabuf *bdeBuf1 = ctiocbq->context2;
struct lpfc_dmabuf *bdeBuf2 = ctiocbq->context3;
+ u32 status, parameter, bde_count = 0;
+ struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
ctiocbq->context1 = NULL;
ctiocbq->context2 = NULL;
ctiocbq->context3 = NULL;
- if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
+ wcqe_cmpl = &ctiocbq->wcqe_cmpl;
+ status = get_job_ulpstatus(phba, ctiocbq);
+ parameter = get_job_word4(phba, ctiocbq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = wcqe_cmpl->word3;
+ else
+ bde_count = icmd->ulpBdeCount;
+
+ if (unlikely(status == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
- } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ } else if ((status == IOSTAT_LOCAL_REJECT) &&
+ ((parameter & IOERR_PARAM_MASK) ==
IOERR_RCV_BUFFER_WAITING)) {
/* Not enough posted buffers; Try posting more buffers */
phba->fc_stat.NoRcvBuf++;
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba, pring, 2);
+ lpfc_sli3_post_buffer(phba, pring, 2);
return;
}
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
*/
- if (icmd->ulpBdeCount == 0)
+ if (bde_count == 0)
return;
- if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
- ctiocbq->context2 = bdeBuf1;
- if (icmd->ulpBdeCount == 2)
- ctiocbq->context3 = bdeBuf2;
- } else {
- dma_addr = getPaddr(icmd->un.cont64[0].addrHigh,
- icmd->un.cont64[0].addrLow);
- ctiocbq->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
- dma_addr);
- if (icmd->ulpBdeCount == 2) {
- dma_addr = getPaddr(icmd->un.cont64[1].addrHigh,
- icmd->un.cont64[1].addrLow);
- ctiocbq->context3 = lpfc_sli_ringpostbuf_get(phba,
- pring,
- dma_addr);
- }
- }
+ ctiocbq->context2 = bdeBuf1;
+ if (bde_count == 2)
+ ctiocbq->context3 = bdeBuf2;
ct_req = ((struct lpfc_sli_ct_request *)
(((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
INIT_LIST_HEAD(&head);
list_add_tail(&head, &ctiocbq->list);
- list_for_each_entry(iocbq, &head, list) {
- icmd = &iocbq->iocb;
- if (icmd->ulpBdeCount == 0)
+ list_for_each_entry(iocb, &head, list) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = iocb->wcqe_cmpl.word3;
+ else
+ bde_count = iocb->iocb.ulpBdeCount;
+
+ if (!bde_count)
continue;
- bdeBuf1 = iocbq->context2;
- iocbq->context2 = NULL;
- size = icmd->un.cont64[0].tus.f.bdeSize;
+ bdeBuf1 = iocb->context2;
+ iocb->context2 = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ size = iocb->wqe.gen_req.bde.tus.f.bdeSize;
+ else
+ size = iocb->iocb.un.cont64[0].tus.f.bdeSize;
lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size);
lpfc_in_buf_free(phba, bdeBuf1);
- if (icmd->ulpBdeCount == 2) {
- bdeBuf2 = iocbq->context3;
- iocbq->context3 = NULL;
- size = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize;
+ if (bde_count == 2) {
+ bdeBuf2 = iocb->context3;
+ iocb->context3 = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ size = iocb->unsol_rcv_len;
+ else
+ size = iocb->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize;
lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2,
size);
lpfc_in_buf_free(phba, bdeBuf2);
lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
lpfc_in_buf_free(phba, mp);
}
- lpfc_post_buffer(phba, pring, i);
+ lpfc_sli3_post_buffer(phba, pring, i);
}
list_del(&head);
}
static int
lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
- void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *),
+ void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *),
struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry,
uint32_t tmo, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
- IOCB_t *icmd;
struct lpfc_iocbq *geniocb;
int rc;
+ u16 ulp_context;
/* Allocate buffer for command iocb */
geniocb = lpfc_sli_get_iocbq(phba);
if (geniocb == NULL)
return 1;
- icmd = &geniocb->iocb;
- icmd->un.genreq64.bdl.ulpIoTag32 = 0;
- icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+ /* Update the num_entry bde count */
+ geniocb->num_bdes = num_entry;
geniocb->context3 = (uint8_t *) bmp;
geniocb->event_tag = event_tag;
- /* Fill in payload, bp points to frame payload */
- icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
-
- /* Fill in rest of iocb */
- icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- icmd->un.genreq64.w5.hcsw.Dfctl = 0;
- icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
-
if (!tmo) {
/* FC spec states we need 3 * ratov for CT requests */
tmo = (3 * phba->fc_ratov);
}
- icmd->ulpTimeout = tmo;
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
- icmd->ulpContext = ndlp->nlp_rpi;
+
if (phba->sli_rev == LPFC_SLI_REV4)
- icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ else
+ ulp_context = ndlp->nlp_rpi;
- if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
- /* For GEN_REQUEST64_CR, use the RPI */
- icmd->ulpCt_h = 0;
- icmd->ulpCt_l = 0;
- }
+ lpfc_sli_prep_gen_req(phba, geniocb, bmp, ulp_context, num_entry, tmo);
/* Issue GEN REQ IOCB for NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0119 Issue GEN REQ IOCB to NPORT x%x "
"Data: x%x x%x\n",
- ndlp->nlp_DID, icmd->ulpIoTag,
+ ndlp->nlp_DID, geniocb->iotag,
vport->port_state);
- geniocb->iocb_cmpl = cmpl;
- geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+ geniocb->cmd_cmpl = cmpl;
+ geniocb->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
geniocb->vport = vport;
geniocb->retry = retry;
geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
if (!geniocb->context_un.ndlp)
goto out;
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
if (rc == IOCB_ERROR) {
- geniocb->context_un.ndlp = NULL;
lpfc_nlp_put(ndlp);
goto out;
}
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp;
struct lpfc_dmabuf *outp;
struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_sli_ct_request *CTreq;
struct lpfc_nodelist *ndlp;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
int rc, type;
/* First save ndlp, before we overwrite it */
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
+
inp = (struct lpfc_dmabuf *) cmdiocb->context1;
outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GID_FT cmpl: status:x%x/x%x rtry:%d",
- irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
+ ulp_status, ulp_word4, vport->fc_ns_retry);
/* Ignore response if link flipped after this request was made */
if (cmdiocb->event_tag != phba->fc_eventTag) {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
- if (lpfc_error_lost_link(irsp)) {
+ if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0226 NS query failed due to link event\n");
if (vport->fc_flag & FC_RSCN_MODE)
}
spin_unlock_irq(shost->host_lock);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
- if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ if (ulp_status != IOSTAT_LOCAL_REJECT ||
+ (ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0257 GID_FT Query error: 0x%x 0x%x\n",
- irsp->ulpStatus, vport->fc_ns_retry);
+ ulp_status, vport->fc_ns_retry);
} else {
/* Good status, continue checking */
CTreq = (struct lpfc_sli_ct_request *) inp->virt;
CTreq->un.gid.Fc4Type,
vport->num_disc_nodes,
vport->gidft_inp,
- irsp->un.genreq64.bdl.bdeSize);
+ get_job_data_placed(phba, rspiocb));
lpfc_ns_rsp(vport,
outp,
CTreq->un.gid.Fc4Type,
- (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
+ get_job_data_placed(phba, rspiocb));
} else if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* NameServer Rsp Error */
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp;
struct lpfc_dmabuf *outp;
struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_sli_ct_request *CTreq;
struct lpfc_nodelist *ndlp;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
int rc;
/* First save ndlp, before we overwrite it */
cmdiocb->context_un.rsp_iocb = rspiocb;
inp = (struct lpfc_dmabuf *)cmdiocb->context1;
outp = (struct lpfc_dmabuf *)cmdiocb->context2;
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GID_PT cmpl: status:x%x/x%x rtry:%d",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
vport->fc_ns_retry);
/* Ignore response if link flipped after this request was made */
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
- if (lpfc_error_lost_link(irsp)) {
+ if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"4166 NS query failed due to link event\n");
if (vport->fc_flag & FC_RSCN_MODE)
}
spin_unlock_irq(shost->host_lock);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
- if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ if (ulp_status != IOSTAT_LOCAL_REJECT ||
+ (ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"4103 GID_FT Query error: 0x%x 0x%x\n",
- irsp->ulpStatus, vport->fc_ns_retry);
+ ulp_status, vport->fc_ns_retry);
} else {
/* Good status, continue checking */
CTreq = (struct lpfc_sli_ct_request *)inp->virt;
CTreq->un.gid.Fc4Type,
vport->num_disc_nodes,
vport->gidft_inp,
- irsp->un.genreq64.bdl.bdeSize);
+ get_job_data_placed(phba, rspiocb));
lpfc_ns_rsp(vport,
outp,
CTreq->un.gid.Fc4Type,
- (uint32_t)(irsp->un.genreq64.bdl.bdeSize));
+ get_job_data_placed(phba, rspiocb));
} else if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* NameServer Rsp Error */
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
struct lpfc_sli_ct_request *CTrsp;
int did, rc, retry;
uint8_t fbits;
struct lpfc_nodelist *ndlp = NULL, *free_ndlp = NULL;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
did = be32_to_cpu(did);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GFF_ID cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ ulp_status, ulp_word4, did);
/* Ignore response if link flipped after this request was made */
if (cmdiocb->event_tag != phba->fc_eventTag) {
goto iocb_free;
}
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
/* Check for retry */
if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
retry = 1;
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch ((irsp->un.ulpWord[4] &
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch ((ulp_word4 &
IOERR_PARAM_MASK)) {
case IOERR_NO_RESOURCES:
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0267 NameServer GFF Rsp "
"x%x Error (%d %d) Data: x%x x%x\n",
- did, irsp->ulpStatus, irsp->un.ulpWord[4],
+ did, ulp_status, ulp_word4,
vport->fc_flag, vport->fc_rscn_id_cnt);
}
static void
lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+ struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *)cmdiocb->context1;
struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *)cmdiocb->context2;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_nodelist *ndlp = NULL;
struct lpfc_nodelist *ns_ndlp = NULL;
uint32_t fc4_data_0, fc4_data_1;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId;
did = be32_to_cpu(did);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GFT_ID cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ ulp_status, ulp_word4, did);
/* Ignore response if link flipped after this request was made */
if ((uint32_t) cmdiocb->event_tag != phba->fc_eventTag) {
/* Preserve the nameserver node to release the reference. */
ns_ndlp = cmdiocb->context_un.ndlp;
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
}
} else
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
+ "3065 GFT_ID failed x%08x\n", ulp_status);
out:
lpfc_ct_free_iocb(phba, cmdiocb);
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_dmabuf *inp;
struct lpfc_dmabuf *outp;
- IOCB_t *irsp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_nodelist *ndlp;
int cmdcode, rc;
uint8_t retry;
uint32_t latt;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
/* First save ndlp, before we overwrite it */
ndlp = cmdiocb->context_un.ndlp;
inp = (struct lpfc_dmabuf *) cmdiocb->context1;
outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- irsp = &rspiocb->iocb;
cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
CommandResponse.bits.CmdRsp);
latt = lpfc_els_chk_latt(vport);
- /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
+ /* RFT request completes status <ulp_status> CmdRsp <CmdRsp> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0209 CT Request completes, latt %d, "
- "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
- latt, irsp->ulpStatus,
+ "ulp_status x%x CmdRsp x%x, Context x%x, Tag x%x\n",
+ latt, ulp_status,
CTrsp->CommandResponse.bits.CmdRsp,
- cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
+ get_job_ulpcontext(phba, cmdiocb), cmdiocb->iotag);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"CT cmd cmpl: status:x%x/x%x cmd:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode);
+ ulp_status, ulp_word4, cmdcode);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0268 NS cmd x%x Error (x%x x%x)\n",
- cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
+ cmdcode, ulp_status, ulp_word4);
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_SLI_DOWN) ||
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_SLI_ABORTED)))
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SLI_DOWN) ||
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SLI_ABORTED)))
goto out;
retry = cmdiocb->retry;
lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_sli_ct_request *CTrsp = outp->virt;
uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_nodelist *ndlp, *free_ndlp = NULL;
uint32_t latt, cmd, err;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
latt = lpfc_els_chk_latt(vport);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"FDMI cmpl: status:x%x/x%x latt:%d",
- irsp->ulpStatus, irsp->un.ulpWord[4], latt);
+ ulp_status, ulp_word4, latt);
- if (latt || irsp->ulpStatus) {
+ if (latt || ulp_status) {
/* Look for a retryable error */
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch ((ulp_word4 & IOERR_PARAM_MASK)) {
case IOERR_SLI_ABORTED:
case IOERR_SLI_DOWN:
/* Driver aborted this IO. No retry as error
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0229 FDMI cmd %04x failed, latt = %d "
- "ulpStatus: x%x, rid x%x\n",
- be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ "ulp_status: x%x, rid x%x\n",
+ be16_to_cpu(fdmi_cmd), latt, ulp_status,
+ ulp_word4);
}
free_ndlp = cmdiocb->context_un.ndlp;
if (cmd == SLI_CTAS_DALLAPP_ID)
lpfc_ct_free_iocb(phba, cmdiocb);
- if (lpfc_els_chk_latt(vport) || rspiocb->iocb.ulpStatus) {
+ if (lpfc_els_chk_latt(vport) || get_job_ulpstatus(phba, rspiocb)) {
if (cmd != SLI_CTAS_DALLAPP_ID)
return;
}
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_slow_ring_trc);
if (!phba->slow_ring_trc) {
- phba->slow_ring_trc = kmalloc(
- (sizeof(struct lpfc_debugfs_trc) *
- lpfc_debugfs_max_slow_ring_trc),
+ phba->slow_ring_trc = kcalloc(
+ lpfc_debugfs_max_slow_ring_trc,
+ sizeof(struct lpfc_debugfs_trc),
GFP_KERNEL);
if (!phba->slow_ring_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
goto debug_failed;
}
atomic_set(&phba->slow_ring_trc_cnt, 0);
- memset(phba->slow_ring_trc, 0,
- (sizeof(struct lpfc_debugfs_trc) *
- lpfc_debugfs_max_slow_ring_trc));
}
snprintf(name, sizeof(name), "nvmeio_trc");
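The hunk above folds a kmalloc()+memset() pair into kcalloc(), which zeroes the
allocation and, unlike the open-coded sizeof() * count product, fails cleanly if
the multiplication would overflow. A userspace analogue of the same change,
using calloc(3) and an assumed stand-in trace-entry type:

    #include <stdlib.h>
    #include <string.h>

    struct trc_entry { unsigned long ts; char msg[64]; }; /* stand-in type */

    /* Before: explicit multiply plus memset; the product can overflow. */
    static struct trc_entry *alloc_trc_old(size_t n)
    {
        struct trc_entry *t = malloc(sizeof(*t) * n);

        if (t)
            memset(t, 0, sizeof(*t) * n);
        return t;
    }

    /* After: calloc() zeroes and rejects n * size overflow, like kcalloc(). */
    static struct trc_entry *alloc_trc_new(size_t n)
    {
        return calloc(n, sizeof(struct trc_entry));
    }

    int main(void)
    {
        struct trc_entry *a = alloc_trc_old(16);
        struct trc_entry *b = alloc_trc_new(16);

        free(a);
        free(b);
        return 0;
    }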
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
/**
* lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
* @vport: pointer to a host virtual N_Port data structure.
- * @expectRsp: flag indicating whether response is expected.
- * @cmdSize: size of the ELS command.
- * @retry: number of retries to the command IOCB when it fails.
+ * @expect_rsp: flag indicating whether response is expected.
+ * @cmd_size: size of the ELS command.
+ * @retry: number of retries for the command when it fails.
* @ndlp: pointer to a node-list data structure.
* @did: destination identifier.
* @elscmd: the ELS command code.
* NULL - when els iocb data structure allocation/preparation failed
**/
struct lpfc_iocbq *
-lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
- uint16_t cmdSize, uint8_t retry,
- struct lpfc_nodelist *ndlp, uint32_t did,
- uint32_t elscmd)
+lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
+ u16 cmd_size, u8 retry,
+ struct lpfc_nodelist *ndlp, u32 did,
+ u32 elscmd)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
- struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
- struct ulp_bde64 *bpl;
- IOCB_t *icmd;
-
+ struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
+ struct ulp_bde64_le *bpl;
+ u32 timeout = 0;
if (!lpfc_is_link_up(phba))
return NULL;
/* Allocate buffer for command iocb */
elsiocb = lpfc_sli_get_iocbq(phba);
-
- if (elsiocb == NULL)
+ if (!elsiocb)
return NULL;
/*
* in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
*/
if ((did == Fabric_DID) &&
- (phba->hba_flag & HBA_FIP_SUPPORT) &&
- ((elscmd == ELS_CMD_FLOGI) ||
- (elscmd == ELS_CMD_FDISC) ||
- (elscmd == ELS_CMD_LOGO)))
+ (phba->hba_flag & HBA_FIP_SUPPORT) &&
+ ((elscmd == ELS_CMD_FLOGI) ||
+ (elscmd == ELS_CMD_FDISC) ||
+ (elscmd == ELS_CMD_LOGO)))
switch (elscmd) {
case ELS_CMD_FLOGI:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
+ elsiocb->cmd_flag |=
+ ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
case ELS_CMD_FDISC:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
+ elsiocb->cmd_flag |=
+ ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
case ELS_CMD_LOGO:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
+ elsiocb->cmd_flag |=
+ ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
}
else
- elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
-
- icmd = &elsiocb->iocb;
+ elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
if (pcmd)
pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
if (!pcmd || !pcmd->virt)
INIT_LIST_HEAD(&pcmd->list);
/* Allocate buffer for response payload */
- if (expectRsp) {
- prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (expect_rsp) {
+ prsp = kmalloc(sizeof(*prsp), GFP_KERNEL);
if (prsp)
prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&prsp->phys);
if (!prsp || !prsp->virt)
goto els_iocb_free_prsp_exit;
INIT_LIST_HEAD(&prsp->list);
- } else
+ } else {
prsp = NULL;
+ }
/* Allocate buffer for Buffer ptr list */
- pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL);
if (pbuflist)
pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&pbuflist->phys);
INIT_LIST_HEAD(&pbuflist->list);
- if (expectRsp) {
- icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
- icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
- icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
-
- icmd->un.elsreq64.remoteID = did; /* DID */
- icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
- if (elscmd == ELS_CMD_FLOGI)
- icmd->ulpTimeout = FF_DEF_RATOV * 2;
- else if (elscmd == ELS_CMD_LOGO)
- icmd->ulpTimeout = phba->fc_ratov;
- else
- icmd->ulpTimeout = phba->fc_ratov * 2;
- } else {
- icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
- icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
- icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
- icmd->un.xseq64.xmit_els_remoteID = did; /* DID */
- icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
- }
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
-
- /*
- * If we have NPIV enabled, we want to send ELS traffic by VPI.
- * For SLI4, since the driver controls VPIs we also want to include
- * all ELS pt2pt protocol traffic as well.
- */
- if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
- ((phba->sli_rev == LPFC_SLI_REV4) &&
- (vport->fc_flag & FC_PT2PT))) {
-
- if (expectRsp) {
- icmd->un.elsreq64.myID = vport->fc_myDID;
-
- /* For ELS_REQUEST64_CR, use the VPI by default */
- icmd->ulpContext = phba->vpi_ids[vport->vpi];
+ if (expect_rsp) {
+ switch (elscmd) {
+ case ELS_CMD_FLOGI:
+ timeout = FF_DEF_RATOV * 2;
+ break;
+ case ELS_CMD_LOGO:
+ timeout = phba->fc_ratov;
+ break;
+ default:
+ timeout = phba->fc_ratov * 2;
}
- icmd->ulpCt_h = 0;
- /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
- if (elscmd == ELS_CMD_ECHO)
- icmd->ulpCt_l = 0; /* context = invalid RPI */
- else
- icmd->ulpCt_l = 1; /* context = VPI */
+ /* Fill SGE for the num bde count */
+ elsiocb->num_bdes = 2;
}
- bpl = (struct ulp_bde64 *) pbuflist->virt;
- bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
- bpl->tus.f.bdeSize = cmdSize;
- bpl->tus.f.bdeFlags = 0;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bmp = pcmd;
+ else
+ bmp = pbuflist;
- if (expectRsp) {
+ lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
+ elscmd, timeout, expect_rsp);
+
+ bpl = (struct ulp_bde64_le *)pbuflist->virt;
+ bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
+ bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
+ bpl->type_size = cpu_to_le32(cmd_size);
+ bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
+
+ if (expect_rsp) {
bpl++;
- bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
- bpl->tus.f.bdeSize = FCELSSIZE;
- bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
+ bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
+ bpl->type_size = cpu_to_le32(FCELSSIZE);
+ bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
}
elsiocb->context2 = pcmd;
elsiocb->vport = vport;
elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
- if (prsp) {
+ if (prsp)
list_add(&prsp->list, &pcmd->list);
- }
- if (expectRsp) {
+ if (expect_rsp) {
/* Xmit ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0116 Xmit ELS command x%x to remote "
"NPORT x%x I/O tag: x%x, port state:x%x "
- "rpi x%x fc_flag:x%x nlp_flag:x%x vport:x%p\n",
+ "rpi x%x fc_flag:x%x\n",
elscmd, did, elsiocb->iotag,
vport->port_state, ndlp->nlp_rpi,
- vport->fc_flag, ndlp->nlp_flag, vport);
+ vport->fc_flag);
} else {
/* Xmit ELS response <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"NPORT x%x I/O tag: x%x, size: x%x "
"port_state x%x rpi x%x fc_flag x%x\n",
elscmd, ndlp->nlp_DID, elsiocb->iotag,
- cmdSize, vport->port_state,
+ cmd_size, vport->port_state,
ndlp->nlp_rpi, vport->fc_flag);
}
+
return elsiocb;
els_iocb_free_pbuf_exit:
- if (expectRsp)
+ if (expect_rsp)
lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
kfree(pbuflist);
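Note how the rewritten BPL setup stores each buffer descriptor as explicit
little-endian words with cpu_to_le32(), replacing the old pattern of composing
tus.w in CPU order and byte-swapping in place. A userspace sketch of the
build-in-CPU-order, store-in-LE-order idiom, with htole32() standing in for
cpu_to_le32() and a simplified descriptor layout (the type encoding of the real
struct ulp_bde64_le is assumed here, not copied from the driver):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed type encoding; the driver ORs its ULP_BDE64_TYPE_* constant
     * into the same word in just this way.
     */
    #define BDE_TYPE_BDE_64 (0x0u << 24)

    struct bde64_le {            /* simplified stand-in descriptor */
        uint32_t addr_low;       /* little-endian on the wire */
        uint32_t addr_high;
        uint32_t type_size;      /* type bits up top, size below */
    };

    static void fill_bde(struct bde64_le *bpl, uint64_t phys, uint32_t size)
    {
        bpl->addr_low  = htole32((uint32_t)(phys & 0xffffffffu));
        bpl->addr_high = htole32((uint32_t)(phys >> 32));
        bpl->type_size = htole32(size | BDE_TYPE_BDE_64);
    }

    int main(void)
    {
        struct bde64_le bde;

        fill_bde(&bde, 0x0000000123456000ULL, 116 /* sample cmd size */);
        printf("low=%08x high=%08x ts=%08x\n",
               le32toh(bde.addr_low), le32toh(bde.addr_high),
               le32toh(bde.type_size));
        return 0;
    }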
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
* @sp: pointer to service parameter data structure.
- * @irsp: pointer to the IOCB within the lpfc response IOCB.
+ * @ulp_word4: command response value.
*
* This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
* function to handle the completion of a Fabric Login (FLOGI) into a fabric
**/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
- struct serv_parm *sp, IOCB_t *irsp)
+ struct serv_parm *sp, uint32_t ulp_word4)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
spin_unlock_irq(shost->host_lock);
}
- vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ vport->fc_myDID = ulp_word4 & Mask_DID;
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
ndlp->nlp_class_sup = 0;
if (rc)
vport->fc_myDID = PT2PT_LocalID;
- /* Decrement ndlp reference count indicating that ndlp can be
- * safely released when other references to it are done.
+ /* If not registered with a transport, decrement ndlp reference
+ * count indicating that ndlp can be safely released when other
+ * references are removed.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
+ lpfc_nlp_put(ndlp);
ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
if (!ndlp) {
goto fail;
}
} else {
- /* This side will wait for the PLOGI, decrement ndlp reference
- * count indicating that ndlp can be released when other
- * references to it are done.
+ /* This side will wait for the PLOGI. If not registered with
+ * a transport, decrement node reference count indicating that
+ * ndlp can be released when other references are removed.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
+ lpfc_nlp_put(ndlp);
/* Start discovery - this should just do CLEAR_LA */
lpfc_disc_start(vport);
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_nodelist *ndlp = cmdiocb->context1;
+ IOCB_t *irsp;
struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
struct serv_parm *sp;
uint16_t fcf_index;
int rc;
+ u32 ulp_status, ulp_word4, tmo;
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
/* One additional decrement on node reference count to
* trigger the release of the node
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
+ lpfc_nlp_put(ndlp);
goto out;
}
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ }
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"FLOGI cmpl: status:x%x/x%x state:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
vport->port_state);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/*
* In case of FIP mode, perform roundrobin FCF failover
* due to new FCF discovery
goto stop_rr_fcf_flogi;
if ((phba->fcoe_cvl_eventtag_attn ==
phba->fcoe_cvl_eventtag) &&
- (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ (ulp_status == IOSTAT_LOCAL_REJECT) &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_SLI_ABORTED))
goto stop_rr_fcf_flogi;
else
"status:x%x/x%x, tmo:x%x, perform "
"roundrobin FCF failover\n",
phba->fcf.current_rec.fcf_indx,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout);
+ ulp_status, ulp_word4, tmo);
lpfc_sli4_set_fcf_flogi_fail(phba,
phba->fcf.current_rec.fcf_indx);
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
stop_rr_fcf_flogi:
/* FLOGI failure */
- if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2858 FLOGI failure Status:x%x/x%x TMO"
":x%x Data x%x x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, phba->hba_flag,
- phba->fcf.fcf_flag);
+ ulp_status, ulp_word4, tmo,
+ phba->hba_flag, phba->fcf.fcf_flag);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
"0150 FLOGI failure Status:x%x/x%x "
"xri x%x TMO:x%x refcnt %d\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- cmdiocb->sli4_xritag, irsp->ulpTimeout,
- kref_read(&ndlp->kref));
+ ulp_status, ulp_word4, cmdiocb->sli4_xritag,
+ tmo, kref_read(&ndlp->kref));
/* If this is not a loop open failure, bail out */
- if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_LOOP_OPEN_FAILURE)))
+ if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_LOOP_OPEN_FAILURE))) {
+ /* FLOGI failure */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0100 FLOGI failure Status:x%x/x%x "
+ "TMO:x%x\n",
+ ulp_status, ulp_word4, tmo);
goto flogifail;
+ }
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
}
/* Do not register VFI if the driver aborted FLOGI */
- if (!lpfc_error_lost_link(irsp))
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
/* FLOGI completes successfully */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0101 FLOGI completes successfully, I/O tag:x%x, "
+ "0101 FLOGI completes successfully, I/O tag:x%x "
"xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
cmdiocb->iotag, cmdiocb->sli4_xritag,
- irsp->un.ulpWord[4], sp->cmn.e_d_tov,
+ ulp_word4, sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
vport->port_state, vport->fc_flag,
sp->cmn.priority_tagging, kref_read(&ndlp->kref));
* we are point to point, if Fport we are Fabric.
*/
if (sp->cmn.fPort)
- rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
+ rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
+ ulp_word4);
else if (!(phba->hba_flag & HBA_FCOE_MODE))
rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
else {
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
spin_unlock_irq(&phba->hbalock);
- if (!lpfc_error_lost_link(irsp)) {
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4)) {
/* FLOGI failed, so just use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
/* Start discovery */
lpfc_disc_start(vport);
- } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
- (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ } else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
+ (((ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_SLI_ABORTED) &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ ((ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_SLI_DOWN))) &&
(phba->link_state != LPFC_CLEAR_LA)) {
/* If FLOGI failed enable link interrupt. */
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp;
uint32_t *pcmd;
uint32_t cmd;
+ u32 ulp_status, ulp_word4;
pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
cmd = *pcmd;
- irsp = &rspiocb->iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"6445 ELS completes after LINK_DOWN: "
" Status %x/%x cmd x%x flg x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
- cmdiocb->iocb_flag);
+ ulp_status, ulp_word4, cmd,
+ cmdiocb->cmd_flag);
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
- cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
+ cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
atomic_dec(&phba->fabric_iocb_count);
}
lpfc_els_free_iocb(phba, cmdiocb);
{
struct lpfc_hba *phba = vport->phba;
struct serv_parm *sp;
- IOCB_t *icmd;
+ union lpfc_wqe128 *wqe = NULL;
+ IOCB_t *icmd = NULL;
struct lpfc_iocbq *elsiocb;
struct lpfc_iocbq defer_flogi_acc;
- uint8_t *pcmd;
+ u8 *pcmd, ct;
uint16_t cmdsize;
uint32_t tmo, did;
int rc;
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
+ wqe = &elsiocb->wqe;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ icmd = &elsiocb->iocb;
/* For FLOGI request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
if (phba->sli_rev == LPFC_SLI_REV4) {
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_0) {
- elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
- elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
/* FLOGI needs to be 3 for WQE FCFI */
+ ct = SLI4_CT_FCFI;
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
+
/* Set the fcfi to the fcfi we registered with */
- elsiocb->iocb.ulpContext = phba->fcf.fcfi;
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->fcf.fcfi);
}
+
/* Can't do SLI4 class2 without support sequence coalescing */
sp->cls2.classValid = 0;
sp->cls2.seqDelivery = 0;
/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
- } else
+ } else {
sp->cmn.request_multiple_Nport = 0;
- }
+ }
- if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
- icmd->un.elsreq64.myID = 0;
- icmd->un.elsreq64.fl = 1;
+ if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
+ }
}
tmo = phba->fc_ratov;
phba->fc_ratov = tmo;
phba->fc_stat.elsXmitFLOGI++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue FLOGI: opt:x%x",
/* Check for a deferred FLOGI ACC condition */
if (phba->defer_flogi_acc_flag) {
+ /* lookup ndlp for received FLOGI */
+ ndlp = lpfc_findnode_did(vport, 0);
+ if (!ndlp)
+ return 0;
+
did = vport->fc_myDID;
vport->fc_myDID = Fabric_DID;
memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
- defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
- defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
- phba->defer_flogi_acc_ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(wqe_ctxt_tag,
+ &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
+ phba->defer_flogi_acc_rx_id);
+ bf_set(wqe_rcvoxid,
+ &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
+ phba->defer_flogi_acc_ox_id);
+ } else {
+ icmd = &defer_flogi_acc.iocb;
+ icmd->ulpContext = phba->defer_flogi_acc_rx_id;
+ icmd->unsli3.rcvsli3.ox_id =
+ phba->defer_flogi_acc_ox_id;
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
ndlp, NULL);
phba->defer_flogi_acc_flag = false;
-
vport->fc_myDID = did;
+
+ /* Decrement ndlp reference count to indicate the node can be
+ * released when other references are removed.
+ */
+ lpfc_nlp_put(ndlp);
}
return 0;
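The WQE fields touched above (wqe_ct, wqe_ctxt_tag, wqe_rcvoxid) are
manipulated with the driver's bf_set()/bf_get() macros, which are mask-and-shift
accessors over named bit-field definitions. A generic standalone sketch of that
idiom; the field positions below are invented for illustration and are not the
lpfc WQE layout:

    #include <stdint.h>
    #include <stdio.h>

    /* A field is (shift, mask-after-shift); these positions are made up. */
    #define FLD_CT_SHIFT        2
    #define FLD_CT_MASK         0x3u
    #define FLD_CTXT_TAG_SHIFT  16
    #define FLD_CTXT_TAG_MASK   0xffffu

    /* Clear the field, then OR in the masked value at its position. */
    #define bf_set(word, sh, mask, val) \
        ((word) = ((word) & ~((mask) << (sh))) | (((val) & (mask)) << (sh)))
    /* Shift the field down and mask off neighboring bits. */
    #define bf_get(word, sh, mask) (((word) >> (sh)) & (mask))

    int main(void)
    {
        uint32_t wqe_com = 0;

        bf_set(wqe_com, FLD_CT_SHIFT, FLD_CT_MASK, 3);        /* ct = 3 */
        bf_set(wqe_com, FLD_CTXT_TAG_SHIFT, FLD_CTXT_TAG_MASK, 0x1234);

        printf("ct=%u ctxt_tag=0x%x raw=0x%08x\n",
               bf_get(wqe_com, FLD_CT_SHIFT, FLD_CT_MASK),
               bf_get(wqe_com, FLD_CTXT_TAG_SHIFT, FLD_CTXT_TAG_MASK),
               wqe_com);
        return 0;
    }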
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
struct lpfc_nodelist *ndlp;
- IOCB_t *icmd;
+ u32 ulp_command;
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
*/
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
- icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+ ulp_command = get_job_cmnd(phba, iocb);
+ if (ulp_command == CMD_ELS_REQUEST64_CR) {
ndlp = (struct lpfc_nodelist *)(iocb->context1);
if (ndlp && ndlp->nlp_DID == Fabric_DID) {
if ((phba->pport->fc_flag & FC_PT2PT) &&
!(phba->pport->fc_flag & FC_PT2PT_PLOGI))
- iocb->fabric_iocb_cmpl =
+ iocb->fabric_cmd_cmpl =
lpfc_ignore_els_cmpl;
lpfc_sli_issue_abort_iotag(phba, pring, iocb,
NULL);
static void
lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+ struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp;
struct lpfc_nodelist *ndlp = cmdiocb->context1;
struct lpfc_node_rrq *rrq;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
/* we pass cmdiocb to state machine which needs rspiocb as well */
rrq = cmdiocb->context_un.rrq;
cmdiocb->context_un.rsp_iocb = rspiocb;
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"RRQ cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4,
+ get_job_els_rsp64_did(phba, cmdiocb));
+
/* rrq completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2880 RRQ completes to DID x%x "
"Data: x%x x%x x%x x%x x%x\n",
- irsp->un.elsreq64.remoteID,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, rrq->xritag, rrq->rxid);
+ ndlp->nlp_DID, ulp_status, ulp_word4,
+ get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
/* RRQ failed Don't print the vport to vport rjts */
- if (irsp->ulpStatus != IOSTAT_LS_RJT ||
- (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
- ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
- (phba)->pport->cfg_log_verbose & LOG_ELS)
+ if (ulp_status != IOSTAT_LS_RJT ||
+ (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
+ ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
+ (phba)->pport->cfg_log_verbose & LOG_ELS)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2881 RRQ failure DID:%06X Status:"
"x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
}
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
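Both here and in the PLOGI completion below, (ulp_word4 >> 16) is compared
against LSRJT_* reason codes: for an LS_RJT completion, word 4 carries the
reject reason code in bits 23:16 and the reason-code explanation in bits 15:8.
A small decode sketch; the LSRJT_* values are the usual FC-LS reason codes,
assumed here rather than taken from the driver headers:

    #include <stdint.h>
    #include <stdio.h>

    #define LSRJT_INVALID_CMD 0x01  /* assumed FC-LS reject reason codes */
    #define LSRJT_UNABLE_TPC  0x09

    struct ls_rjt {
        uint8_t reason;  /* bits 23:16 of the response word */
        uint8_t expl;    /* bits 15:8 */
    };

    static struct ls_rjt decode_ls_rjt(uint32_t ulp_word4)
    {
        struct ls_rjt r = {
            .reason = (ulp_word4 >> 16) & 0xff,
            .expl   = (ulp_word4 >> 8) & 0xff,
        };
        return r;
    }

    int main(void)
    {
        struct ls_rjt r = decode_ls_rjt(0x00090300); /* sample word4 */

        /* The driver suppresses logging for these two reasons unless
         * verbose ELS logging is enabled.
         */
        printf("reason=0x%02x expl=0x%02x quiet=%d\n", r.reason, r.expl,
               r.reason == LSRJT_INVALID_CMD || r.reason == LSRJT_UNABLE_TPC);
        return 0;
    }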
struct lpfc_dmabuf *prsp;
int disc;
struct serv_parm *sp = NULL;
+ u32 ulp_status, ulp_word4, did, iotag;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
- irsp = &rspiocb->iocb;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ iotag = irsp->ulpIoTag;
+ }
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"PLOGI cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
- ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+ ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0136 PLOGI completes to NPort x%x "
"with no ndlp. Data: x%x x%x x%x\n",
- irsp->un.elsreq64.remoteID,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpIoTag);
+ did, ulp_status, ulp_word4, iotag);
goto out_freeiocb;
}
"0102 PLOGI completes to NPort x%06x "
"Data: x%x x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_fc4_type,
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
goto out;
}
/* PLOGI failed Don't print the vport to vport rjts */
- if (irsp->ulpStatus != IOSTAT_LS_RJT ||
- (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
- ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
- (phba)->pport->cfg_log_verbose & LOG_ELS)
+ if (ulp_status != IOSTAT_LS_RJT ||
+ (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
+ ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
+ (phba)->pport->cfg_log_verbose & LOG_ELS)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ "2753 PLOGI failure DID:%06X "
+ "Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp))
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
}
phba->fc_stat.elsXmitPLOGI++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PLOGI: did:x%x refcnt %d",
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
char *mode;
u32 loglevel;
+ u32 ulp_status;
+ u32 ulp_word4;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
- irsp = &(rspiocb->iocb);
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"PRLI cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
ndlp->nlp_DID);
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0103 PRLI completes to NPort x%06x "
"Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+ ndlp->nlp_DID, ulp_status, ulp_word4,
vport->num_disc_nodes, ndlp->fc4_prli_sent);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport))
goto out;
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
lpfc_printf_vlog(vport, mode, loglevel,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
"data: x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4, ndlp->fc4_prli_sent);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp))
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
/* For FCP support */
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
- elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ;
+ elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ;
/* Remove FCP type - processed. */
local_nlp_type &= ~NLP_FC4_FCP;
npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
- elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ;
+ elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ;
/* Remove NVME type - processed. */
local_nlp_type &= ~NLP_FC4_NVME;
}
phba->fc_stat.elsXmitPRLI++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_prli;
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_PRLI_SND;
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
int disc;
+ u32 ulp_status, ulp_word4, tmo;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
- irsp = &(rspiocb->iocb);
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ }
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"ADISC cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
ndlp->nlp_DID);
/* Since ndlp can be freed in the disc state machine, note if this node
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0104 ADISC completes to NPort x%x "
"Data: x%x x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, disc, vport->num_disc_nodes);
+ ndlp->nlp_DID, ulp_status, ulp_word4,
+ tmo, disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
spin_lock_irq(&ndlp->lock);
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
/* ADISC failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2755 ADISC failure DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
-
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_CMPL_ADISC);
+ NLP_EVT_CMPL_ADISC);
/* As long as this node is not registered with the SCSI or NVMe
* transport, it is no longer an active node. Otherwise
ap->DID = be32_to_cpu(vport->fc_myDID);
phba->fc_stat.elsXmitADISC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_ADISC_SND;
spin_unlock_irq(&ndlp->lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue ADISC: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
unsigned long flags;
uint32_t skip_recovery = 0;
int wake_up_waiter = 0;
+ u32 ulp_status;
+ u32 ulp_word4;
+ u32 tmo;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
- irsp = &(rspiocb->iocb);
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ }
+
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"LOGO cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
ndlp->nlp_DID);
/* LOGO completes to NPort <nlp_DID> */
"0105 LOGO completes to NPort x%x "
"refcnt %d nflags x%x Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, vport->num_disc_nodes);
+ ulp_status, ulp_word4,
+ tmo, vport->num_disc_nodes);
if (lpfc_els_chk_latt(vport)) {
skip_recovery = 1;
* all acceptable. Note the failure and move forward with
* discovery. The PLOGI will retry.
*/
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* LOGO failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
- /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp)) {
+ "2756 LOGO failure, No Retry DID:%06X "
+ "Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
+
+ if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
skip_recovery = 1;
goto out;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3187 LOGO completes to NPort x%x: Start "
"Recovery Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout,
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4, tmo,
vport->num_disc_nodes);
lpfc_disc_start(vport);
return;
memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
phba->fc_stat.elsXmitLOGO++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_logo;
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue LOGO: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_nodelist *free_ndlp;
IOCB_t *irsp;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
- irsp = &rspiocb->iocb;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"ELS cmd cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout);
+ iotag, ulp_status, ulp_word4, tmo);
/* Check to see if link went down during discovery */
lpfc_els_chk_latt(vport);
u32 *pdata;
u32 cmd;
struct lpfc_nodelist *ndlp = cmdiocb->context1;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
- irsp = &rspiocb->iocb;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"ELS cmd cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
+
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
- "0217 ELS cmd tag x%x completes Data: x%x x%x x%x "
- "x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout,
- cmdiocb->retry);
+ "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n",
+ iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry);
pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
if (!pcmd)
cmd = *pdata;
/* Only 1 retry for ELS Timeout only */
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_SEQUENCE_TIMEOUT)) {
cmdiocb->retry++;
if (cmdiocb->retry <= 1) {
lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
return;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* ELS discovery cmd completes with error */
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
"4203 ELS cmd x%x error: x%x x%X\n", cmd,
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ ulp_status, ulp_word4);
goto out;
}
"4677 Fabric RDF Notification Grant "
"Data: 0x%08x Reg: %x %x\n",
be32_to_cpu(
- prdf->reg_d1.desc_tags[i]),
+ prdf->reg_d1.desc_tags[i]),
phba->cgn_reg_signal,
phba->cgn_reg_fpin);
}
ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitSCR++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
lpfc_els_free_iocb(phba, elsiocb);
event->portid.rscn_fid[2] = nportid & 0x000000FF;
phba->fc_stat.elsXmitRSCN++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
lpfc_els_free_iocb(phba, elsiocb);
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_RNID);
+ ndlp->nlp_DID, ELS_CMD_FARPR);
if (!elsiocb)
return 1;
ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitFARPR++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
lpfc_els_free_iocb(phba, elsiocb);
phba->cgn_reg_fpin);
phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
lpfc_els_free_iocb(phba, elsiocb);
lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp;
+ IOCB_t *irsp_iocb;
struct fc_els_edc_resp *edc_rsp;
struct fc_tlv_desc *tlv;
struct fc_diag_cg_sig_desc *pcgd;
int desc_cnt = 0, bytes_remain;
bool rcv_cap_desc = false;
struct lpfc_nodelist *ndlp;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
- irsp = &rspiocb->iocb;
ndlp = cmdiocb->context1;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(rspiocb);
+ iotag = get_wqe_reqtag(rspiocb);
+ } else {
+ irsp_iocb = &rspiocb->iocb;
+ tmo = irsp_iocb->ulpTimeout;
+ iotag = irsp_iocb->ulpIoTag;
+ }
+
lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
"EDC cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
"4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout);
+ iotag, ulp_status, ulp_word4, tmo);
pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
if (!pcmd)
goto out;
/* Need to clear signal values, send features MB and RDF with FPIN. */
- if (irsp->ulpStatus)
+ if (ulp_status)
goto out;
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
ndlp->nlp_DID, phba->cgn_reg_signal,
phba->cgn_reg_fpin);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
lpfc_els_free_iocb(phba, elsiocb);
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp = &rspiocb->iocb;
+ union lpfc_wqe128 *irsp = &rspiocb->wqe;
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
uint32_t *elscmd;
uint32_t cmd = 0;
uint32_t did;
int link_reset = 0, rc;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
/* Note: context2 may be 0 for internal driver abort
did = ndlp->nlp_DID;
else {
/* We should only hit this case for retrying PLOGI */
- did = irsp->un.elsreq64.remoteID;
+ did = get_job_els_rsp64_did(phba, rspiocb);
ndlp = lpfc_findnode_did(vport, did);
if (!ndlp && (cmd != ELS_CMD_PLOGI))
return 0;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Retry ELS: wd7:x%x wd4:x%x did:x%x",
- *(((uint32_t *)irsp) + 7), irsp->un.ulpWord[4], did);
+ *(((uint32_t *)irsp) + 7), ulp_word4, did);
- switch (irsp->ulpStatus) {
+ switch (ulp_status) {
case IOSTAT_FCP_RSP_ERROR:
break;
case IOSTAT_REMOTE_STOP:
}
break;
case IOSTAT_LOCAL_REJECT:
- switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
+ switch ((ulp_word4 & IOERR_PARAM_MASK)) {
case IOERR_LOOP_OPEN_FAILURE:
if (cmd == ELS_CMD_FLOGI) {
if (PCI_DEVICE_ID_HORNET ==
case IOSTAT_NPORT_RJT:
case IOSTAT_FABRIC_RJT:
- if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ if (ulp_word4 & RJT_UNAVAIL_TEMP) {
retry = 1;
break;
}
break;
case IOSTAT_LS_RJT:
- stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
+ stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
/* Added for Vendor specifc support
* Just keep retrying for these Rsn / Exp codes
*/
* on this rport.
*/
if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
- spin_unlock_irq(&ndlp->lock);
- retry = 0;
- goto out_retry;
+ LSEXP_REQ_UNSUPPORTED) {
+ if (cmd == ELS_CMD_PRLI) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
+ spin_unlock_irq(&ndlp->lock);
+ retry = 0;
+ goto out_retry;
+ }
}
break;
}
if ((cmd == ELS_CMD_FLOGI) &&
(phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
- !lpfc_error_lost_link(irsp)) {
+ !lpfc_error_lost_link(ulp_status, ulp_word4)) {
/* FLOGI retry policy */
retry = 1;
/* retry FLOGI forever */
delay = 5000;
else if (cmdiocb->retry >= 32)
delay = 1000;
- } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
+ } else if ((cmd == ELS_CMD_FDISC) &&
+ !lpfc_error_lost_link(ulp_status, ulp_word4)) {
/* retry FDISCs every second up to devloss */
retry = 1;
maxretry = vport->cfg_devloss_tmo;
cmd, did, cmdiocb->retry, delay);
if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
- ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ ((ulp_status != IOSTAT_LOCAL_REJECT) ||
+ ((ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES))) {
/* Don't reset timer for no resources */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0137 No retry ELS command x%x to remote "
"NPORT x%x: Out of Resources: Error:x%x/%x\n",
- cmd, did, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ cmd, did, ulp_status,
+ ulp_word4);
}
else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0108 No retry ELS command x%x to remote "
"NPORT x%x Retried:%d Error:x%x/%x\n",
- cmd, did, cmdiocb->retry, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ cmd, did, cmdiocb->retry, ulp_status,
+ ulp_word4);
}
return 0;
}
/* context2 = cmd, context2->next = rsp, context3 = bpl */
if (elsiocb->context2) {
- if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
+ if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) {
/* Firmware could still be in progress of DMAing
* payload, so don't free data buffer till after
* a hbeat.
*/
- elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
+ elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE;
buf_ptr = elsiocb->context2;
elsiocb->context2 = NULL;
if (buf_ptr) {
{
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp;
+ u32 ulp_status, ulp_word4;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"ACC LOGO cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
+ ulp_status, ulp_word4, ndlp->nlp_DID);
/* ACC to LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0109 ACC to LOGO completes to NPort x%x refcnt %d "
goto out;
if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
-
/* If PLOGI is being retried, PLOGI completion will cleanup the
* node. The NLP_NPR_2B_DISC flag needs to be retained to make
* progress on nodes discovered from last RSCN.
IOCB_t *irsp;
LPFC_MBOXQ_t *mbox = NULL;
struct lpfc_dmabuf *mp = NULL;
-
- irsp = &rspiocb->iocb;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
if (!vport) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
+ }
+
/* Check to see if link went down during discovery */
if (!ndlp || lpfc_els_chk_latt(vport)) {
if (mbox) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"ELS rsp cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- cmdiocb->iocb.un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
/* ELS response tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0110 ELS response tag x%x completes "
- "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%px\n",
- cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
- rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n",
+ iotag, ulp_status, ulp_word4, tmo,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox);
+ ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp);
if (mbox) {
- if ((rspiocb->iocb.ulpStatus == 0) &&
- (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
+ if (ulp_status == 0 &&
+ (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
if (!lpfc_unreg_rpi(vport, ndlp) &&
(!(vport->fc_flag & FC_PT2PT))) {
- if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
+ if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+ ndlp->nlp_state ==
+ NLP_STE_REG_LOGIN_ISSUE) {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY,
"0314 PLOGI recov "
(vport && vport->port_type == LPFC_NPIV_PORT) &&
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
ndlp->nlp_flag & NLP_RELEASE_RPI) {
- lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
- spin_unlock_irq(&ndlp->lock);
- lpfc_drop_node(vport, ndlp);
+ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+ lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_drop_node(vport, ndlp);
+ }
}
/* Release the originating I/O reference. */
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
+ union lpfc_wqe128 *wqe;
+ union lpfc_wqe128 *oldwqe = &oldiocb->wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
struct serv_parm *sp;
ELS_PKT *els_pkt_ptr;
struct fc_els_rdf_resp *rdf_resp;
- oldcmd = &oldiocb->iocb;
-
switch (flag) {
case ELS_CMD_ACC:
cmdsize = sizeof(uint32_t);
return 1;
}
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
if (mbox)
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
rdf_resp = (struct fc_els_rdf_resp *)pcmd;
memset(rdf_resp, 0, sizeof(*rdf_resp));
ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
spin_unlock_irq(&ndlp->lock);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc;
} else {
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
}
phba->fc_stat.elsXmitACC++;
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
"xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
"rpi x%x\n",
rejectError, elsiocb->iotag,
- elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+ get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Issue LS_RJT: did:x%x flg:x%x err:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
phba->fc_stat.elsXmitLSRJT++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
lpfc_els_free_iocb(phba, elsiocb);
struct lpfc_els_edc_rsp *edc_rsp;
struct lpfc_iocbq *elsiocb;
IOCB_t *icmd, *cmd;
+ union lpfc_wqe128 *wqe;
uint8_t *pcmd;
int cmdsize, rc;
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- cmd = &cmdiocb->iocb;
- icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, cmdiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ cmd = &cmdiocb->iocb;
+ icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
+ }
+
pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+
memset(pcmd, 0, cmdsize);
edc_rsp = (struct lpfc_els_edc_rsp *)pcmd;
"Issue EDC ACC: did:x%x flg:x%x refcnt %d",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
elsiocb->context1 = lpfc_nlp_get(ndlp);
struct lpfc_hba *phba = vport->phba;
ADISC *ap;
IOCB_t *icmd, *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
+ u32 ulp_context;
cmdsize = sizeof(uint32_t) + sizeof(ADISC);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb));
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit ADISC ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0130 Xmit ADISC ACC response iotag x%x xri: "
"x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- /* Xmit ELS ACC response tag <ulpIoTag> */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
- "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
- "RPI: x%x, fc_flag x%x\n",
- rc, elsiocb->iotag, elsiocb->sli4_xritag,
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi, vport->fc_flag);
return 0;
}
lpfc_vpd_t *vpd;
IOCB_t *icmd;
IOCB_t *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
uint32_t prli_fc4_req, *req_payload;
struct lpfc_dmabuf *req_buf;
int rc;
- u32 elsrspcmd;
+ u32 elsrspcmd, ulp_context;
/* Need the incoming PRLI payload to determine if the ACC is for an
* FC4 or NVME PRLI type. The PRLI type is at word 1.
}
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
- ndlp->nlp_DID, elsrspcmd);
+ ndlp->nlp_DID, elsrspcmd);
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit PRLI ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0131 Xmit PRLI ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
lpfc_els_free_iocb(phba, elsiocb);
struct lpfc_hba *phba = vport->phba;
RNID *rn;
IOCB_t *icmd, *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
+ u32 ulp_context;
cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
+ (2 * sizeof(struct lpfc_name));
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit RNID ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0132 Xmit RNID ACC response tag x%x xri x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext);
+ elsiocb->iotag, ulp_context);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
lpfc_els_free_iocb(phba, elsiocb);
be32_to_cpu(bf_get(rrq_did, rrq)),
bf_get(rrq_oxid, rrq),
rxid,
- iocb->iotag, iocb->iocb.ulpContext);
+ get_wqe_reqtag(iocb),
+ get_job_ulpcontext(phba, iocb));
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd, *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
+ u32 ulp_context;
- cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ cmdsize = oldiocb->wcqe_cmpl.total_data_placed;
+ else
+ cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
/* The accumulated length can exceed the BPL_SIZE. For
* now, use this as the limit
if (!elsiocb)
return 1;
- elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
- elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit ECHO ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2876 Xmit ECHO ACC response tag x%x xri x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext);
+ elsiocb->iotag, ulp_context);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
lpfc_els_free_iocb(phba, elsiocb);
struct lpfc_iocbq *elsiocb;
struct ulp_bde64 *bpl;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
uint8_t *pcmd;
struct ls_rjt *stat;
struct fc_rdp_res_frame *rdp_res;
uint32_t cmdsize, len;
uint16_t *flag_ptr;
int rc;
+ u32 ulp_context;
if (status != SUCCESS)
goto error;
cmdsize = sizeof(struct fc_rdp_res_frame);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
- lpfc_max_els_tries, rdp_context->ndlp,
- rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
+ lpfc_max_els_tries, rdp_context->ndlp,
+ rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
goto free_rdp_context;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rdp_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* ox-id of the frame */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->ox_id);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->rx_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rdp_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2171 Xmit RDP response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
rdp_res = (struct fc_rdp_res_frame *)
rdp_context->page_a0, vport);
rdp_res->length = cpu_to_be32(len - 8);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
/* Now that we know the true size of the payload, update the BPL */
bpl = (struct ulp_bde64 *)
if (!elsiocb)
goto free_rdp_context;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rdp_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* ox-id of the frame */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->ox_id);
+ bf_set(wqe_ctxt_tag,
+ &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->rx_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rdp_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ }
+
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
phba->fc_stat.elsXmitLSRJT++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
lpfc_els_free_iocb(phba, elsiocb);
free_rdp_context:
/* This reference put is for the original unsolicited RDP. If the
- * iocb prep failed, there is no reference to remove.
+ * prep failed, there is no reference to remove.
*/
lpfc_nlp_put(ndlp);
kfree(rdp_context);
uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
struct fc_rdp_req_frame *rdp_req;
struct lpfc_rdp_context *rdp_context;
- IOCB_t *cmd = NULL;
+ union lpfc_wqe128 *cmd = NULL;
struct ls_rjt stat;
if (phba->sli_rev < LPFC_SLI_REV4 ||
goto error;
}
- cmd = &cmdiocb->iocb;
+ cmd = &cmdiocb->wqe;
rdp_context->ndlp = lpfc_nlp_get(ndlp);
if (!rdp_context->ndlp) {
kfree(rdp_context);
rjt_err = LSRJT_UNABLE_TPC;
goto error;
}
- rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
- rdp_context->rx_id = cmd->ulpContext;
+ rdp_context->ox_id = bf_get(wqe_rcvoxid,
+ &cmd->xmit_els_rsp.wqe_com);
+ rdp_context->rx_id = bf_get(wqe_ctxt_tag,
+ &cmd->xmit_els_rsp.wqe_com);
rdp_context->cmpl = lpfc_els_rdp_cmpl;
if (lpfc_get_rdp_info(phba, rdp_context)) {
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
{
MAILBOX_t *mb;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_nodelist *ndlp;
(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
- icmd = &elsiocb->iocb;
- icmd->ulpContext = lcb_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ lcb_context->ox_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = lcb_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+ }
pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
*((uint32_t *)(pcmd)) = ELS_CMD_ACC;
lcb_res->capability = lcb_context->capability;
lcb_res->lcb_frequency = lcb_context->frequency;
lcb_res->lcb_duration = lcb_context->duration;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
elsiocb->context1 = lpfc_nlp_get(ndlp);
error:
cmdsize = sizeof(struct fc_lcb_res_frame);
elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
- lpfc_max_els_tries, ndlp,
- ndlp->nlp_DID, ELS_CMD_LS_RJT);
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_LS_RJT);
lpfc_nlp_put(ndlp);
if (!elsiocb)
goto free_lcb_context;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = lcb_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ lcb_context->ox_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = lcb_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+ }
+
pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
*((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitLSRJT++;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
lcb_context->type = beacon->lcb_type;
lcb_context->frequency = beacon->lcb_frequency;
lcb_context->duration = beacon->lcb_duration;
- lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
- lcb_context->rx_id = cmdiocb->iocb.ulpContext;
+ lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb);
+ lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb);
lcb_context->ndlp = lpfc_nlp_get(ndlp);
if (!lcb_context->ndlp) {
rjt_err = LSRJT_UNABLE_TPC;
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
ndlp, NULL);
+ /* Restart disctmo if it's already running */
+ if (vport->fc_flag & FC_DISC_TMO) {
+ tmo = ((phba->fc_ratov * 3) + 3);
+ mod_timer(&vport->fc_disctmo,
+ jiffies +
+ msecs_to_jiffies(1000 * tmo));
+ }
return 0;
}
}
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
uint32_t *lp = (uint32_t *) pcmd->virt;
- IOCB_t *icmd = &cmdiocb->iocb;
+ union lpfc_wqe128 *wqe = &cmdiocb->wqe;
struct serv_parm *sp;
LPFC_MBOXQ_t *mbox;
uint32_t cmd, did;
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/* We should never receive a FLOGI in loop mode, ignore it */
- did = icmd->un.elsreq64.remoteID;
+ did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
/* An FLOGI ELS command <elsCmd> was received from DID <did> in
Loop Mode */
/* Defer ACC response until AFTER we issue a FLOGI */
if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
- phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext;
- phba->defer_flogi_acc_ox_id =
- cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+ phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag,
+ &wqe->xmit_els_rsp.wqe_com);
+ phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid,
+ &wqe->xmit_els_rsp.wqe_com);
vport->fc_myDID = did;
int rc = 0;
MAILBOX_t *mb;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
struct RLS_RSP *rls_rsp;
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
uint16_t oxid;
uint16_t rxid;
uint32_t cmdsize;
+ u32 ulp_context;
mb = &pmb->u.mb;
return;
}
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rxid;
- icmd->unsli3.rcvsli3.ox_id = oxid;
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* Xri / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rxid;
+ icmd->unsli3.rcvsli3.ox_id = oxid;
+ }
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
"2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
struct ls_rjt stat;
+ u32 ctx = get_job_ulpcontext(phba, cmdiocb);
+ u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
mbox->ctx_buf = (void *)((unsigned long)
- ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
- cmdiocb->iocb.ulpContext)); /* rx_id */
+ (ox_id << 16 | ctx));
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
if (!mbox->ctx_ndlp)
goto node_err;
struct lpfc_nodelist *ndlp)
{
int rc = 0;
+ IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_hba *phba = vport->phba;
struct ls_rjt stat;
struct RTV_RSP *rtv_rsp;
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
uint32_t cmdsize;
-
+ u32 ulp_context;
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t); /* Skip past command */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
/* use the command's xri in the response */
- elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
- elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, cmdiocb));
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, cmdiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb);
+ icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb);
+ }
rtv_rsp = (struct RTV_RSP *)pcmd;
"2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
"Data: x%x x%x x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi,
rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
"Issue RRQ: did:x%x",
did, rrq->xritag, rrq->rxid);
elsiocb->context_un.rrq = rrq;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq;
lpfc_nlp_get(ndlp);
elsiocb->context1 = ndlp;
{
int rc = 0;
struct lpfc_hba *phba = vport->phba;
- IOCB_t *icmd, *oldcmd;
+ IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
RPL_RSP rpl_rsp;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
+ u32 ulp_context;
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* Xri / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb));
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = get_job_ulpcontext(phba, oldiocb);
+ icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb);
+ }
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
"0120 Xmit ELS RPL ACC response tag x%x "
"xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
"rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
- IOCB_t *icmd;
FARP *fp;
uint32_t cnt, did;
- icmd = &cmdiocb->iocb;
- did = icmd->un.elsreq64.remoteID;
+ did = get_job_els_rsp64_did(vport->phba, cmdiocb);
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
- IOCB_t *icmd;
uint32_t did;
- icmd = &cmdiocb->iocb;
- did = icmd->un.elsreq64.remoteID;
+ did = get_job_els_rsp64_did(vport->phba, cmdiocb);
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
uint32_t timeout;
uint32_t remote_ID = 0xffffffff;
LIST_HEAD(abort_list);
+ u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0;
timeout = (uint32_t)(phba->fc_ratov << 1);
spin_lock(&pring->ring_lock);
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- cmd = &piocb->iocb;
+ ulp_command = get_job_cmnd(phba, piocb);
+ ulp_context = get_job_ulpcontext(phba, piocb);
+ did = get_job_els_rsp64_did(phba, piocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = get_wqe_reqtag(piocb);
+ } else {
+ cmd = &piocb->iocb;
+ iotag = cmd->ulpIoTag;
+ }
- if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
- piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
- piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 ||
+ ulp_command == CMD_ABORT_XRI_CX ||
+ ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN)
continue;
if (piocb->vport != vport)
}
remote_ID = 0xffffffff;
- if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
- remote_ID = cmd->un.elsreq64.remoteID;
- else {
+ if (ulp_command != CMD_GEN_REQUEST64_CR) {
+ remote_ID = did;
+ } else {
struct lpfc_nodelist *ndlp;
- ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
+ ndlp = __lpfc_findnode_rpi(vport, ulp_context);
if (ndlp)
remote_ID = ndlp->nlp_DID;
}
spin_unlock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
- cmd = &piocb->iocb;
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0127 ELS timeout Data: x%x x%x x%x "
"x%x\n", els_command,
- remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
+ remote_ID, ulp_command, iotag);
+
spin_lock_irq(&phba->hbalock);
list_del_init(&piocb->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
- IOCB_t *cmd = NULL;
+ u32 ulp_command;
unsigned long iflags = 0;
lpfc_fabric_abort_vport(vport);
/* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+ if (piocb->cmd_flag & LPFC_IO_LIBDFC)
continue;
if (piocb->vport != vport)
continue;
- if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ if (piocb->cmd_flag & LPFC_DRIVER_ABORTED)
continue;
/* On the ELS ring we can have ELS_REQUESTs or
* GEN_REQUESTs waiting for a response.
*/
- cmd = &piocb->iocb;
- if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+ ulp_command = get_job_cmnd(phba, piocb);
+ if (ulp_command == CMD_ELS_REQUEST64_CR) {
list_add_tail(&piocb->dlist, &abort_list);
/* If the link is down when flushing ELS commands, send their
* completions to the link down handler to clean up
* and avoid any retry logic.
*/
if (phba->link_state == LPFC_LINK_DOWN)
- piocb->iocb_cmpl = lpfc_cmpl_els_link_down;
+ piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
}
- if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR)
+ if (ulp_command == CMD_GEN_REQUEST64_CR)
list_add_tail(&piocb->dlist, &abort_list);
}
* just queue them up for lpfc_sli_cancel_iocbs
*/
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
- cmd = &piocb->iocb;
+ ulp_command = get_job_cmnd(phba, piocb);
- if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+ if (piocb->cmd_flag & LPFC_IO_LIBDFC)
continue;
- }
/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
- if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
- cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
- cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- cmd->ulpCommand == CMD_ABORT_XRI_CN)
+ if (ulp_command == CMD_QUE_RING_BUF_CN ||
+ ulp_command == CMD_QUE_RING_BUF64_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_CX)
continue;
if (piocb->vport != vport)
if (vport == phba->pport) {
list_for_each_entry_safe(piocb, tmp_iocb,
&phba->fabric_iocb_list, list) {
- cmd = &piocb->iocb;
list_del_init(&piocb->list);
list_add_tail(&piocb->list, &abort_list);
}
struct ls_rjt stat;
struct lpfc_nodelist *ndlp;
uint32_t *pcmd;
+ u32 ulp_status, ulp_word4;
ndlp = cmdiocbp->context1;
if (!ndlp)
return;
- if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
+ ulp_status = get_job_ulpstatus(phba, rspiocbp);
+ ulp_word4 = get_job_word4(phba, rspiocbp);
+
+ if (ulp_status == IOSTAT_LS_RJT) {
lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
cmdiocbp->context2)->virt);
lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
- stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
+ stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
fc_host_post_vendor_event(shost,
LPFC_NL_VENDOR_ID);
return;
}
- if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
- (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
+ if (ulp_status == IOSTAT_NPORT_BSY ||
+ ulp_status == IOSTAT_FABRIC_BSY) {
fabric_event.event_type = FC_REG_FABRIC_EVENT;
- if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
+ if (ulp_status == IOSTAT_NPORT_BSY)
fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
else
fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
{
struct lpfc_nodelist *ndlp;
struct ls_rjt stat;
- uint32_t *payload, payload_len;
- uint32_t cmd, did, newnode;
+ u32 *payload, payload_len;
+ u32 cmd = 0, did = 0, newnode, status = 0;
uint8_t rjt_exp, rjt_err = 0, init_link = 0;
- IOCB_t *icmd = &elsiocb->iocb;
+ struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
LPFC_MBOXQ_t *mbox;
if (!vport || !(elsiocb->context2))
goto dropit;
newnode = 0;
+ wcqe_cmpl = &elsiocb->wcqe_cmpl;
payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
- payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ payload_len = wcqe_cmpl->total_data_placed;
+ else
+ payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
+ status = get_job_ulpstatus(phba, elsiocb);
cmd = *payload;
if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
- lpfc_post_buffer(phba, pring, 1);
+ lpfc_sli3_post_buffer(phba, pring, 1);
- did = icmd->un.rcvels.remoteID;
- if (icmd->ulpStatus) {
+ did = get_job_els_rsp64_did(phba, elsiocb);
+ if (status) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV Unsol ELS: status:x%x/x%x did:x%x",
- icmd->ulpStatus, icmd->un.ulpWord[4], did);
+ status, get_job_word4(phba, elsiocb), did);
goto dropit;
}
* the vfi. This is done in lpfc_rcv_plogi but
* that is called after the reg_vfi.
*/
- vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
+ vport->fc_myDID =
+ bf_get(els_rsp64_sid,
+ &elsiocb->wqe.xmit_els_rsp);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3312 Remote port assigned DID x%x "
"%x\n", vport->fc_myDID,
}
lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
+ /* retain node if our response is deferred */
+ if (phba->defer_flogi_acc_flag)
+ break;
if (newnode)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
if (vport && !(vport->load_flag & FC_UNLOADING))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0111 Dropping received ELS cmd "
- "Data: x%x x%x x%x\n",
- icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
+ "Data: x%x x%x x%x x%x\n",
+ cmd, status, get_job_word4(phba, elsiocb), did);
+
phba->fc_stat.elsRcvDrop++;
}
lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *elsiocb)
{
- struct lpfc_vport *vport = phba->pport;
- IOCB_t *icmd = &elsiocb->iocb;
- dma_addr_t paddr;
+ struct lpfc_vport *vport = elsiocb->vport;
+ u32 ulp_command, status, parameter, bde_count = 0;
+ IOCB_t *icmd;
+ struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
+ dma_addr_t paddr;
elsiocb->context1 = NULL;
elsiocb->context2 = NULL;
elsiocb->context3 = NULL;
- if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
+ wcqe_cmpl = &elsiocb->wcqe_cmpl;
+ ulp_command = get_job_cmnd(phba, elsiocb);
+ status = get_job_ulpstatus(phba, elsiocb);
+ parameter = get_job_word4(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = wcqe_cmpl->word3;
+ else
+ bde_count = elsiocb->iocb.ulpBdeCount;
+
+ if (status == IOSTAT_NEED_BUFFER) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
- } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
- (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ } else if (status == IOSTAT_LOCAL_REJECT &&
+ (parameter & IOERR_PARAM_MASK) ==
IOERR_RCV_BUFFER_WAITING) {
phba->fc_stat.NoRcvBuf++;
/* Not enough posted buffers; Try posting more buffers */
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba, pring, 0);
+ lpfc_sli3_post_buffer(phba, pring, 0);
return;
}
- if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
- (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
- icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
- if (icmd->unsli3.rcvsli3.vpi == 0xffff)
- vport = phba->pport;
- else
- vport = lpfc_find_vport_by_vpid(phba,
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ icmd = &elsiocb->iocb;
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (ulp_command == CMD_IOCB_RCV_ELS64_CX ||
+ ulp_command == CMD_IOCB_RCV_SEQ64_CX)) {
+ if (icmd->unsli3.rcvsli3.vpi == 0xffff)
+ vport = phba->pport;
+ else
+ vport = lpfc_find_vport_by_vpid(phba,
icmd->unsli3.rcvsli3.vpi);
+ }
}
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
*/
- if (icmd->ulpBdeCount == 0)
+ if (bde_count == 0)
return;
- /* type of ELS cmd is first 32bit word
- * in packet
- */
+ /* Account for SLI2 or SLI3 and later unsolicited buffering */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
elsiocb->context2 = bdeBuf1;
+ if (bde_count == 2)
+ elsiocb->context3 = bdeBuf2;
} else {
+ icmd = &elsiocb->iocb;
paddr = getPaddr(icmd->un.cont64[0].addrHigh,
icmd->un.cont64[0].addrLow);
elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
paddr);
+ if (bde_count == 2) {
+ paddr = getPaddr(icmd->un.cont64[1].addrHigh,
+ icmd->un.cont64[1].addrLow);
+ elsiocb->context3 = lpfc_sli_ringpostbuf_get(phba,
+ pring,
+ paddr);
+ }
}
lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
elsiocb->context2 = NULL;
}
- /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
- if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
- icmd->ulpBdeCount == 2) {
- elsiocb->context2 = bdeBuf2;
- lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
- /* free mp if we are done with it */
- if (elsiocb->context2) {
- lpfc_in_buf_free(phba, elsiocb->context2);
- elsiocb->context2 = NULL;
- }
+ if (elsiocb->context3) {
+ lpfc_in_buf_free(phba, elsiocb->context3);
+ elsiocb->context3 = NULL;
}
}
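The reworked unsolicited-event path above reads the BDE count from wcqe_cmpl.word3 on SLI-4 and from iocb.ulpBdeCount on SLI-3, then drives the one- and two-buffer cases through a single path instead of a separate second-BDE branch. A compact stand-in for that selection follows; note the real driver keys off phba->sli_rev, not a per-iocb field, and the structs here are simplified models.

#include <stdio.h>
#include <stdint.h>

#define LPFC_SLI_REV4 4

struct model_iocbq {
	uint32_t sli_rev;			/* stand-in for phba->sli_rev */
	struct { uint32_t word3; } wcqe_cmpl;	/* SLI-4 completion */
	struct { uint32_t ulpBdeCount; } iocb;	/* SLI-3 IOCB */
};

static uint32_t model_bde_count(const struct model_iocbq *q)
{
	return q->sli_rev == LPFC_SLI_REV4 ?
	       q->wcqe_cmpl.word3 : q->iocb.ulpBdeCount;
}

int main(void)
{
	struct model_iocbq sli4 = { .sli_rev = 4, .wcqe_cmpl.word3 = 2 };
	struct model_iocbq sli3 = { .sli_rev = 3, .iocb.ulpBdeCount = 1 };

	printf("%u %u\n", model_bde_count(&sli4), model_bde_count(&sli3));
	return 0;
}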
struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
- if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
- (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
+ if (ulp_status != IOSTAT_FABRIC_RJT ||
+ ulp_word4 != RJT_LOGIN_REQUIRED)
return 0;
else
return 1;
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_iocbq *piocb;
struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
struct serv_parm *sp;
uint8_t fabric_param_changed;
+ u32 ulp_status, ulp_word4;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0123 FDISC completes. x%x/x%x prevDID: x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
vport->fc_prevDID);
/* Since all FDISCs are being single threaded, we
* must reset the discovery timer for ALL vports
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"FDISC cmpl: status:x%x/x%x prevdid:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
+ ulp_status, ulp_word4, vport->fc_prevDID);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
lpfc_retry_pport_discovery(phba);
/* FDISC failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0126 FDISC failed. (x%x/x%x)\n",
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ ulp_status, ulp_word4);
goto fdisc_failed;
}
vport->fc_flag |= FC_PUBLIC_LOOP;
spin_unlock_irq(shost->host_lock);
- vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ vport->fc_myDID = ulp_word4 & Mask_DID;
lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
if (!prsp)
{
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe = NULL;
struct lpfc_iocbq *elsiocb;
struct serv_parm *sp;
uint8_t *pcmd;
return 1;
}
- icmd = &elsiocb->iocb;
- icmd->un.elsreq64.myID = 0;
- icmd->un.elsreq64.fl = 1;
-
- /*
- * SLI3 ports require a different context type value than SLI4.
- * Catch SLI3 ports here and override the prep.
- */
- if (phba->sli_rev == LPFC_SLI_REV3) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(els_req64_sid, &wqe->els_req, 0);
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
}
lpfc_set_disctmo(vport);
phba->fc_stat.elsXmitFDISC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue FDISC: did:x%x",
did, 0, 0);
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
- lpfc_els_free_iocb(phba, elsiocb);
+ if (!elsiocb->context1)
goto err_out;
- }
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
lpfc_nlp_put(ndlp);
goto err_out;
}
return 0;
err_out:
+ lpfc_els_free_iocb(phba, elsiocb);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0256 Issue FDISC: Cannot send IOCB\n");
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ u32 ulp_status, ulp_word4, did, tmo;
ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
- irsp = &rspiocb->iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ did = get_job_els_rsp64_did(phba, rspiocb);
+ tmo = irsp->ulpTimeout;
+ }
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"LOGO npiv cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
+ ulp_status, ulp_word4, did);
/* NPIV LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2928 NPIV LOGO completes to NPort x%x "
"Data: x%x x%x x%x x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, vport->num_disc_nodes,
+ ndlp->nlp_DID, ulp_status, ulp_word4,
+ tmo, vport->num_disc_nodes,
kref_read(&ndlp->kref), ndlp->nlp_flag,
ndlp->fc4_xpt_flags);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_NDISC_ACTIVE;
vport->fc_flag &= ~FC_FABRIC;
"Issue LOGO npiv did:x%x flg:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, 0);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo;
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
spin_unlock_irq(&ndlp->lock);
struct lpfc_iocbq *iocb;
unsigned long iflags;
int ret;
- IOCB_t *cmd;
repeat:
iocb = NULL;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (iocb) {
- iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
- iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
- iocb->iocb_flag |= LPFC_IO_FABRIC;
+ iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
+ iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->cmd_flag |= LPFC_IO_FABRIC;
lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
- "Fabric sched1: ste:x%x",
- iocb->vport->port_state, 0, 0);
+ "Fabric sched1: ste:x%x",
+ iocb->vport->port_state, 0, 0);
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
if (ret == IOCB_ERROR) {
- iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
- iocb->fabric_iocb_cmpl = NULL;
- iocb->iocb_flag &= ~LPFC_IO_FABRIC;
- cmd = &iocb->iocb;
- cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
- cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
- iocb->iocb_cmpl(phba, iocb, iocb);
+ iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
+ iocb->fabric_cmd_cmpl = NULL;
+ iocb->cmd_flag &= ~LPFC_IO_FABRIC;
+ set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT);
+ iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
+ iocb->cmd_cmpl(phba, iocb, iocb);
atomic_dec(&phba->fabric_iocb_count);
goto repeat;
* @rspiocb: pointer to lpfc response iocb data structure.
*
* This routine is the callback function that is put to the fabric iocb's
- * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
- * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
+ * callback function pointer (iocb->cmd_cmpl). The original iocb's callback
+ * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback
* function first restores and invokes the original iocb's callback function
* and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
* fabric bound iocb from the driver internal fabric iocb list onto the wire.
**/
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+ struct lpfc_iocbq *rspiocb)
{
struct ls_rjt stat;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
- BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
+ WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
- switch (rspiocb->iocb.ulpStatus) {
+ switch (ulp_status) {
case IOSTAT_NPORT_RJT:
case IOSTAT_FABRIC_RJT:
- if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ if (ulp_word4 & RJT_UNAVAIL_TEMP)
lpfc_block_fabric_iocbs(phba);
- }
break;
case IOSTAT_NPORT_BSY:
break;
case IOSTAT_LS_RJT:
- stat.un.lsRjtError =
- be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
+ stat.un.ls_rjt_error_be =
+ cpu_to_be32(ulp_word4);
if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
lpfc_block_fabric_iocbs(phba);
BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
- cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
- cmdiocb->fabric_iocb_cmpl = NULL;
- cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
- cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
+ cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl;
+ cmdiocb->fabric_cmd_cmpl = NULL;
+ cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
+ cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb);
atomic_dec(&phba->fabric_iocb_count);
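The kernel-doc above describes a classic interpose-and-restore chain: on issue, the original completion is parked in fabric_cmd_cmpl and cmd_cmpl is pointed at lpfc_cmpl_fabric_iocb; on completion (or on IOCB_ERROR) the original pointer is restored and invoked. A minimal standalone model of that chain, with all driver types reduced to stand-ins:

#include <stdio.h>

struct iocbq;
typedef void (*cmpl_fn)(struct iocbq *);

struct iocbq {
	cmpl_fn cmd_cmpl;
	cmpl_fn fabric_cmd_cmpl;
};

static void original_cmpl(struct iocbq *iocb)
{
	(void)iocb;
	printf("original completion ran\n");
}

static void fabric_cmpl(struct iocbq *iocb)
{
	/* restore and invoke the parked completion, as the driver does */
	iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
	iocb->fabric_cmd_cmpl = NULL;
	iocb->cmd_cmpl(iocb);
	/* ...the real driver then resumes the next fabric-bound iocb */
}

int main(void)
{
	struct iocbq iocb = { .cmd_cmpl = original_cmpl };

	/* issue path: interpose the fabric handler */
	iocb.fabric_cmd_cmpl = iocb.cmd_cmpl;
	iocb.cmd_cmpl = fabric_cmpl;

	iocb.cmd_cmpl(&iocb);	/* completion fires */
	return 0;
}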
if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
atomic_inc(&phba->fabric_iocb_count);
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (ready) {
- iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
- iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
- iocb->iocb_flag |= LPFC_IO_FABRIC;
+ iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
+ iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->cmd_flag |= LPFC_IO_FABRIC;
lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
- "Fabric sched2: ste:x%x",
- iocb->vport->port_state, 0, 0);
+ "Fabric sched2: ste:x%x",
+ iocb->vport->port_state, 0, 0);
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
if (ret == IOCB_ERROR) {
- iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
- iocb->fabric_iocb_cmpl = NULL;
- iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
+ iocb->fabric_cmd_cmpl = NULL;
+ iocb->cmd_flag &= ~LPFC_IO_FABRIC;
atomic_dec(&phba->fabric_iocb_count);
}
} else {
struct lpfc_vmid_priority_range *vmid_range = NULL;
u32 *data;
struct lpfc_dmabuf *dmabuf = cmdiocb->context2;
- IOCB_t *irsp = &rspiocb->iocb;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
u8 *pcmd, max_desc;
u32 len, i;
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
data[0], data[1]);
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
"6529 QFPA failed with status x%x x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ ulp_status, ulp_word4);
goto out;
}
*((u32 *)(pcmd)) = ELS_CMD_QFPA;
pcmd += 4;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_qfpa;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
}
inst_desc->word6 = cpu_to_be32(inst_desc->word6);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_uvem;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1) {
struct lpfc_nodelist *ndlp = icmdiocb->context1;
u8 *pcmd;
u32 *data;
- IOCB_t *irsp = &rspiocb->iocb;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
struct lpfc_dmabuf *dmabuf = icmdiocb->context2;
struct lpfc_vmid *vmid;
"4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
"4533 UVEM error status %x: %x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ ulp_status, ulp_word4);
goto out;
}
spin_lock(&phba->hbalock);
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
vport->port_state, vport->fc_ns_retry, vport->fc_flag);
/* Turn off discovery timer if its running */
- if (vport->fc_flag & FC_DISC_TMO) {
+ if (vport->fc_flag & FC_DISC_TMO ||
+ timer_pending(&vport->fc_disctmo)) {
spin_lock_irqsave(shost->host_lock, iflags);
vport->fc_flag &= ~FC_DISC_TMO;
spin_unlock_irqrestore(shost->host_lock, iflags);
struct lpfc_iocbq *iocb,
struct lpfc_nodelist *ndlp)
{
- IOCB_t *icmd = &iocb->iocb;
- struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_vport *vport = ndlp->vport;
+ u8 ulp_command;
+ u16 ulp_context;
+ u32 remote_id;
if (iocb->vport != vport)
return 0;
+ ulp_command = get_job_cmnd(phba, iocb);
+ ulp_context = get_job_ulpcontext(phba, iocb);
+ remote_id = get_job_els_rsp64_did(phba, iocb);
+
if (pring->ringno == LPFC_ELS_RING) {
- switch (icmd->ulpCommand) {
+ switch (ulp_command) {
case CMD_GEN_REQUEST64_CR:
if (iocb->context_un.ndlp == ndlp)
return 1;
fallthrough;
case CMD_ELS_REQUEST64_CR:
- if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
+ if (remote_id == ndlp->nlp_DID)
return 1;
fallthrough;
case CMD_XMIT_ELS_RSP64_CX:
(ndlp->nlp_flag & NLP_DELAY_TMO)) {
return 0;
}
- if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
+ if (ulp_context == ndlp->nlp_rpi)
return 1;
- }
}
return 0;
}
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
- IOCB_t *icmd;
struct lpfc_iocbq *iocb, *next_iocb;
struct lpfc_sli_ring *pring;
+ u32 ulp_command;
pring = lpfc_phba_elsring(phba);
if (unlikely(!pring))
*/
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
- if (iocb->context1 != ndlp) {
+ if (iocb->context1 != ndlp)
continue;
- }
- icmd = &iocb->iocb;
- if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
- (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+
+ ulp_command = get_job_cmnd(phba, iocb);
+
+ if (ulp_command == CMD_ELS_REQUEST64_CR ||
+ ulp_command == CMD_XMIT_ELS_RSP64_CX) {
list_move_tail(&iocb->list, &completions);
}
/* Next check the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
- if (iocb->context1 != ndlp) {
+ if (iocb->context1 != ndlp)
continue;
- }
- icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
- icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
+
+ ulp_command = get_job_cmnd(phba, iocb);
+
+ if (ulp_command == CMD_ELS_REQUEST64_CR ||
+ ulp_command == CMD_XMIT_ELS_RSP64_CX) {
lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
}
}
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
struct ls_rjt { /* Structure is in Big Endian format */
union {
+ __be32 ls_rjt_error_be;
uint32_t lsRjtError;
struct {
uint8_t lsRjtRsvd0; /* FC Word 0, bit 24:31 */
}
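Adding the __be32 ls_rjt_error_be member lets callers store the raw completion parameter with cpu_to_be32() (as the lpfc_cmpl_fabric_iocb hunk above does) and then read the byte-wise reason-code fields directly, since the structure is declared in big-endian order. A user-space model of that union, with htonl() standing in for cpu_to_be32() and field names simplified:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl() stands in for cpu_to_be32() */

union model_ls_rjt {
	uint32_t error_be;		/* big-endian image of FC word 0 */
	struct {
		uint8_t rsvd0;		/* FC Word 0, bits 31:24 */
		uint8_t rsn_code;	/* bits 23:16 */
		uint8_t rsn_code_exp;	/* bits 15:8 */
		uint8_t vendor;		/* bits 7:0 */
	} b;
};

int main(void)
{
	union model_ls_rjt rjt;

	rjt.error_be = htonl(0x00032100);	/* rsn 0x03, exp 0x21 */
	printf("rsn 0x%02x exp 0x%02x\n",
	       rjt.b.rsn_code, rjt.b.rsn_code_exp);
	return 0;
}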
/*
- * Determine if an IOCB failed because of a link event or firmware reset.
+ * Determine if a command failed because of a link event or firmware reset.
*/
-
static inline int
-lpfc_error_lost_link(IOCB_t *iocbp)
+lpfc_error_lost_link(u32 ulp_status, u32 ulp_word4)
{
- return (iocbp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- (iocbp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
- iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
- iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
+ return (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (ulp_word4 == IOERR_SLI_ABORTED ||
+ ulp_word4 == IOERR_LINK_DOWN ||
+ ulp_word4 == IOERR_SLI_DOWN));
}
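With the new signature the caller extracts ulp_status/ulp_word4 once (via get_job_ulpstatus()/get_job_word4()) and passes them in, so the predicate no longer touches the IOCB at all. A standalone check of the predicate; the constant values below are stand-ins, not the driver's real IOSTAT_/IOERR_ encodings:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

/* Stand-in values; the driver's real constants live in lpfc_hw.h */
#define IOSTAT_LOCAL_REJECT 0x3
#define IOERR_SLI_ABORTED   0x08
#define IOERR_LINK_DOWN     0x101
#define IOERR_SLI_DOWN      0x102

static inline int lpfc_error_lost_link(u32 ulp_status, u32 ulp_word4)
{
	return (ulp_status == IOSTAT_LOCAL_REJECT &&
		(ulp_word4 == IOERR_SLI_ABORTED ||
		 ulp_word4 == IOERR_LINK_DOWN ||
		 ulp_word4 == IOERR_SLI_DOWN));
}

int main(void)
{
	/* lost link: local reject + link down */
	printf("%d\n", lpfc_error_lost_link(IOSTAT_LOCAL_REJECT,
					    IOERR_LINK_DOWN));	/* 1 */
	/* normal completion: status 0 */
	printf("%d\n", lpfc_error_lost_link(0, 0));		/* 0 */
	return 0;
}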
#define MENLO_TRANSPORT_TYPE 0xfe
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+#define get_wqe_reqtag(x) (((x)->wqe.words[9] >> 0) & 0xFFFF)
+#define get_wqe_tmo(x) (((x)->wqe.words[7] >> 24) & 0x00FF)
+
+#define get_job_ulpword(x, y) ((x)->iocb.un.ulpWord[y])
+
+#define set_job_ulpstatus(x, y) bf_set(lpfc_wcqe_c_status, &(x)->wcqe_cmpl, y)
+#define set_job_ulpword4(x, y) ((&(x)->wcqe_cmpl)->parameter = y)
+
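The get_wqe_reqtag()/get_wqe_tmo() helpers are plain word-and-mask extractions from the 128-byte WQE image: the request tag lives in the low 16 bits of word 9 and the timeout in the top byte of word 7. A user-space check of the same arithmetic against stand-in structs:

#include <stdio.h>
#include <stdint.h>

struct model_wqe { uint32_t words[16]; };
struct model_iocbq { struct model_wqe wqe; };

/* Same extraction as the macros above, against the model structs */
#define get_wqe_reqtag(x) (((x)->wqe.words[9] >> 0) & 0xFFFF)
#define get_wqe_tmo(x)    (((x)->wqe.words[7] >> 24) & 0x00FF)

int main(void)
{
	struct model_iocbq iocb = { { { 0 } } };

	iocb.wqe.words[9] = 0xABCD;	/* request tag in the low 16 bits */
	iocb.wqe.words[7] = 30u << 24;	/* timeout in the top byte */
	printf("tag 0x%x tmo %u\n",
	       get_wqe_reqtag(&iocb), get_wqe_tmo(&iocb));
	return 0;
}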
struct dma_address {
uint32_t addr_lo;
uint32_t addr_hi;
/* PORT_CAPABILITIES constants. */
#define LPFC_MAX_SUPPORTED_PAGES 8
+enum ulp_bde64_word3 {
+ ULP_BDE64_SIZE_MASK = 0xffffff,
+
+ ULP_BDE64_TYPE_SHIFT = 24,
+ ULP_BDE64_TYPE_MASK = (0xff << ULP_BDE64_TYPE_SHIFT),
+
+ /* BDE (Host_resident) */
+ ULP_BDE64_TYPE_BDE_64 = (0x00 << ULP_BDE64_TYPE_SHIFT),
+ /* Immediate Data BDE */
+ ULP_BDE64_TYPE_BDE_IMMED = (0x01 << ULP_BDE64_TYPE_SHIFT),
+ /* BDE (Port-resident) */
+ ULP_BDE64_TYPE_BDE_64P = (0x02 << ULP_BDE64_TYPE_SHIFT),
+ /* Input BDE (Host-resident) */
+ ULP_BDE64_TYPE_BDE_64I = (0x08 << ULP_BDE64_TYPE_SHIFT),
+ /* Input BDE (Port-resident) */
+ ULP_BDE64_TYPE_BDE_64IP = (0x0A << ULP_BDE64_TYPE_SHIFT),
+ /* BLP (Host-resident) */
+ ULP_BDE64_TYPE_BLP_64 = (0x40 << ULP_BDE64_TYPE_SHIFT),
+ /* BLP (Port-resident) */
+ ULP_BDE64_TYPE_BLP_64P = (0x42 << ULP_BDE64_TYPE_SHIFT),
+};
+
+struct ulp_bde64_le {
+ __le32 type_size; /* type 31:24, size 23:0 */
+ __le32 addr_low;
+ __le32 addr_high;
+};
+
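The new little-endian BDE view packs the type into bits 31:24 of word3 and the size into the low 24 bits; note the enum values above already carry the shift. A tiny user-space pack/unpack check under the same masks (constants renamed to mark them as a model):

#include <stdio.h>
#include <stdint.h>

#define MODEL_BDE64_SIZE_MASK  0xffffff
#define MODEL_BDE64_TYPE_SHIFT 24

int main(void)
{
	uint32_t size = 2048;		/* buffer length, 24 bits */
	uint32_t type = 0x40;		/* BLP (Host-resident), pre-shift */
	uint32_t type_size = (type << MODEL_BDE64_TYPE_SHIFT) |
			     (size & MODEL_BDE64_SIZE_MASK);

	printf("type 0x%02x size %u\n",
	       (unsigned)(type_size >> MODEL_BDE64_TYPE_SHIFT),
	       (unsigned)(type_size & MODEL_BDE64_SIZE_MASK));
	return 0;
}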
struct ulp_bde64 {
union ULP_BDE_TUS {
uint32_t w;
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
/**
* lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
- * cfg_soft_wwnn, cfg_soft_wwpn
* @vport: pointer to lpfc vport data structure.
*
*
uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];
- /* If the soft name exists then update it using the service params */
- if (vport->phba->cfg_soft_wwnn)
- u64_to_wwn(vport->phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (vport->phba->cfg_soft_wwpn)
- u64_to_wwn(vport->phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
-
/*
* If the name is empty then copy the service params name,
* otherwise use the fc name
*/
- if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+ if (vport->fc_nodename.u.wwn[0] == 0)
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
else
vport->vport_flag |= FAWWPN_PARAM_CHG;
if (vport->fc_portname.u.wwn[0] == 0 ||
- vport->phba->cfg_soft_wwpn ||
(vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
vport->vport_flag & FAWWPN_SET) {
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
spin_lock_irq(&pring->ring_lock);
list_for_each_entry_safe(piocb, next_iocb,
&pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
list_splice_init(&pring->txcmplq, &completions);
pring->txcmplq_cnt = 0;
spin_unlock_irq(&pring->ring_lock);
}
/**
- * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
+ * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
* @phba: pointer to lpfc hba data structure.
* @pring: pointer to a IOCB ring.
* @cnt: the number of IOCBs to be posted to the IOCB ring.
* The number of IOCBs NOT able to be posted to the IOCB ring.
**/
int
-lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
+lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
IOCB_t *icmd;
struct lpfc_iocbq *iocb;
struct lpfc_sli *psli = &phba->sli;
/* Ring 0, ELS / CT buffers */
- lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
+ lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
/* Ring 2 - FCP no buffers needed */
return 0;
qp = &phba->sli4_hba.hdwq[idx];
lpfc_cmd->hdwq_no = idx;
lpfc_cmd->hdwq = qp;
- lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
- lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
+ lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
spin_lock(&qp->io_buf_list_put_lock);
list_add_tail(&lpfc_cmd->list,
&qp->lpfc_io_buf_list_put);
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
if (phba->wq) {
- flush_workqueue(phba->wq);
destroy_workqueue(phba->wq);
phba->wq = NULL;
}
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
/*
- * SLI-3, Message Signaled Interrupt Fearure.
+ * SLI-3, Message Signaled Interrupt Feature.
*/
/* Multi-message attention configuration */
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
struct lpfc_dmabuf *pcmd, *prsp;
uint32_t *lp;
void *ptr = NULL;
- IOCB_t *irsp;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- irsp = &rspiocb->iocb;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
/* For lpfc_els_abort, context2 could be zero'ed to delay
ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
}
} else {
- /* Force ulpStatus error since we are returning NULL ptr */
- if (!(irsp->ulpStatus)) {
- irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
- irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ /* Force ulp_status error since we are returning NULL ptr */
+ if (!(ulp_status)) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(lpfc_wcqe_c_status, &rspiocb->wcqe_cmpl,
+ IOSTAT_LOCAL_REJECT);
+ rspiocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
+ } else {
+ rspiocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+ rspiocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
+ }
}
ptr = NULL;
}
struct lpfc_dmabuf *mp;
uint64_t nlp_portwwn = 0;
uint32_t *lp;
+ union lpfc_wqe128 *wqe;
IOCB_t *icmd;
struct serv_parm *sp;
uint32_t ed_tov;
struct ls_rjt stat;
uint32_t vid, flag;
int rc;
+ u32 remote_did;
memset(&stat, 0, sizeof (struct ls_rjt));
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
NULL);
return 0;
}
- icmd = &cmdiocb->iocb;
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ wqe = &cmdiocb->wqe;
+ else
+ icmd = &cmdiocb->iocb;
/* PLOGI chkparm OK */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
/* rcv'ed PLOGI decides what our NPortId will be */
- vport->fc_myDID = icmd->un.rcvels.parmRo;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ vport->fc_myDID = bf_get(els_rsp64_sid,
+ &cmdiocb->wqe.xmit_els_rsp);
+ } else {
+ vport->fc_myDID = icmd->un.rcvels.parmRo;
+ }
/* If there is an outstanding FLOGI, abort it now.
* The remote NPort is not going to ACC our FLOGI
/* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
* always be deferring the ACC.
*/
- rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ remote_did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
+ else
+ remote_did = icmd->un.rcvels.remoteID;
+ rc = lpfc_reg_rpi(phba, vport->vpi, remote_did,
(uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
if (rc)
goto out;
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
struct lpfc_dmabuf *pcmd;
struct serv_parm *sp;
struct lpfc_name *pnn, *ppn;
struct ls_rjt stat;
ADISC *ap;
- IOCB_t *icmd;
uint32_t *lp;
uint32_t cmd;
ppn = (struct lpfc_name *) & sp->portName;
}
- icmd = &cmdiocb->iocb;
- if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
+ if (get_job_ulpstatus(phba, cmdiocb) == 0 &&
+ lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
/*
* As soon as we send ACC, the remote NPort can
elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
GFP_KERNEL);
if (elsiocb) {
-
/* Save info from cmd IOCB used in rsp */
memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
sizeof(struct lpfc_iocbq));
struct lpfc_dmabuf *pcmd, *prsp, *mp;
uint32_t *lp;
uint32_t vid, flag;
- IOCB_t *irsp;
struct serv_parm *sp;
uint32_t ed_tov;
LPFC_MBOXQ_t *mbox;
int rc;
+ u32 ulp_status;
+ u32 did;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+
if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
/* Recovery from PLOGI collision logic */
return ndlp->nlp_state;
}
- irsp = &rspiocb->iocb;
-
- if (irsp->ulpStatus)
+ if (ulp_status)
goto out;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
goto out;
}
- if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (lpfc_reg_rpi(phba, vport->vpi, did,
(uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
switch (ndlp->nlp_DID) {
case NameServer_DID:
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
ADISC *ap;
int rc;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+
ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
- irsp = &rspiocb->iocb;
- if ((irsp->ulpStatus) ||
+ if ((ulp_status) ||
(!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc,
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_hba *phba = vport->phba;
- IOCB_t *irsp;
PRLI *npr;
struct lpfc_nvme_prli *nvpr;
void *temp_ptr;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+
/* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp
* format is different, so NULL the two PRLI types so that the
* driver gets the correct context.
npr = NULL;
nvpr = NULL;
temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
- if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ)
+ if (cmdiocb->cmd_flag & LPFC_PRLI_FCP_REQ)
npr = (PRLI *) temp_ptr;
- else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ)
+ else if (cmdiocb->cmd_flag & LPFC_PRLI_NVME_REQ)
nvpr = (struct lpfc_nvme_prli *) temp_ptr;
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus) {
+ if (ulp_status) {
if ((vport->port_type == LPFC_NPIV_PORT) &&
vport->cfg_restrict_login) {
goto out;
lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus) {
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+
+ if (ulp_status)
return NLP_STE_FREED_NODE;
- }
+
return ndlp->nlp_state;
}
lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+
+ if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+
+ if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_vport *vport = cmdwqe->vport;
struct lpfc_nvme_lport *lport;
uint32_t status;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
struct lpfc_dmabuf *inp,
struct nvmefc_ls_req *pnvme_lsreq,
void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_wcqe_complete *),
+ struct lpfc_iocbq *),
struct lpfc_nodelist *ndlp, uint32_t num_entry,
uint32_t tmo, uint8_t retry)
{
memset(wqe, 0, sizeof(union lpfc_wqe));
genwqe->context3 = (uint8_t *)bmp;
- genwqe->iocb_flag |= LPFC_IO_NVME_LS;
+ genwqe->cmd_flag |= LPFC_IO_NVME_LS;
/* Save for completion so we can release these resources */
genwqe->context1 = lpfc_nlp_get(ndlp);
first_len = xmit_len;
}
- genwqe->rsvd2 = num_entry;
+ genwqe->num_bdes = num_entry;
genwqe->hba_wqidx = 0;
/* Words 0 - 2 */
/* Issue GEN REQ WQE for NPORT <did> */
- genwqe->wqe_cmpl = cmpl;
- genwqe->iocb_cmpl = NULL;
+ genwqe->cmd_cmpl = cmpl;
genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
genwqe->vport = vport;
genwqe->retry = retry;
struct nvmefc_ls_req *pnvme_lsreq,
void (*gen_req_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe))
+ struct lpfc_iocbq *rspwqe))
{
struct lpfc_dmabuf *bmp;
struct ulp_bde64 *bpl;
spin_lock(&pring->ring_lock);
list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
if (wqe->context2 == pnvme_lsreq) {
- wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
+ wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
foundit = true;
break;
}
/*
- * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
+ * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO
*
* Driver registers this routine as its I/O request handler. This
* routine issues an FCP WQE with data from the @lpfc_nvme_fcpreq
* TODO: What are the failure codes.
**/
static void
-lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
- struct lpfc_wcqe_complete *wcqe)
+lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ struct lpfc_iocbq *pwqeOut)
{
struct lpfc_io_buf *lpfc_ncmd =
(struct lpfc_io_buf *)pwqeIn->context1;
+ struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
struct lpfc_vport *vport = pwqeIn->vport;
struct nvmefc_fcp_req *nCmd;
struct nvme_fc_ersp_iu *ep;
"6145 ABORT_XRI_CN completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
"req_tag x%x, status x%x, hwstatus x%x\n",
- cmdiocb->iocb.un.acxri.abortContextTag,
- cmdiocb->iocb.un.acxri.abortIoTag,
- cmdiocb->iotag,
+ bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com),
+ get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag,
bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
bf_get(lpfc_wcqe_c_status, abts_cmpl),
bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
}
/* Don't abort IOs no longer on the pending queue. */
- if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6142 NVME IO req x%px not queued - skipping "
"abort req xri x%x\n",
nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
/* Outstanding abort is in progress */
- if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6144 Outstanding NVME I/O Abort Request "
"still pending on nvme_fcreq x%px, "
/* Setup key fields in buffer that may have been changed
* if other protocols used this buffer.
*/
- pwqeq->iocb_flag = LPFC_IO_NVME;
- pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
+ pwqeq->cmd_flag = LPFC_IO_NVME;
+ pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl;
lpfc_ncmd->start_time = jiffies;
lpfc_ncmd->flags = 0;
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
bf_set(lpfc_wcqe_c_xb, wcqep, 1);
- (pwqeIn->wqe_cmpl)(phba, pwqeIn, wcqep);
+ memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep));
+ (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);
#endif
}
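When the driver must complete an I/O locally, as in the flush and offline paths here and in the nvmet hunks below, it builds a WCQE on the stack, copies it into the iocbq's embedded wcqe_cmpl, and invokes cmd_cmpl() with the iocbq standing in for both command and response. A minimal sketch of that pattern, with an illustrative helper name:

/* Sketch: deliver a locally synthesized completion to cmd_cmpl(). */
static void fake_cmpl_sketch(struct lpfc_hba *phba,
                             struct lpfc_iocbq *piocbq,
                             struct lpfc_wcqe_complete *wcqep)
{
        memcpy(&piocbq->wcqe_cmpl, wcqep, sizeof(*wcqep));
        if (piocbq->cmd_cmpl)
                piocbq->cmd_cmpl(phba, piocbq, piocbq);
}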
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
struct nvmefc_ls_req *pnvme_lsreq,
void (*gen_req_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe));
+ struct lpfc_iocbq *rspwqe));
void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
int __lpfc_nvme_ls_abort(struct lpfc_vport *vport,
struct nvmefc_ls_rsp *ls_rsp,
void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe));
+ struct lpfc_iocbq *rspwqe));
void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba,
- struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
+ struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe);
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* transmission of an NVME LS response.
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. The function frees memory resources used for the command
**/
void
__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
uint32_t status, result;
* lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME LS commands
**/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_nvmet_tgtport *tgtp;
uint32_t status, result;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
if (!phba->targetport)
goto finish;
}
finish:
- __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
+ __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, rspwqe);
}
/**
* lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME FCP commands
**/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_nvmet_tgtport *tgtp;
struct nvmefc_tgt_fcp_req *rsp;
struct lpfc_async_xchg_ctx *ctxp;
uint32_t status, result, op, start_clean, logerr;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int id;
#endif
/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
} else {
ctxp->entry_cnt++;
- start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
+ start_clean = offsetof(struct lpfc_iocbq, cmd_flag);
memset(((char *)cmdwqe) + start_clean, 0,
(sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct nvmefc_ls_rsp *ls_rsp,
void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe))
+ struct lpfc_iocbq *rspwqe))
{
struct lpfc_hba *phba = axchg->phba;
struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
}
/* Save numBdes for bpl2sgl */
- nvmewqeq->rsvd2 = 1;
+ nvmewqeq->num_bdes = 1;
nvmewqeq->hba_wqidx = 0;
nvmewqeq->context3 = &dmabuf;
dmabuf.virt = &bpl;
* be referenced after it returns to this routine.
*/
- nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
- nvmewqeq->iocb_cmpl = NULL;
+ nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp;
nvmewqeq->context2 = axchg;
lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
goto aerr;
}
- nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
- nvmewqeq->iocb_cmpl = NULL;
+ nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
nvmewqeq->context2 = ctxp;
- nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
+ nvmewqeq->cmd_flag |= LPFC_IO_NVMET;
ctxp->wqeq->hba_wqidx = rsp->hwqid;
lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
* lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
* @phba: Pointer to HBA context object
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* This function is the completion handler for NVME LS requests.
* The function updates any states and statistics, then calls the
**/
static void
lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
__lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
}
"6406 Ran out of NVMET iocb/WQEs\n");
return -ENOMEM;
}
- ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
+ ctx_buf->iocbq->cmd_flag = LPFC_IO_NVMET;
nvmewqe = ctx_buf->iocbq;
wqe = &nvmewqe->wqe;
list_del(&nvmewqeq->list);
spin_unlock_irqrestore(&pring->ring_lock,
iflags);
+ memcpy(&nvmewqeq->wcqe_cmpl, wcqep,
+ sizeof(*wcqep));
lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
- wcqep);
+ nvmewqeq);
return;
}
continue;
/* Flush all IOs */
list_del(&nvmewqeq->list);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
+ memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep));
+ lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq);
spin_lock_irqsave(&pring->ring_lock, iflags);
}
}
nvmewqe->retry = 1;
nvmewqe->vport = phba->pport;
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
- nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
+ nvmewqe->cmd_flag |= LPFC_IO_NVME_LS;
/* Xmit NVMET response to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
* lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME ABTS for FCP cmds
**/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
uint32_t result;
unsigned long flags;
bool released = false;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
ctxp = cmdwqe->context2;
result = wcqe->parameter;
* lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME ABTS for FCP cmds
**/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
unsigned long flags;
uint32_t result;
bool released = false;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
ctxp = cmdwqe->context2;
result = wcqe->parameter;
* lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME ABTS for LS cmds
**/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
uint32_t result;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
ctxp = cmdwqe->context2;
result = wcqe->parameter;
abts_wqeq->context1 = ndlp;
abts_wqeq->context2 = ctxp;
abts_wqeq->context3 = NULL;
- abts_wqeq->rsvd2 = 0;
+ abts_wqeq->num_bdes = 0;
/* hba_wqidx should already be setup from command we are aborting */
abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
abts_wqeq->iocb.ulpLe = 1;
}
/* Outstanding abort is in progress */
- if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
}
/* Ready - mark outstanding as aborted by driver. */
- abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
+ abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED;
lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
- abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = NULL;
- abts_wqeq->iocb_flag |= LPFC_IO_NVME;
+ abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
+ abts_wqeq->cmd_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
abts_wqeq->vport = phba->pport;
if (!ctxp->hdwq)
spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq = ctxp->wqeq;
- abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = NULL;
- abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
+ abts_wqeq->cmd_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
+ abts_wqeq->cmd_flag |= LPFC_IO_NVMET;
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
}
spin_lock_irqsave(&phba->hbalock, flags);
- abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
- abts_wqeq->iocb_cmpl = NULL;
- abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
+ abts_wqeq->cmd_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
+ abts_wqeq->cmd_flag |= LPFC_IO_NVME_LS;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
kfree(psb);
break;
}
- psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+ psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP;
psb->fcp_cmnd = psb->data;
psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
&qp->lpfc_abts_io_buf_list, list) {
- if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
+ if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)
continue;
if (psb->rdata && psb->rdata->pnode &&
list_del_init(&psb->list);
psb->flags &= ~LPFC_SBUF_XBUSY;
psb->status = IOSTAT_SUCCESS;
- if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
+ if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
* for command completion wake up the thread.
*/
spin_lock_irqsave(&psb->buf_lock, iflag);
- psb->cur_iocbq.iocb_flag &=
+ psb->cur_iocbq.cmd_flag &=
~LPFC_DRIVER_ABORTED;
if (psb->waitq)
wake_up(psb->waitq);
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- (iocbq->iocb_flag & LPFC_IO_LIBDFC))
+ if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
+ (iocbq->cmd_flag & LPFC_IO_LIBDFC))
continue;
if (iocbq->sli4_xritag != xri)
continue;
/* Setup key fields in buffer that may have been changed
* if other protocols used this buffer.
*/
- lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+ lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP;
lpfc_cmd->prot_seg_cnt = 0;
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->timeout = 0;
spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
psb->pCmd = NULL;
- psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+ psb->cur_iocbq.cmd_flag = LPFC_IO_FCP;
list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}
physaddr = sg_dma_address(sgel);
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
- !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
+ !(iocbq->cmd_flag & DSS_SECURITY_OP) &&
nseg <= LPFC_EXT_DATA_BDE_COUNT) {
data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
data_bde->tus.f.bdeSize = sg_dma_len(sgel);
*/
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
- !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
+ !(iocbq->cmd_flag & DSS_SECURITY_OP)) {
if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
/*
* The extended IOCB format can only fit 3 BDE or a BPL.
* -1 - Internal error (bad profile, ...etc)
*/
static int
-lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
- struct lpfc_wcqe_complete *wcqe)
+lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
+ struct lpfc_iocbq *pIocbOut)
{
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ struct sli3_bg_fields *bgf;
int ret = 0;
- u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
+ struct lpfc_wcqe_complete *wcqe;
+ u32 status;
u32 bghm = 0;
u32 bgstat = 0;
u64 failing_sector = 0;
- if (status == CQE_STATUS_DI_ERROR) {
- if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
- bgstat |= BGS_GUARD_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */
- bgstat |= BGS_APPTAG_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */
- bgstat |= BGS_REFTAG_ERR_MASK;
-
- /* Check to see if there was any good data before the error */
- if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
- bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
- bghm = wcqe->total_data_placed;
- }
-
- /*
- * Set ALL the error bits to indicate we don't know what
- * type of error it is.
- */
- if (!bgstat)
- bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
- BGS_GUARD_ERR_MASK);
- }
-
- if (lpfc_bgs_get_guard_err(bgstat)) {
- ret = 1;
-
- scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
- set_host_byte(cmd, DID_ABORT);
- phba->bg_guard_err_cnt++;
- lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9059 BLKGRD: Guard Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
- "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- scsi_logical_block_count(cmd), bgstat, bghm);
- }
-
- if (lpfc_bgs_get_reftag_err(bgstat)) {
- ret = 1;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wcqe = &pIocbOut->wcqe_cmpl;
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
- scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
- set_host_byte(cmd, DID_ABORT);
+ if (status == CQE_STATUS_DI_ERROR) {
+ /* Guard Check failed */
+ if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
+ bgstat |= BGS_GUARD_ERR_MASK;
- phba->bg_reftag_err_cnt++;
- lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9060 BLKGRD: Ref Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
- "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- scsi_logical_block_count(cmd), bgstat, bghm);
- }
+ /* AppTag Check failed */
+ if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
+ bgstat |= BGS_APPTAG_ERR_MASK;
- if (lpfc_bgs_get_apptag_err(bgstat)) {
- ret = 1;
+ /* RefTag Check failed */
+ if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
+ bgstat |= BGS_REFTAG_ERR_MASK;
- scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
- set_host_byte(cmd, DID_ABORT);
-
- phba->bg_apptag_err_cnt++;
- lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9062 BLKGRD: App Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
- "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- scsi_logical_block_count(cmd), bgstat, bghm);
- }
-
- if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
- /*
- * setup sense data descriptor 0 per SPC-4 as an information
- * field, and put the failing LBA in it.
- * This code assumes there was also a guard/app/ref tag error
- * indication.
- */
- cmd->sense_buffer[7] = 0xc; /* Additional sense length */
- cmd->sense_buffer[8] = 0; /* Information descriptor type */
- cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
- cmd->sense_buffer[10] = 0x80; /* Validity bit */
+ /* Check to see if there was any good data before the
+ * error
+ */
+ if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
+ bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
+ bghm = wcqe->total_data_placed;
+ }
- /* bghm is a "on the wire" FC frame based count */
- switch (scsi_get_prot_op(cmd)) {
- case SCSI_PROT_READ_INSERT:
- case SCSI_PROT_WRITE_STRIP:
- bghm /= cmd->device->sector_size;
- break;
- case SCSI_PROT_READ_STRIP:
- case SCSI_PROT_WRITE_INSERT:
- case SCSI_PROT_READ_PASS:
- case SCSI_PROT_WRITE_PASS:
- bghm /= (cmd->device->sector_size +
- sizeof(struct scsi_dif_tuple));
- break;
+ /*
+ * Set ALL the error bits to indicate we don't know what
+ * type of error it is.
+ */
+ if (!bgstat)
+ bgstat |= (BGS_REFTAG_ERR_MASK |
+ BGS_APPTAG_ERR_MASK |
+ BGS_GUARD_ERR_MASK);
}
- failing_sector = scsi_get_lba(cmd);
- failing_sector += bghm;
-
- /* Descriptor Information */
- put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
- }
-
- if (!ret) {
- /* No error was reported - problem in FW? */
- lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9068 BLKGRD: Unknown error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
- "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- scsi_logical_block_count(cmd), bgstat, bghm);
-
- /* Calculate what type of error it was */
- lpfc_calc_bg_err(phba, lpfc_cmd);
+ } else {
+ bgf = &pIocbOut->iocb.unsli3.sli3_bg;
+ bghm = bgf->bghm;
+ bgstat = bgf->bgstat;
}
- return ret;
-}
-
-/*
- * This function checks for BlockGuard errors detected by
- * the HBA. In case of errors, the ASC/ASCQ fields in the
- * sense buffer will be set accordingly, paired with
- * ILLEGAL_REQUEST to signal to the kernel that the HBA
- * detected corruption.
- *
- * Returns:
- * 0 - No error found
- * 1 - BlockGuard error found
- * -1 - Internal error (bad profile, ...etc)
- */
-static int
-lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
- struct lpfc_iocbq *pIocbOut)
-{
- struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
- struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
- int ret = 0;
- uint32_t bghm = bgf->bghm;
- uint32_t bgstat = bgf->bgstat;
- uint64_t failing_sector = 0;
if (lpfc_bgs_get_invalid_prof(bgstat)) {
cmd->result = DID_ERROR << 16;
if (lpfc_bgs_get_guard_err(bgstat)) {
ret = 1;
-
scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
set_host_byte(cmd, DID_ABORT);
phba->bg_guard_err_cnt++;
if (lpfc_bgs_get_reftag_err(bgstat)) {
ret = 1;
-
scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
set_host_byte(cmd, DID_ABORT);
-
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9056 BLKGRD: Ref Tag error in cmd "
if (lpfc_bgs_get_apptag_err(bgstat)) {
ret = 1;
-
scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
set_host_byte(cmd, DID_ABORT);
-
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9061 BLKGRD: App Tag error in cmd "
*/
if ((phba->cfg_fof) && ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->oas_enabled) {
- lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+ lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->priority;
switch (scsi_get_prot_op(scsi_cmnd)) {
case SCSI_PROT_WRITE_STRIP:
case SCSI_PROT_READ_STRIP:
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
+ lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP;
break;
case SCSI_PROT_WRITE_INSERT:
case SCSI_PROT_READ_INSERT:
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
+ lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT;
break;
case SCSI_PROT_WRITE_PASS:
case SCSI_PROT_READ_PASS:
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
+ lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS;
break;
}
*/
if ((phba->cfg_fof) && ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->oas_enabled) {
- lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+ lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
/* Word 10 */
bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
}
/* Word 7. DIF Flags */
- if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS)
+ if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS)
bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
- else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP)
+ else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP)
bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
- else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT)
+ else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT)
bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
- lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS |
+ lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS |
LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
return 0;
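DIF handling is a two-step translation: command prep latches the SCSI protection op into cmd_flag, and WQE setup maps those bits into word-7 DIF values before clearing them. Collapsed into one table, the mapping reads roughly as below (helper name illustrative; flag and macro names as in the hunks above):

/* Sketch: cmd_flag DIF bits to WQE word-7 DIF values. */
static u32 wqe_dif_bits_sketch(u32 cmd_flag)
{
        if (cmd_flag & LPFC_IO_DIF_PASS)
                return LPFC_WQE_DIF_PASSTHRU;
        if (cmd_flag & LPFC_IO_DIF_STRIP)
                return LPFC_WQE_DIF_STRIP;
        if (cmd_flag & LPFC_IO_DIF_INSERT)
                return LPFC_WQE_DIF_INSERT;
        return 0;
}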
* lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
* @phba: The hba for which this call is being executed.
* @pwqeIn: The command WQE for the scsi cmnd.
- * @wcqe: Pointer to driver response CQE object.
+ * @pwqeOut: Pointer to driver response WQE object.
*
* This routine assigns the SCSI command result by examining the response WQE
* status field. This routine handles the QUEUE FULL condition as
**/
static void
lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *pwqeOut)
{
struct lpfc_io_buf *lpfc_cmd =
(struct lpfc_io_buf *)pwqeIn->context1;
+ struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
struct lpfc_vport *vport = pwqeIn->vport;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
u32 logit = LOG_FCP;
u32 status, idx;
- unsigned long iflags = 0;
u32 lat;
u8 wait_xb_clr = 0;
rdata = lpfc_cmd->rdata;
ndlp = rdata->pnode;
- if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
- /* TOREMOVE - currently this flag is checked during
- * the release of lpfc_iocbq. Remove once we move
- * to lpfc_wqe_job construct.
- *
- * This needs to be done outside buf_lock
- */
- spin_lock_irqsave(&phba->hbalock, iflags);
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-
- /* Guard against abort handler being called at same time */
- spin_lock(&lpfc_cmd->buf_lock);
-
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
if (!cmd) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"9042 I/O completion: Not an active IO\n");
- spin_unlock(&lpfc_cmd->buf_lock);
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
}
+ /* Guard against abort handler being called at same time */
+ spin_lock(&lpfc_cmd->buf_lock);
idx = lpfc_cmd->cur_iocbq.hba_wqidx;
if (phba->sli4_hba.hdwq)
phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
* This is a response for a BG enabled
* cmd. Parse BG error
*/
- lpfc_sli4_parse_bg_err(phba, lpfc_cmd,
- wcqe);
+ lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
break;
+ } else {
+ lpfc_printf_vlog(vport, KERN_WARNING,
+ LOG_BG,
+ "9040 non-zero BGSTAT "
+ "on unprotected cmd\n");
}
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9040 non-zero BGSTAT on unprotected cmd\n");
}
lpfc_printf_vlog(vport, KERN_WARNING, logit,
"9036 Local Reject FCP cmd x%x failed"
* wake up the thread.
*/
spin_lock(&lpfc_cmd->buf_lock);
- lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock(&lpfc_cmd->buf_lock);
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
/* pick up SLI4 exchange busy status from HBA */
lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
- if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
+ if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY)
lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
* wake up the thread.
*/
spin_lock(&lpfc_cmd->buf_lock);
- lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock(&lpfc_cmd->buf_lock);
piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
piocbq->context1 = lpfc_cmd;
- if (!piocbq->iocb_cmpl)
- piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+ if (!piocbq->cmd_cmpl)
+ piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
piocbq->iocb.ulpTimeout = tmo;
piocbq->vport = vport;
return 0;
pwqeq->vport = vport;
pwqeq->context1 = lpfc_cmd;
pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
- pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
+ pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
return 0;
}
}
/**
- * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
+ * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_io_buf data structure.
* @lun: Logical unit number.
* 1 - Success
**/
static int
-lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
- struct lpfc_io_buf *lpfc_cmd,
- uint64_t lun,
- uint8_t task_mgmt_cmd)
+lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ u64 lun, u8 task_mgmt_cmd)
{
struct lpfc_iocbq *piocbq;
IOCB_t *piocb;
memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
- if (vport->phba->sli_rev == 3 &&
- !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+ if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
piocb->ulpContext = ndlp->nlp_rpi;
- if (vport->phba->sli_rev == LPFC_SLI_REV4) {
- piocb->ulpContext =
- vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- }
piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
piocb->ulpPU = 0;
} else
piocb->ulpTimeout = lpfc_cmd->timeout;
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
- lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
+ return 1;
+}
+
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd
+ * for device with SLI-4 interface spec.
+ *
+ * Return codes:
+ * 0 - Error
+ * 1 - Success
+ **/
+static int
+lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ u64 lun, u8 task_mgmt_cmd)
+{
+ struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+ struct fcp_cmnd *fcp_cmnd;
+ struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+
+ if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
+ return 0;
+
+ pwqeq->vport = vport;
+ /* Initialize 64 bytes only */
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /* From the icmnd template, initialize words 4 - 11 */
+ memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
+ sizeof(uint32_t) * 8);
+
+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ /* Clear out any old data in the FCP command area */
+ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
+ fcp_cmnd->fcpCntl3 = 0;
+ fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
+
+ bf_set(payload_offset_len, &wqe->fcp_icmd,
+ sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_icmd, 0);
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, /* ulpContext */
+ vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
+ ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0));
+ bf_set(wqe_class, &wqe->fcp_icmd.wqe_com,
+ (ndlp->nlp_fcp_info & 0x0f));
+
+ /* ulpTimeout is only one byte */
+ if (lpfc_cmd->timeout > 0xff) {
+ /*
+ * Do not timeout the command at the firmware level.
+ * The driver will provide the timeout mechanism.
+ */
+ bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0);
+ } else {
+ bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout);
+ }
+
+ lpfc_prep_embed_io(vport->phba, lpfc_cmd);
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
+ wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+
+ lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
return 1;
}
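With task-management prep now split into _s3 and _s4 variants, the driver selects one at setup time through a per-HBA method pointer, filled in by the assignments just below and called rev-agnostically from lpfc_send_taskmgmt() further down. A hedged sketch of the member this implies; the wrapping struct is illustrative, while the signature is taken from the variants above:

/* Sketch: per-rev task-management prep hook. */
struct lpfc_hba_methods_sketch {
        int (*lpfc_scsi_prep_task_mgmt_cmd)(struct lpfc_vport *vport,
                                            struct lpfc_io_buf *lpfc_cmd,
                                            u64 lun, u8 task_mgmt_cmd);
};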
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
+ phba->lpfc_scsi_prep_task_mgmt_cmd =
+ lpfc_scsi_prep_task_mgmt_cmd_s3;
break;
case LPFC_PCI_DEV_OC:
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
+ phba->lpfc_scsi_prep_task_mgmt_cmd =
+ lpfc_scsi_prep_task_mgmt_cmd_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cur_iocbq = NULL;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *ndlp;
struct lpfc_io_buf *lpfc_cmd;
}
lpfc_cmd->rx_cmd_start = start;
+ cur_iocbq = &lpfc_cmd->cur_iocbq;
/*
* Store the midlayer's command structure for the completion phase
* and complete the command initialization.
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
lpfc_cmd->ndlp = ndlp;
- lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
+ cur_iocbq->cmd_cmpl = NULL;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
goto out_host_busy_free_buf;
}
-
/* check the necessary and sufficient condition to support VMID */
if (lpfc_is_vmid_enabled(phba) &&
(ndlp->vmid_support ||
if (uuid) {
err = lpfc_vmid_get_appid(vport, uuid, cmnd,
(union lpfc_vmid_io_tag *)
- &lpfc_cmd->cur_iocbq.vmid_tag);
+ &cur_iocbq->vmid_tag);
if (!err)
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID;
+ cur_iocbq->cmd_flag |= LPFC_IO_VMID;
}
}
-
atomic_inc(&ndlp->cmd_pending);
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
#endif
/* Issue I/O to adapter */
- err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
- &lpfc_cmd->cur_iocbq,
+ err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq,
SLI_IOCB_RET_IOCB);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (start) {
#endif
if (err) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "3376 FCP could not issue IOCB err %x "
- "FCP cmd x%x <%d/%llu> "
- "sid: x%x did: x%x oxid: x%x "
- "Data: x%x x%x x%x x%x\n",
- err, cmnd->cmnd[0],
- cmnd->device ? cmnd->device->id : 0xffff,
- cmnd->device ? cmnd->device->lun : (u64)-1,
- vport->fc_myDID, ndlp->nlp_DID,
- phba->sli_rev == LPFC_SLI_REV4 ?
- lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
- phba->sli_rev == LPFC_SLI_REV4 ?
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
- lpfc_cmd->cur_iocbq.iocb.ulpContext,
- lpfc_cmd->cur_iocbq.iotag,
- phba->sli_rev == LPFC_SLI_REV4 ?
- bf_get(wqe_tmo,
- &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
- lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
- (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
+ "3376 FCP could not issue iocb err %x "
+ "FCP cmd x%x <%d/%llu> "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x x%x\n",
+ err, cmnd->cmnd[0],
+ cmnd->device ? cmnd->device->id : 0xffff,
+ cmnd->device ? cmnd->device->lun : (u64)-1,
+ vport->fc_myDID, ndlp->nlp_DID,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ cur_iocbq->sli4_xritag : 0xffff,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
+ cur_iocbq->iocb.ulpContext,
+ cur_iocbq->iotag,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ bf_get(wqe_tmo,
+ &cur_iocbq->wqe.generic.wqe_com) :
+ cur_iocbq->iocb.ulpTimeout,
+ (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
goto out_host_busy_free_buf;
}
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocb;
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
- status = fc_block_scsi_eh(cmnd);
+ status = fc_block_rport(rport);
if (status != 0 && status != SUCCESS)
return status;
spin_lock(&pring_s4->ring_lock);
}
/* the command is in the process of being cancelled */
- if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3169 SCSI Layer abort requested I/O has been "
"cancelled by LLD.\n");
BUG_ON(iocb->context1 != lpfc_cmd);
/* abort issued in recovery is still in progress */
- if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3389 SCSI Layer I/O Abort Request is pending\n");
if (phba->sli_rev == LPFC_SLI_REV4)
}
lpfc_cmd->waitq = &waitq;
- if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
- ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
- lpfc_sli4_abort_fcp_cmpl);
- } else {
+ else
pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
- ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
- lpfc_sli_abort_fcp_cmpl);
- }
+
+ ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ lpfc_sli_abort_fcp_cmpl);
/* Make sure HBA is alive */
lpfc_issue_hb_tmo(phba);
wait_for_cmpl:
/*
- * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait
+ * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
* for abort to complete.
*/
wait_event_timeout(waitq,
/**
* lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
* @vport: The virtual port for which this call is being executed.
- * @cmnd: Pointer to scsi_cmnd data structure.
+ * @rport: Pointer to remote port
* @tgt_id: Target ID of remote device.
* @lun_id: Lun number for the TMF
* @task_mgmt_cmd: type of TMF to send
* 0x2002 - Success.
**/
static int
-lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport,
unsigned int tgt_id, uint64_t lun_id,
uint8_t task_mgmt_cmd)
{
int ret;
int status;
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ rdata = rport->dd_data;
if (!rdata || !rdata->pnode)
return FAILED;
pnode = rdata->pnode;
- lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
+ lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL);
if (lpfc_cmd == NULL)
return FAILED;
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
- lpfc_cmd->pCmd = cmnd;
+ lpfc_cmd->pCmd = NULL;
lpfc_cmd->ndlp = pnode;
- status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
- task_mgmt_cmd);
+ status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
+ task_mgmt_cmd);
if (!status) {
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
}
- iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+ iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl;
+ iocbq->vport = vport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue %s to TGT %d LUN %llu "
"rpi x%x nlp_flag x%x Data: x%x x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
- iocbq->iocb_flag);
+ iocbq->cmd_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
if ((status != IOCB_SUCCESS) ||
- (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
+ (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) {
if (status != IOCB_SUCCESS ||
- iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
+ get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0727 TMF %s to TGT %d LUN %llu "
- "failed (%d, %d) iocb_flag x%x\n",
+ "failed (%d, %d) cmd_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd),
tgt_id, lun_id,
- iocbqrsp->iocb.ulpStatus,
- iocbqrsp->iocb.un.ulpWord[4],
- iocbq->iocb_flag);
+ get_job_ulpstatus(phba, iocbqrsp),
+ get_job_word4(phba, iocbqrsp),
+ iocbq->cmd_flag);
/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
if (status == IOCB_SUCCESS) {
- if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+ if (get_job_ulpstatus(phba, iocbqrsp) ==
+ IOSTAT_FCP_RSP_ERROR)
/* Something in the FCP_RSP was invalid.
* Check conditions */
ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
else
ret = FAILED;
- } else if (status == IOCB_TIMEDOUT) {
+ } else if ((status == IOCB_TIMEDOUT) ||
+ (status == IOCB_ABORTED)) {
ret = TIMEOUT_ERROR;
} else {
ret = FAILED;
lpfc_sli_release_iocbq(phba, iocbqrsp);
- if (ret != TIMEOUT_ERROR)
+ if (status != IOCB_TIMEDOUT)
lpfc_release_scsi_buf(phba, lpfc_cmd);
return ret;
/**
* lpfc_chk_tgt_mapped -
* @vport: The virtual port to check on
- * @cmnd: Pointer to scsi_cmnd data structure.
+ * @rport: Pointer to fc_rport data structure.
*
* This routine delays until the scsi target (aka rport) for the
* command exists (is present and logged in) or we declare it non-existent.
* 0x2002 - Success
**/
static int
-lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
+lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
{
struct lpfc_rport_data *rdata;
- struct lpfc_nodelist *pnode;
+ struct lpfc_nodelist *pnode = NULL;
unsigned long later;
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ rdata = rport->dd_data;
if (!rdata) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0797 Tgt Map rport failure: rdata x%px\n", rdata);
return FAILED;
}
pnode = rdata->pnode;
+
/*
* If target is not in a MAPPED state, delay until
* target is rediscovered or devloss timeout expires.
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
return SUCCESS;
schedule_timeout_uninterruptible(msecs_to_jiffies(500));
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ rdata = rport->dd_data;
if (!rdata)
return FAILED;
pnode = rdata->pnode;
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
int status;
u32 logit = LOG_FCP;
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ rdata = rport->dd_data;
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0798 Device Reset rdata failure: rdata x%px\n",
return FAILED;
}
pnode = rdata->pnode;
- status = fc_block_scsi_eh(cmnd);
+ status = fc_block_rport(rport);
if (status != 0 && status != SUCCESS)
return status;
- status = lpfc_chk_tgt_mapped(vport, cmnd);
+ status = lpfc_chk_tgt_mapped(vport, rport);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0721 Device Reset rport failure: rdata x%px\n", rdata);
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
FCP_LUN_RESET);
if (status != SUCCESS)
logit = LOG_TRACE_EVENT;
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ rdata = rport->dd_data;
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0799 Target Reset rdata failure: rdata x%px\n",
return FAILED;
}
pnode = rdata->pnode;
- status = fc_block_scsi_eh(cmnd);
+ status = fc_block_rport(rport);
if (status != 0 && status != SUCCESS)
return status;
- status = lpfc_chk_tgt_mapped(vport, cmnd);
+ status = lpfc_chk_tgt_mapped(vport, rport);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0722 Target Reset rport failure: rdata x%px\n", rdata);
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
FCP_TARGET_RESET);
if (status != SUCCESS) {
logit = LOG_TRACE_EVENT;
return status;
}
-/**
- * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
- * @cmnd: Pointer to scsi_cmnd data structure.
- *
- * This routine does target reset to all targets on @cmnd->device->host.
- * This emulates Parallel SCSI Bus Reset Semantics.
- *
- * Return code :
- * 0x2003 - Error
- * 0x2002 - Success
- **/
-static int
-lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
-{
- struct Scsi_Host *shost = cmnd->device->host;
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_nodelist *ndlp = NULL;
- struct lpfc_scsi_event_header scsi_event;
- int match;
- int ret = SUCCESS, status, i;
- u32 logit = LOG_FCP;
-
- scsi_event.event_type = FC_REG_SCSI_EVENT;
- scsi_event.subcategory = LPFC_EVENT_BUSRESET;
- scsi_event.lun = 0;
- memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
- memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
-
- fc_host_post_vendor_event(shost, fc_get_event_number(),
- sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
-
- status = fc_block_scsi_eh(cmnd);
- if (status != 0 && status != SUCCESS)
- return status;
-
- /*
- * Since the driver manages a single bus device, reset all
- * targets known to the driver. Should any target reset
- * fail, this routine returns failure to the midlayer.
- */
- for (i = 0; i < LPFC_MAX_TARGET; i++) {
- /* Search for mapped node by target ID */
- match = 0;
- spin_lock_irq(shost->host_lock);
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
-
- if (vport->phba->cfg_fcp2_no_tgt_reset &&
- (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
- continue;
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
- ndlp->nlp_sid == i &&
- ndlp->rport &&
- ndlp->nlp_type & NLP_FCP_TARGET) {
- match = 1;
- break;
- }
- }
- spin_unlock_irq(shost->host_lock);
- if (!match)
- continue;
-
- status = lpfc_send_taskmgmt(vport, cmnd,
- i, 0, FCP_TARGET_RESET);
-
- if (status != SUCCESS) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0700 Bus Reset on target %d failed\n",
- i);
- ret = FAILED;
- }
- }
- /*
- * We have to clean up i/o as : they may be orphaned by the TMFs
- * above; or if any of the TMFs failed, they may be in an
- * indeterminate state.
- * We will report success if all the i/o aborts successfully.
- */
-
- status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
- if (status != SUCCESS)
- ret = FAILED;
- if (ret == FAILED)
- logit = LOG_TRACE_EVENT;
-
- lpfc_printf_vlog(vport, KERN_ERR, logit,
- "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
- return ret;
-}
-
/**
* lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
* @cmnd: Pointer to scsi_cmnd data structure.
return SCSI_MLQUEUE_HOST_BUSY;
}
-static int
-lpfc_no_handler(struct scsi_cmnd *cmnd)
-{
- return FAILED;
-}
-
static int
lpfc_no_slave(struct scsi_device *sdev)
{
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_no_command,
- .eh_abort_handler = lpfc_no_handler,
- .eh_device_reset_handler = lpfc_no_handler,
- .eh_target_reset_handler = lpfc_no_handler,
- .eh_bus_reset_handler = lpfc_no_handler,
- .eh_host_reset_handler = lpfc_no_handler,
.slave_alloc = lpfc_no_slave,
.slave_configure = lpfc_no_slave,
.scan_finished = lpfc_scan_finished,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
- .eh_bus_reset_handler = lpfc_bus_reset_handler,
.eh_host_reset_handler = lpfc_host_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
uint8_t *, uint32_t *);
-static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
- struct lpfc_iocbq *);
+static struct lpfc_iocbq *
+lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
+ struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
struct lpfc_queue *cq,
struct lpfc_cqe *cqe);
+static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
+ struct lpfc_iocbq *pwqeq,
+ struct lpfc_sglq *sglq);
union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;
-static IOCB_t *
-lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
-{
- return &iocbq->iocb;
-}
-
/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
struct lpfc_sglq *start_sglq = NULL;
struct lpfc_io_buf *lpfc_cmd;
struct lpfc_nodelist *ndlp;
- struct lpfc_sli_ring *pring = NULL;
int found = 0;
+ u8 cmnd;
- if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
- pring = phba->sli4_hba.nvmels_wq->pring;
- else
- pring = lpfc_phba_elsring(phba);
-
- lockdep_assert_held(&pring->ring_lock);
+ cmnd = get_job_cmnd(phba, piocbq);
- if (piocbq->iocb_flag & LPFC_IO_FCP) {
+ if (piocbq->cmd_flag & LPFC_IO_FCP) {
lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
ndlp = lpfc_cmd->rdata->pnode;
- } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
- !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+ } else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
+ !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
ndlp = piocbq->context_un.ndlp;
- } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
- if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
+ } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
+ if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
ndlp = NULL;
else
ndlp = piocbq->context_un.ndlp;
if (sglq) {
- if (iocbq->iocb_flag & LPFC_IO_NVMET) {
+ if (iocbq->cmd_flag & LPFC_IO_NVMET) {
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
iflag);
sglq->state = SGL_FREED;
goto out;
}
- if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
+ if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
(!(unlikely(pci_channel_offline(phba->pcidev)))) &&
sglq->state != SGL_XRI_ABORTED) {
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
iocbq->sli4_lxritag = NO_XRI;
iocbq->sli4_xritag = NO_XRI;
- iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
+ iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
LPFC_IO_NVME_LS);
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
while (!list_empty(iocblist)) {
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
- if (piocb->wqe_cmpl) {
- if (piocb->iocb_flag & LPFC_IO_NVME)
+ if (piocb->cmd_cmpl) {
+ if (piocb->cmd_flag & LPFC_IO_NVME) {
lpfc_nvme_cancel_iocb(phba, piocb,
ulpstatus, ulpWord4);
- else
- lpfc_sli_release_iocbq(phba, piocb);
-
- } else if (piocb->iocb_cmpl) {
- piocb->iocb.ulpStatus = ulpstatus;
- piocb->iocb.un.ulpWord[4] = ulpWord4;
- (piocb->iocb_cmpl) (phba, piocb, piocb);
+ } else {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(lpfc_wcqe_c_status,
+ &piocb->wcqe_cmpl, ulpstatus);
+ piocb->wcqe_cmpl.parameter = ulpWord4;
+ } else {
+ piocb->iocb.ulpStatus = ulpstatus;
+ piocb->iocb.un.ulpWord[4] = ulpWord4;
+ }
+ (piocb->cmd_cmpl) (phba, piocb, piocb);
+ }
} else {
lpfc_sli_release_iocbq(phba, piocb);
}
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
- if (phba->sli_rev == LPFC_SLI_REV4)
- lockdep_assert_held(&pring->ring_lock);
- else
- lockdep_assert_held(&phba->hbalock);
+ u32 ulp_command = 0;
BUG_ON(!piocb);
+ ulp_command = get_job_cmnd(phba, piocb);
list_add_tail(&piocb->list, &pring->txcmplq);
- piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt++;
-
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
- (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
- (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ (ulp_command != CMD_ABORT_XRI_WQE) &&
+ (ulp_command != CMD_ABORT_XRI_CN) &&
+ (ulp_command != CMD_CLOSE_XRI_CN)) {
BUG_ON(!piocb->vport);
if (!(piocb->vport->load_flag & FC_UNLOADING))
mod_timer(&piocb->vport->els_tmofunc,
* lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
* @phba: Pointer to HBA context object.
* @cmdiocb: Pointer to driver command iocb object.
- * @cmf_cmpl: Pointer to completed WCQE.
+ * @rspiocb: Pointer to driver response iocb object.
*
* This routine will inform the driver of any BW adjustments we need
* to make. These changes will be picked up during the next CMF
**/
static void
lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *cmf_cmpl)
+ struct lpfc_iocbq *rspiocb)
{
union lpfc_wqe128 *wqe;
uint32_t status, info;
+ struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
uint64_t bw, bwdif, slop;
uint64_t pcent, bwpcent;
int asig, afpin, sigcnt, fpincnt;
char *s;
/* First check for error */
- status = bf_get(lpfc_wcqe_c_status, cmf_cmpl);
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
if (status) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
"6211 CMF_SYNC_WQE Error "
"req_tag x%x status x%x hwstatus x%x "
"tdatap x%x parm x%x\n",
- bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl),
- bf_get(lpfc_wcqe_c_status, cmf_cmpl),
- bf_get(lpfc_wcqe_c_hw_status, cmf_cmpl),
- cmf_cmpl->total_data_placed,
- cmf_cmpl->parameter);
+ bf_get(lpfc_wcqe_c_request_tag, wcqe),
+ bf_get(lpfc_wcqe_c_status, wcqe),
+ bf_get(lpfc_wcqe_c_hw_status, wcqe),
+ wcqe->total_data_placed,
+ wcqe->parameter);
goto out;
}
/* Gather congestion information on a successful cmpl */
- info = cmf_cmpl->parameter;
+ info = wcqe->parameter;
phba->cmf_active_info = info;
/* See if firmware info count is valid or has changed */
else
phba->cmf_info_per_interval = info;
- tdp = bf_get(lpfc_wcqe_c_cmf_bw, cmf_cmpl);
- cg = bf_get(lpfc_wcqe_c_cmf_cg, cmf_cmpl);
+ tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
+ cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
/* Get BW requirement from firmware */
bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
if (!bw) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
"6212 CMF_SYNC_WQE x%x: NULL bw\n",
- bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl));
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
goto out;
}
bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
sync_buf->vport = phba->pport;
- sync_buf->wqe_cmpl = lpfc_cmf_sync_cmpl;
- sync_buf->iocb_cmpl = NULL;
+ sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
sync_buf->context1 = NULL;
sync_buf->context2 = NULL;
sync_buf->context3 = NULL;
sync_buf->sli4_xritag = NO_XRI;
- sync_buf->iocb_flag |= LPFC_IO_CMF;
+ sync_buf->cmd_flag |= LPFC_IO_CMF;
ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
if (ret_val)
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
/*
* Set up an iotag
*/
- nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
+ nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
if (pring->ringno == LPFC_ELS_RING) {
/*
* If there is no completion routine to call, we can release the
* IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
- * that have no rsp ring completion, iocb_cmpl MUST be NULL.
+ * that have no rsp ring completion, cmd_cmpl MUST be NULL.
*/
- if (nextiocb->iocb_cmpl)
+ if (nextiocb->cmd_cmpl)
lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
else
__lpfc_sli_release_iocbq(phba, nextiocb);
return 0;
}
+static void
+lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
+ struct lpfc_iocbq *saveq)
+{
+ IOCB_t *irsp;
+ union lpfc_wqe128 *wqe;
+ u16 i = 0;
+
+ irsp = &saveq->iocb;
+ wqe = &saveq->wqe;
+
+ /* Fill wcqe with the IOCB status fields */
+ bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
+ saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
+ saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
+ saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
+
+ /* Source ID */
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
+
+ /* rx-id of the response frame */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
+
+ /* ox-id of the frame */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ irsp->unsli3.rcvsli3.ox_id);
+
+ /* DID */
+ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
+ irsp->un.rcvels.remoteID);
+
+ /* unsol data len */
+ for (i = 0; i < irsp->ulpBdeCount; i++) {
+ struct lpfc_hbq_entry *hbqe = NULL;
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ if (i == 0) {
+ hbqe = (struct lpfc_hbq_entry *)
+ &irsp->un.ulpWord[0];
+ saveq->wqe.gen_req.bde.tus.f.bdeSize =
+ hbqe->bde.tus.f.bdeSize;
+ } else if (i == 1) {
+ hbqe = (struct lpfc_hbq_entry *)
+ &irsp->unsli3.sli3Words[4];
+ saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
+ }
+ }
+ }
+}
+
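The helper above back-fills a WQE/WCQE image from the SLI-3 IOCB fields so
that unsolicited receive handling can go through one set of SLI-rev agnostic
accessors. A minimal sketch of reading the translated fields, assuming the
get_job_*/bf_get helpers that appear elsewhere in this series:

	/* Sketch only: after lpfc_sli_prep_unsol_wqe() has populated
	 * saveq->wqe and saveq->wcqe_cmpl, the same accessors work for
	 * frames sourced from either SLI-3 or SLI-4 hardware.
	 */
	u32 status = get_job_ulpstatus(phba, saveq);
	u32 oxid = bf_get(wqe_rcvoxid, &saveq->wqe.xmit_els_rsp.wqe_com);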
/**
* lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
* @phba: Pointer to HBA context object.
{
IOCB_t * irsp;
WORD5 * w5p;
+ dma_addr_t paddr;
uint32_t Rctl, Type;
struct lpfc_iocbq *iocbq;
struct lpfc_dmabuf *dmzbuf;
- irsp = &(saveq->iocb);
+ irsp = &saveq->iocb;
+ saveq->vport = phba->pport;
if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
if (pring->lpfc_sli_rcv_async_status)
}
if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
- (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
+ (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
if (irsp->ulpBdeCount > 0) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
- irsp->un.ulpWord[3]);
+ irsp->un.ulpWord[3]);
lpfc_in_buf_free(phba, dmzbuf);
}
if (irsp->ulpBdeCount > 1) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
- irsp->unsli3.sli3Words[3]);
+ irsp->unsli3.sli3Words[3]);
lpfc_in_buf_free(phba, dmzbuf);
}
if (irsp->ulpBdeCount > 2) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
- irsp->unsli3.sli3Words[7]);
+ irsp->unsli3.sli3Words[7]);
lpfc_in_buf_free(phba, dmzbuf);
}
irsp->unsli3.sli3Words[7]);
}
list_for_each_entry(iocbq, &saveq->list, list) {
- irsp = &(iocbq->iocb);
+ irsp = &iocbq->iocb;
if (irsp->ulpBdeCount != 0) {
- iocbq->context2 = lpfc_sli_get_buff(phba, pring,
+ iocbq->context2 = lpfc_sli_get_buff(phba,
+ pring,
irsp->un.ulpWord[3]);
if (!iocbq->context2)
lpfc_printf_log(phba,
irsp->un.ulpWord[3]);
}
if (irsp->ulpBdeCount == 2) {
- iocbq->context3 = lpfc_sli_get_buff(phba, pring,
+ iocbq->context3 = lpfc_sli_get_buff(phba,
+ pring,
irsp->unsli3.sli3Words[7]);
if (!iocbq->context3)
lpfc_printf_log(phba,
irsp->unsli3.sli3Words[7]);
}
}
+ } else {
+ paddr = getPaddr(irsp->un.cont64[0].addrHigh,
+ irsp->un.cont64[0].addrLow);
+ saveq->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
+ paddr);
+ if (irsp->ulpBdeCount == 2) {
+ paddr = getPaddr(irsp->un.cont64[1].addrHigh,
+ irsp->un.cont64[1].addrLow);
+ saveq->context3 = lpfc_sli_ringpostbuf_get(phba,
+ pring,
+ paddr);
+ }
}
+
if (irsp->ulpBdeCount != 0 &&
(irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
if (!found)
list_add_tail(&saveq->clist,
&pring->iocb_continue_saveq);
+
if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
list_del_init(&iocbq->clist);
saveq = iocbq;
- irsp = &(saveq->iocb);
- } else
+ irsp = &saveq->iocb;
+ } else {
return 0;
+ }
}
if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
(irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
}
}
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
+ irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
+ if (irsp->unsli3.rcvsli3.vpi == 0xffff)
+ saveq->vport = phba->pport;
+ else
+ saveq->vport = lpfc_find_vport_by_vpid(phba,
+ irsp->unsli3.rcvsli3.vpi);
+ }
+
+ /* Prepare WQE with Unsol frame */
+ lpfc_sli_prep_unsol_wqe(phba, saveq);
+
if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0313 Ring %d handler: unexpected Rctl x%x "
struct lpfc_iocbq *prspiocb)
{
struct lpfc_iocbq *cmd_iocb = NULL;
- uint16_t iotag;
- spinlock_t *temp_lock = NULL;
- unsigned long iflag = 0;
+ u16 iotag;
if (phba->sli_rev == LPFC_SLI_REV4)
- temp_lock = &pring->ring_lock;
+ iotag = get_wqe_reqtag(prspiocb);
else
- temp_lock = &phba->hbalock;
-
- spin_lock_irqsave(temp_lock, iflag);
- iotag = prspiocb->iocb.ulpIoTag;
+ iotag = prspiocb->iocb.ulpIoTag;
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
- cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt--;
- spin_unlock_irqrestore(temp_lock, iflag);
return cmd_iocb;
}
}
- spin_unlock_irqrestore(temp_lock, iflag);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0317 iotag x%x is out of "
- "range: max iotag x%x wd0 x%x\n",
- iotag, phba->sli.last_iotag,
- *(((uint32_t *) &prspiocb->iocb) + 7));
+ "range: max iotag x%x\n",
+ iotag, phba->sli.last_iotag);
return NULL;
}
struct lpfc_sli_ring *pring, uint16_t iotag)
{
struct lpfc_iocbq *cmd_iocb = NULL;
- spinlock_t *temp_lock = NULL;
- unsigned long iflag = 0;
-
- if (phba->sli_rev == LPFC_SLI_REV4)
- temp_lock = &pring->ring_lock;
- else
- temp_lock = &phba->hbalock;
- spin_lock_irqsave(temp_lock, iflag);
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
- cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt--;
- spin_unlock_irqrestore(temp_lock, iflag);
return cmd_iocb;
}
}
- spin_unlock_irqrestore(temp_lock, iflag);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0372 iotag x%x lookup error: max iotag (x%x) "
- "iocb_flag x%x\n",
+ "cmd_flag x%x\n",
iotag, phba->sli.last_iotag,
- cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
+ cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
return NULL;
}
struct lpfc_iocbq *cmdiocbp;
int rc = 1;
unsigned long iflag;
+ u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+
+ ulp_command = get_job_cmnd(phba, saveq);
+ ulp_status = get_job_ulpstatus(phba, saveq);
+ ulp_word4 = get_job_word4(phba, saveq);
+ ulp_context = get_job_ulpcontext(phba, saveq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ iotag = get_wqe_reqtag(saveq);
+ else
+ iotag = saveq->iocb.ulpIoTag;
+
if (cmdiocbp) {
- if (cmdiocbp->iocb_cmpl) {
+ ulp_command = get_job_cmnd(phba, cmdiocbp);
+ if (cmdiocbp->cmd_cmpl) {
/*
* If an ELS command failed send an event to mgmt
* application.
*/
- if (saveq->iocb.ulpStatus &&
+ if (ulp_status &&
(pring->ringno == LPFC_ELS_RING) &&
- (cmdiocbp->iocb.ulpCommand ==
- CMD_ELS_REQUEST64_CR))
+ (ulp_command == CMD_ELS_REQUEST64_CR))
lpfc_send_els_failure_event(phba,
cmdiocbp, saveq);
*/
if (pring->ringno == LPFC_ELS_RING) {
if ((phba->sli_rev < LPFC_SLI_REV4) &&
- (cmdiocbp->iocb_flag &
+ (cmdiocbp->cmd_flag &
LPFC_DRIVER_ABORTED)) {
spin_lock_irqsave(&phba->hbalock,
iflag);
- cmdiocbp->iocb_flag &=
+ cmdiocbp->cmd_flag &=
~LPFC_DRIVER_ABORTED;
spin_unlock_irqrestore(&phba->hbalock,
iflag);
*/
spin_lock_irqsave(&phba->hbalock,
iflag);
- saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+ saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
spin_unlock_irqrestore(&phba->hbalock,
iflag);
}
if (phba->sli_rev == LPFC_SLI_REV4) {
- if (saveq->iocb_flag &
+ if (saveq->cmd_flag &
LPFC_EXCHANGE_BUSY) {
/* Set cmdiocb flag for the
* exchange busy so sgl (xri)
*/
spin_lock_irqsave(
&phba->hbalock, iflag);
- cmdiocbp->iocb_flag |=
+ cmdiocbp->cmd_flag |=
LPFC_EXCHANGE_BUSY;
spin_unlock_irqrestore(
&phba->hbalock, iflag);
}
- if (cmdiocbp->iocb_flag &
+ if (cmdiocbp->cmd_flag &
LPFC_DRIVER_ABORTED) {
/*
* Clear LPFC_DRIVER_ABORTED
*/
spin_lock_irqsave(
&phba->hbalock, iflag);
- cmdiocbp->iocb_flag &=
+ cmdiocbp->cmd_flag &=
~LPFC_DRIVER_ABORTED;
spin_unlock_irqrestore(
&phba->hbalock, iflag);
- cmdiocbp->iocb.ulpStatus =
- IOSTAT_LOCAL_REJECT;
- cmdiocbp->iocb.un.ulpWord[4] =
- IOERR_ABORT_REQUESTED;
+ set_job_ulpstatus(cmdiocbp,
+ IOSTAT_LOCAL_REJECT);
+ set_job_ulpword4(cmdiocbp,
+ IOERR_ABORT_REQUESTED);
/*
* For SLI4, irsiocb contains
* NO_XRI in sli_xritag, it
* shall not affect releasing
* sgl (xri) process.
*/
- saveq->iocb.ulpStatus =
- IOSTAT_LOCAL_REJECT;
- saveq->iocb.un.ulpWord[4] =
- IOERR_SLI_ABORTED;
+ set_job_ulpstatus(saveq,
+ IOSTAT_LOCAL_REJECT);
+ set_job_ulpword4(saveq,
+ IOERR_SLI_ABORTED);
spin_lock_irqsave(
&phba->hbalock, iflag);
- saveq->iocb_flag |=
+ saveq->cmd_flag |=
LPFC_DELAY_MEM_FREE;
spin_unlock_irqrestore(
&phba->hbalock, iflag);
}
}
}
- (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
+ (cmdiocbp->cmd_cmpl) (phba, cmdiocbp, saveq);
} else
lpfc_sli_release_iocbq(phba, cmdiocbp);
} else {
"0322 Ring %d handler: "
"unexpected completion IoTag x%x "
"Data: x%x x%x x%x x%x\n",
- pring->ringno,
- saveq->iocb.ulpIoTag,
- saveq->iocb.ulpStatus,
- saveq->iocb.un.ulpWord[4],
- saveq->iocb.ulpCommand,
- saveq->iocb.ulpContext);
+ pring->ringno, iotag, ulp_status,
+ ulp_word4, ulp_command, ulp_context);
}
}
spin_lock_irqsave(&phba->hbalock, iflag);
if (unlikely(!cmdiocbq))
break;
- if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
- cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- if (cmdiocbq->iocb_cmpl) {
+ if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
+ cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ if (cmdiocbq->cmd_cmpl) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+ (cmdiocbq->cmd_cmpl)(phba, cmdiocbq,
&rspiocbq);
spin_lock_irqsave(&phba->hbalock, iflag);
}
struct lpfc_iocbq *rspiocbp)
{
struct lpfc_iocbq *saveq;
- struct lpfc_iocbq *cmdiocbp;
+ struct lpfc_iocbq *cmdiocb;
struct lpfc_iocbq *next_iocb;
- IOCB_t *irsp = NULL;
+ IOCB_t *irsp;
uint32_t free_saveq;
- uint8_t iocb_cmd_type;
+ u8 cmd_type;
lpfc_iocb_type type;
unsigned long iflag;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
+ u32 ulp_word4 = get_job_word4(phba, rspiocbp);
+ u32 ulp_command = get_job_cmnd(phba, rspiocbp);
int rc;
spin_lock_irqsave(&phba->hbalock, iflag);
/* First add the response iocb to the continueq list */
- list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
+ list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
pring->iocb_continueq_cnt++;
- /* Now, determine whether the list is completed for processing */
- irsp = &rspiocbp->iocb;
- if (irsp->ulpLe) {
- /*
- * By default, the driver expects to free all resources
- * associated with this iocb completion.
- */
- free_saveq = 1;
- saveq = list_get_first(&pring->iocb_continueq,
- struct lpfc_iocbq, list);
- irsp = &(saveq->iocb);
- list_del_init(&pring->iocb_continueq);
- pring->iocb_continueq_cnt = 0;
+ /*
+ * By default, the driver expects to free all resources
+ * associated with this iocb completion.
+ */
+ free_saveq = 1;
+ saveq = list_get_first(&pring->iocb_continueq,
+ struct lpfc_iocbq, list);
+ list_del_init(&pring->iocb_continueq);
+ pring->iocb_continueq_cnt = 0;
- pring->stats.iocb_rsp++;
+ pring->stats.iocb_rsp++;
- /*
- * If resource errors reported from HBA, reduce
- * queuedepths of the SCSI device.
- */
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_NO_RESOURCES)) {
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- phba->lpfc_rampdown_queue_depth(phba);
- spin_lock_irqsave(&phba->hbalock, iflag);
- }
+ /*
+ * If resource errors reported from HBA, reduce
+ * queuedepths of the SCSI device.
+ */
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_NO_RESOURCES)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ phba->lpfc_rampdown_queue_depth(phba);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
- if (irsp->ulpStatus) {
- /* Rsp ring <ringno> error: IOCB */
+ if (ulp_status) {
+ /* Rsp ring <ringno> error: IOCB */
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ irsp = &rspiocbp->iocb;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0328 Rsp Ring %d error: ulp_status x%x "
+ "IOCB Data: "
+ "x%08x x%08x x%08x x%08x "
+ "x%08x x%08x x%08x x%08x "
+ "x%08x x%08x x%08x x%08x "
+ "x%08x x%08x x%08x x%08x\n",
+ pring->ringno, ulp_status,
+ get_job_ulpword(rspiocbp, 0),
+ get_job_ulpword(rspiocbp, 1),
+ get_job_ulpword(rspiocbp, 2),
+ get_job_ulpword(rspiocbp, 3),
+ get_job_ulpword(rspiocbp, 4),
+ get_job_ulpword(rspiocbp, 5),
+ *(((uint32_t *)irsp) + 6),
+ *(((uint32_t *)irsp) + 7),
+ *(((uint32_t *)irsp) + 8),
+ *(((uint32_t *)irsp) + 9),
+ *(((uint32_t *)irsp) + 10),
+ *(((uint32_t *)irsp) + 11),
+ *(((uint32_t *)irsp) + 12),
+ *(((uint32_t *)irsp) + 13),
+ *(((uint32_t *)irsp) + 14),
+ *(((uint32_t *)irsp) + 15));
+ } else {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "0328 Rsp Ring %d error: "
+ "0321 Rsp Ring %d error: "
"IOCB Data: "
- "x%x x%x x%x x%x "
- "x%x x%x x%x x%x "
- "x%x x%x x%x x%x "
"x%x x%x x%x x%x\n",
pring->ringno,
- irsp->un.ulpWord[0],
- irsp->un.ulpWord[1],
- irsp->un.ulpWord[2],
- irsp->un.ulpWord[3],
- irsp->un.ulpWord[4],
- irsp->un.ulpWord[5],
- *(((uint32_t *) irsp) + 6),
- *(((uint32_t *) irsp) + 7),
- *(((uint32_t *) irsp) + 8),
- *(((uint32_t *) irsp) + 9),
- *(((uint32_t *) irsp) + 10),
- *(((uint32_t *) irsp) + 11),
- *(((uint32_t *) irsp) + 12),
- *(((uint32_t *) irsp) + 13),
- *(((uint32_t *) irsp) + 14),
- *(((uint32_t *) irsp) + 15));
+ rspiocbp->wcqe_cmpl.word0,
+ rspiocbp->wcqe_cmpl.total_data_placed,
+ rspiocbp->wcqe_cmpl.parameter,
+ rspiocbp->wcqe_cmpl.word3);
}
+ }
- /*
- * Fetch the IOCB command type and call the correct completion
- * routine. Solicited and Unsolicited IOCBs on the ELS ring
- * get freed back to the lpfc_iocb_list by the discovery
- * kernel thread.
- */
- iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
- type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
- switch (type) {
- case LPFC_SOL_IOCB:
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
- spin_lock_irqsave(&phba->hbalock, iflag);
- break;
-
- case LPFC_UNSOL_IOCB:
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
- spin_lock_irqsave(&phba->hbalock, iflag);
- if (!rc)
- free_saveq = 0;
- break;
- case LPFC_ABORT_IOCB:
- cmdiocbp = NULL;
- if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
+ /*
+ * Fetch the iocb command type and call the correct completion
+ * routine. Solicited and Unsolicited IOCBs on the ELS ring
+ * get freed back to the lpfc_iocb_list by the discovery
+ * kernel thread.
+ */
+ cmd_type = ulp_command & CMD_IOCB_MASK;
+ type = lpfc_sli_iocb_cmd_type(cmd_type);
+ switch (type) {
+ case LPFC_SOL_IOCB:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ break;
+ case LPFC_UNSOL_IOCB:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (!rc)
+ free_saveq = 0;
+ break;
+ case LPFC_ABORT_IOCB:
+ cmdiocb = NULL;
+ if (ulp_command != CMD_XRI_ABORTED_CX)
+ cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
+ saveq);
+ if (cmdiocb) {
+ /* Call the specified completion routine */
+ if (cmdiocb->cmd_cmpl) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
- cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
- saveq);
+ cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
spin_lock_irqsave(&phba->hbalock, iflag);
- }
- if (cmdiocbp) {
- /* Call the specified completion routine */
- if (cmdiocbp->iocb_cmpl) {
- spin_unlock_irqrestore(&phba->hbalock,
- iflag);
- (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
- saveq);
- spin_lock_irqsave(&phba->hbalock,
- iflag);
- } else
- __lpfc_sli_release_iocbq(phba,
- cmdiocbp);
- }
- break;
-
- case LPFC_UNKNOWN_IOCB:
- if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
- char adaptermsg[LPFC_MAX_ADPTMSG];
- memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
- memcpy(&adaptermsg[0], (uint8_t *)irsp,
- MAX_MSG_DATA);
- dev_warn(&((phba->pcidev)->dev),
- "lpfc%d: %s\n",
- phba->brd_no, adaptermsg);
} else {
- /* Unknown IOCB command */
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0335 Unknown IOCB "
- "command Data: x%x "
- "x%x x%x x%x\n",
- irsp->ulpCommand,
- irsp->ulpStatus,
- irsp->ulpIoTag,
- irsp->ulpContext);
+ __lpfc_sli_release_iocbq(phba, cmdiocb);
}
- break;
}
+ break;
+ case LPFC_UNKNOWN_IOCB:
+ if (ulp_command == CMD_ADAPTER_MSG) {
+ char adaptermsg[LPFC_MAX_ADPTMSG];
+
+ memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+ memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
+ MAX_MSG_DATA);
+ dev_warn(&((phba->pcidev)->dev),
+ "lpfc%d: %s\n",
+ phba->brd_no, adaptermsg);
+ } else {
+ /* Unknown command */
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0335 Unknown IOCB "
+ "command Data: x%x "
+ "x%x x%x x%x\n",
+ ulp_command,
+ ulp_status,
+ get_wqe_reqtag(rspiocbp),
+ get_job_ulpcontext(phba, rspiocbp));
+ }
+ break;
+ }
- if (free_saveq) {
- list_for_each_entry_safe(rspiocbp, next_iocb,
- &saveq->list, list) {
- list_del_init(&rspiocbp->list);
- __lpfc_sli_release_iocbq(phba, rspiocbp);
- }
- __lpfc_sli_release_iocbq(phba, saveq);
+ if (free_saveq) {
+ list_for_each_entry_safe(rspiocbp, next_iocb,
+ &saveq->list, list) {
+ list_del_init(&rspiocbp->list);
+ __lpfc_sli_release_iocbq(phba, rspiocbp);
}
- rspiocbp = NULL;
+ __lpfc_sli_release_iocbq(phba, saveq);
}
+ rspiocbp = NULL;
spin_unlock_irqrestore(&phba->hbalock, iflag);
return rspiocbp;
}
irspiocbq = container_of(cq_event, struct lpfc_iocbq,
cq_event);
/* Translate ELS WCQE to response IOCBQ */
- irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
- irspiocbq);
+ irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
+ irspiocbq);
if (irspiocbq)
lpfc_sli_sp_handle_rspiocb(phba, pring,
irspiocbq);
list_splice_init(&pring->txq, &txq);
list_for_each_entry_safe(piocb, next_iocb,
&pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
/* Retrieve everything on the txcmplq */
list_splice_init(&pring->txcmplq, &txcmplq);
pring->txq_cnt = 0;
list_splice_init(&pring->txq, &txq);
list_for_each_entry_safe(piocb, next_iocb,
&pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
/* Retrieve everything on the txcmplq */
list_splice_init(&pring->txcmplq, &txcmplq);
pring->txq_cnt = 0;
lockdep_assert_held(&phba->hbalock);
- if (piocb->iocb_cmpl && (!piocb->vport) &&
+ if (piocb->cmd_cmpl && (!piocb->vport) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
case CMD_QUE_RING_BUF64_CN:
/*
* For IOCBs, like QUE_RING_BUF, that have no rsp ring
- * completion, iocb_cmpl MUST be 0.
+ * completion, cmd_cmpl MUST be 0.
*/
- if (piocb->iocb_cmpl)
- piocb->iocb_cmpl = NULL;
+ if (piocb->cmd_cmpl)
+ piocb->cmd_cmpl = NULL;
fallthrough;
case CMD_CREATE_XRI_CR:
case CMD_CLOSE_XRI_CN:
}
/**
- * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
+ * __lpfc_sli_issue_fcp_io_s3 - Issue an fcp io iocb on an SLI3 device
* @phba: Pointer to HBA context object.
- * @piocbq: Pointer to command iocb.
- * @sglq: Pointer to the scatter gather queue object.
+ * @ring_number: SLI ring number to issue wqe on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
*
- * This routine converts the bpl or bde that is in the IOCB
- * to a sgl list for the sli4 hardware. The physical address
- * of the bpl/bde is converted back to a virtual address.
- * If the IOCB contains a BPL then the list of BDE's is
- * converted to sli4_sge's. If the IOCB contains a single
- * BDE then it is converted to a single sli_sge.
- * The IOCB is still in cpu endianess so the contents of
- * the bpl can be used without byte swapping.
+ * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
+ * version to send an iocb command to an HBA with SLI-3 interface spec.
*
- * Returns valid XRI = Success, NO_XRI = Failure.
-**/
-static uint16_t
-lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
- struct lpfc_sglq *sglq)
+ * This function takes the hbalock before invoking the lockless version.
+ * The function will return success after it successfully submits the iocb to
+ * the firmware or after adding it to the txq.
+ **/
+static int
+__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
{
- uint16_t xritag = NO_XRI;
- struct ulp_bde64 *bpl = NULL;
- struct ulp_bde64 bde;
- struct sli4_sge *sgl = NULL;
- struct lpfc_dmabuf *dmabuf;
- IOCB_t *icmd;
- int numBdes = 0;
- int i = 0;
- uint32_t offset = 0; /* accumulated offset in the sg request list */
- int inbound = 0; /* number of sg reply entries inbound from firmware */
-
- if (!piocbq || !sglq)
- return xritag;
-
- sgl = (struct sli4_sge *)sglq->sgl;
- icmd = &piocbq->iocb;
- if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
- return sglq->sli4_xritag;
- if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
- numBdes = icmd->un.genreq64.bdl.bdeSize /
- sizeof(struct ulp_bde64);
- /* The addrHigh and addrLow fields within the IOCB
- * have not been byteswapped yet so there is no
- * need to swap them back.
- */
- if (piocbq->context3)
- dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
- else
- return xritag;
-
- bpl = (struct ulp_bde64 *)dmabuf->virt;
- if (!bpl)
- return xritag;
+ unsigned long iflags;
+ int rc;
- for (i = 0; i < numBdes; i++) {
- /* Should already be byte swapped. */
- sgl->addr_hi = bpl->addrHigh;
- sgl->addr_lo = bpl->addrLow;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
- sgl->word2 = le32_to_cpu(sgl->word2);
- if ((i+1) == numBdes)
- bf_set(lpfc_sli4_sge_last, sgl, 1);
- else
- bf_set(lpfc_sli4_sge_last, sgl, 0);
- /* swap the size field back to the cpu so we
- * can assign it to the sgl.
- */
- bde.tus.w = le32_to_cpu(bpl->tus.w);
- sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
- /* The offsets in the sgl need to be accumulated
- * separately for the request and reply lists.
- * The request is always first, the reply follows.
- */
- if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
- /* add up the reply sg entries */
- if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
- inbound++;
- /* first inbound? reset the offset */
- if (inbound == 1)
- offset = 0;
- bf_set(lpfc_sli4_sge_offset, sgl, offset);
- bf_set(lpfc_sli4_sge_type, sgl,
- LPFC_SGE_TYPE_DATA);
- offset += bde.tus.f.bdeSize;
- }
- sgl->word2 = cpu_to_le32(sgl->word2);
- bpl++;
- sgl++;
- }
- } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
- /* The addrHigh and addrLow fields of the BDE have not
- * been byteswapped yet so they need to be swapped
- * before putting them in the sgl.
- */
- sgl->addr_hi =
- cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
- sgl->addr_lo =
- cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
- sgl->word2 = le32_to_cpu(sgl->word2);
- bf_set(lpfc_sli4_sge_last, sgl, 1);
- sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len =
- cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
- }
- return sglq->sli4_xritag;
+ return rc;
}
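Both FCP I/O variants are reached through a per-adapter function pointer
rather than called directly. A sketch of how that pointer is typically wired
up at API-table setup time (the device-group switch below is illustrative,
not quoted from this patch):

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:	/* SLI-3 adapter: locked wrapper */
		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
		break;
	case LPFC_PCI_DEV_OC:	/* SLI-4 adapter: lockless wqe path */
		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
		break;
	}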
/**
- * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
+ * __lpfc_sli_issue_fcp_io_s4 - Issue an fcp io wqe on an SLI4 device
* @phba: Pointer to HBA context object.
- * @iocbq: Pointer to command iocb.
- * @wqe: Pointer to the work queue entry.
+ * @ring_number: SLI ring number to issue wqe on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
*
- * This routine converts the iocb command to its Work Queue Entry
- * equivalent. The wqe pointer should not have any fields set when
- * this routine is called because it will memcpy over them.
- * This routine does not set the CQ_ID or the WQEC bits in the
- * wqe.
+ * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
+ * a wqe command to an HBA with SLI-4 interface spec.
*
- * Returns: 0 = Success, IOCB_ERROR = Failure.
+ * This function is a lockless version. The function will return success
+ * after it successfully submits the wqe to the firmware or after adding it
+ * to the txq.
**/
static int
-lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
- union lpfc_wqe128 *wqe)
-{
- uint32_t xmit_len = 0, total_len = 0;
- uint8_t ct = 0;
- uint32_t fip;
- uint32_t abort_tag;
- uint8_t command_type = ELS_COMMAND_NON_FIP;
- uint8_t cmnd;
- uint16_t xritag;
- uint16_t abrt_iotag;
- struct lpfc_iocbq *abrtiocbq;
- struct ulp_bde64 *bpl = NULL;
- uint32_t els_id = LPFC_ELS_ID_DEFAULT;
- int numBdes, i;
- struct ulp_bde64 bde;
- struct lpfc_nodelist *ndlp;
- uint32_t *pcmd;
- uint32_t if_type;
-
- fip = phba->hba_flag & HBA_FIP_SUPPORT;
- /* The fcp commands will set command type */
- if (iocbq->iocb_flag & LPFC_IO_FCP)
- command_type = FCP_COMMAND;
- else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
- command_type = ELS_COMMAND_FIP;
- else
- command_type = ELS_COMMAND_NON_FIP;
-
- if (phba->fcp_embed_io)
- memset(wqe, 0, sizeof(union lpfc_wqe128));
- /* Some of the fields are in the right position already */
- memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
- /* The ct field has moved so reset */
- wqe->generic.wqe_com.word7 = 0;
- wqe->generic.wqe_com.word10 = 0;
-
- abort_tag = (uint32_t) iocbq->iotag;
- xritag = iocbq->sli4_xritag;
- /* words0-2 bpl convert bde */
- if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
- numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
- sizeof(struct ulp_bde64);
- bpl = (struct ulp_bde64 *)
- ((struct lpfc_dmabuf *)iocbq->context3)->virt;
- if (!bpl)
- return IOCB_ERROR;
-
- /* Should already be byte swapped. */
- wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
- wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
- /* swap the size field back to the cpu so we
- * can assign it to the sgl.
- */
- wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
- xmit_len = wqe->generic.bde.tus.f.bdeSize;
- total_len = 0;
- for (i = 0; i < numBdes; i++) {
- bde.tus.w = le32_to_cpu(bpl[i].tus.w);
- total_len += bde.tus.f.bdeSize;
- }
- } else
- xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
-
- iocbq->iocb.ulpIoTag = iocbq->iotag;
- cmnd = iocbq->iocb.ulpCommand;
-
- switch (iocbq->iocb.ulpCommand) {
- case CMD_ELS_REQUEST64_CR:
- if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
- ndlp = iocbq->context_un.ndlp;
- else
- ndlp = (struct lpfc_nodelist *)iocbq->context1;
- if (!iocbq->iocb.ulpLe) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2007 Only Limited Edition cmd Format"
- " supported 0x%x\n",
- iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- }
-
- wqe->els_req.payload_len = xmit_len;
- /* Els_reguest64 has a TMO */
- bf_set(wqe_tmo, &wqe->els_req.wqe_com,
- iocbq->iocb.ulpTimeout);
- /* Need a VF for word 4 set the vf bit*/
- bf_set(els_req64_vf, &wqe->els_req, 0);
- /* And a VFID for word 12 */
- bf_set(els_req64_vfid, &wqe->els_req, 0);
- ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
- bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
- iocbq->iocb.ulpContext);
- bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
- bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
- /* CCP CCPE PV PRI in word10 were set in the memcpy */
- if (command_type == ELS_COMMAND_FIP)
- els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
- >> LPFC_FIP_ELS_ID_SHIFT);
- pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
- iocbq->context2)->virt);
- if_type = bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf);
- if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
- if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
- *pcmd == ELS_CMD_SCR ||
- *pcmd == ELS_CMD_RDF ||
- *pcmd == ELS_CMD_EDC ||
- *pcmd == ELS_CMD_RSCN_XMT ||
- *pcmd == ELS_CMD_FDISC ||
- *pcmd == ELS_CMD_LOGO ||
- *pcmd == ELS_CMD_QFPA ||
- *pcmd == ELS_CMD_UVEM ||
- *pcmd == ELS_CMD_PLOGI)) {
- bf_set(els_req64_sp, &wqe->els_req, 1);
- bf_set(els_req64_sid, &wqe->els_req,
- iocbq->vport->fc_myDID);
- if ((*pcmd == ELS_CMD_FLOGI) &&
- !(phba->fc_topology ==
- LPFC_TOPOLOGY_LOOP))
- bf_set(els_req64_sid, &wqe->els_req, 0);
- bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
- bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
- phba->vpi_ids[iocbq->vport->vpi]);
- } else if (pcmd && iocbq->context1) {
- bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
- bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- }
- }
- bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
- bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
- bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
- wqe->els_req.max_response_payload_len = total_len - xmit_len;
- break;
- case CMD_XMIT_SEQUENCE64_CX:
- bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
- iocbq->iocb.un.ulpWord[3]);
- bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
- iocbq->iocb.unsli3.rcvsli3.ox_id);
- /* The entire sequence is transmitted for this IOCB */
- xmit_len = total_len;
- cmnd = CMD_XMIT_SEQUENCE64_CR;
- if (phba->link_flag & LS_LOOPBACK_MODE)
- bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
- fallthrough;
- case CMD_XMIT_SEQUENCE64_CR:
- /* word3 iocb=io_tag32 wqe=reserved */
- wqe->xmit_sequence.rsvd3 = 0;
- /* word4 relative_offset memcpy */
- /* word5 r_ctl/df_ctl memcpy */
- bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
- bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
- bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
- LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
- LPFC_WQE_LENLOC_WORD12);
- bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
- wqe->xmit_sequence.xmit_len = xmit_len;
- command_type = OTHER_COMMAND;
- break;
- case CMD_XMIT_BCAST64_CN:
- /* word3 iocb=iotag32 wqe=seq_payload_len */
- wqe->xmit_bcast64.seq_payload_len = xmit_len;
- /* word4 iocb=rsvd wqe=rsvd */
- /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
- /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
- bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
- bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
- bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
- LPFC_WQE_LENLOC_WORD3);
- bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
- break;
- case CMD_FCP_IWRITE64_CR:
- command_type = FCP_COMMAND_DATA_OUT;
- /* word3 iocb=iotag wqe=payload_offset_len */
- /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- bf_set(payload_offset_len, &wqe->fcp_iwrite,
- xmit_len + sizeof(struct fcp_rsp));
- bf_set(cmd_buff_len, &wqe->fcp_iwrite,
- 0);
- /* word4 iocb=parameter wqe=total_xfer_length memcpy */
- /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
- bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
- iocbq->iocb.ulpFCP2Rcvy);
- bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
- /* Always open the exchange */
- bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
- LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
- if (iocbq->iocb_flag & LPFC_IO_OAS) {
- bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
- bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
- if (iocbq->priority) {
- bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
- (iocbq->priority << 1));
- } else {
- bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
- (phba->cfg_XLanePriority << 1));
- }
- }
- /* Note, word 10 is already initialized to 0 */
-
- /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
- if (phba->cfg_enable_pbde)
- bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
- else
- bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
-
- if (phba->fcp_embed_io) {
- struct lpfc_io_buf *lpfc_cmd;
- struct sli4_sge *sgl;
- struct fcp_cmnd *fcp_cmnd;
- uint32_t *ptr;
-
- /* 128 byte wqe support here */
-
- lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- fcp_cmnd = lpfc_cmd->fcp_cmnd;
-
- /* Word 0-2 - FCP_CMND */
- wqe->generic.bde.tus.f.bdeFlags =
- BUFF_TYPE_BDE_IMMED;
- wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
- wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 88; /* Word 22 */
-
- bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
-
- /* Word 22-29 FCP CMND Payload */
- ptr = &wqe->words[22];
- memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
- }
- break;
- case CMD_FCP_IREAD64_CR:
- /* word3 iocb=iotag wqe=payload_offset_len */
- /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- bf_set(payload_offset_len, &wqe->fcp_iread,
- xmit_len + sizeof(struct fcp_rsp));
- bf_set(cmd_buff_len, &wqe->fcp_iread,
- 0);
- /* word4 iocb=parameter wqe=total_xfer_length memcpy */
- /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
- bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
- iocbq->iocb.ulpFCP2Rcvy);
- bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
- /* Always open the exchange */
- bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
- LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
- if (iocbq->iocb_flag & LPFC_IO_OAS) {
- bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
- bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
- if (iocbq->priority) {
- bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
- (iocbq->priority << 1));
- } else {
- bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
- (phba->cfg_XLanePriority << 1));
- }
- }
- /* Note, word 10 is already initialized to 0 */
-
- /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
- if (phba->cfg_enable_pbde)
- bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
- else
- bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
-
- if (phba->fcp_embed_io) {
- struct lpfc_io_buf *lpfc_cmd;
- struct sli4_sge *sgl;
- struct fcp_cmnd *fcp_cmnd;
- uint32_t *ptr;
-
- /* 128 byte wqe support here */
-
- lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- fcp_cmnd = lpfc_cmd->fcp_cmnd;
-
- /* Word 0-2 - FCP_CMND */
- wqe->generic.bde.tus.f.bdeFlags =
- BUFF_TYPE_BDE_IMMED;
- wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
- wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 88; /* Word 22 */
-
- bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
-
- /* Word 22-29 FCP CMND Payload */
- ptr = &wqe->words[22];
- memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
- }
- break;
- case CMD_FCP_ICMND64_CR:
- /* word3 iocb=iotag wqe=payload_offset_len */
- /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- bf_set(payload_offset_len, &wqe->fcp_icmd,
- xmit_len + sizeof(struct fcp_rsp));
- bf_set(cmd_buff_len, &wqe->fcp_icmd,
- 0);
- /* word3 iocb=IO_TAG wqe=reserved */
- bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
- /* Always open the exchange */
- bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
- LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
- iocbq->iocb.ulpFCP2Rcvy);
- if (iocbq->iocb_flag & LPFC_IO_OAS) {
- bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
- if (iocbq->priority) {
- bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
- (iocbq->priority << 1));
- } else {
- bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
- (phba->cfg_XLanePriority << 1));
- }
- }
- /* Note, word 10 is already initialized to 0 */
-
- if (phba->fcp_embed_io) {
- struct lpfc_io_buf *lpfc_cmd;
- struct sli4_sge *sgl;
- struct fcp_cmnd *fcp_cmnd;
- uint32_t *ptr;
-
- /* 128 byte wqe support here */
-
- lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- fcp_cmnd = lpfc_cmd->fcp_cmnd;
-
- /* Word 0-2 - FCP_CMND */
- wqe->generic.bde.tus.f.bdeFlags =
- BUFF_TYPE_BDE_IMMED;
- wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
- wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 88; /* Word 22 */
-
- bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
-
- /* Word 22-29 FCP CMND Payload */
- ptr = &wqe->words[22];
- memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
- }
- break;
- case CMD_GEN_REQUEST64_CR:
- /* For this command calculate the xmit length of the
- * request bde.
- */
- xmit_len = 0;
- numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
- sizeof(struct ulp_bde64);
- for (i = 0; i < numBdes; i++) {
- bde.tus.w = le32_to_cpu(bpl[i].tus.w);
- if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
- break;
- xmit_len += bde.tus.f.bdeSize;
- }
- /* word3 iocb=IO_TAG wqe=request_payload_len */
- wqe->gen_req.request_payload_len = xmit_len;
- /* word4 iocb=parameter wqe=relative_offset memcpy */
- /* word5 [rctl, type, df_ctl, la] copied in memcpy */
- /* word6 context tag copied in memcpy */
- if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
- ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2015 Invalid CT %x command 0x%x\n",
- ct, iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- }
- bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
- bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
- bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
- bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
- wqe->gen_req.max_response_payload_len = total_len - xmit_len;
- command_type = OTHER_COMMAND;
- break;
- case CMD_XMIT_ELS_RSP64_CX:
- ndlp = (struct lpfc_nodelist *)iocbq->context1;
- /* words0-2 BDE memcpy */
- /* word3 iocb=iotag32 wqe=response_payload_len */
- wqe->xmit_els_rsp.response_payload_len = xmit_len;
- /* word4 */
- wqe->xmit_els_rsp.word4 = 0;
- /* word5 iocb=rsvd wge=did */
- bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
- iocbq->iocb.un.xseq64.xmit_els_remoteID);
-
- if_type = bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf);
- if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
- if (iocbq->vport->fc_flag & FC_PT2PT) {
- bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
- bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
- iocbq->vport->fc_myDID);
- if (iocbq->vport->fc_myDID == Fabric_DID) {
- bf_set(wqe_els_did,
- &wqe->xmit_els_rsp.wqe_dest, 0);
- }
- }
- }
- bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
- bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
- iocbq->iocb.unsli3.rcvsli3.ox_id);
- if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
- bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
- phba->vpi_ids[iocbq->vport->vpi]);
- bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
- bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
- LPFC_WQE_LENLOC_WORD3);
- bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
- bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
- bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
- iocbq->vport->fc_myDID);
- bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
- bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
- phba->vpi_ids[phba->pport->vpi]);
- }
- command_type = OTHER_COMMAND;
- break;
- case CMD_CLOSE_XRI_CN:
- case CMD_ABORT_XRI_CN:
- case CMD_ABORT_XRI_CX:
- /* words 0-2 memcpy should be 0 rserved */
- /* port will send abts */
- abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
- if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
- abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
- fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
- } else
- fip = 0;
-
- if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
- /*
- * The link is down, or the command was ELS_FIP
- * so the fw does not need to send abts
- * on the wire.
- */
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
- else
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
- bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
- /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
- wqe->abort_cmd.rsrvd5 = 0;
- bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
- abort_tag = iocbq->iocb.un.acxri.abortIoTag;
- /*
- * The abort handler will send us CMD_ABORT_XRI_CN or
- * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
- */
- bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
- bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
- LPFC_WQE_LENLOC_NONE);
- cmnd = CMD_ABORT_XRI_CX;
- command_type = OTHER_COMMAND;
- xritag = 0;
- break;
- case CMD_XMIT_BLS_RSP64_CX:
- ndlp = (struct lpfc_nodelist *)iocbq->context1;
- /* As BLS ABTS RSP WQE is very different from other WQEs,
- * we re-construct this WQE here based on information in
- * iocbq from scratch.
- */
- memset(wqe, 0, sizeof(*wqe));
- /* OX_ID is invariable to who sent ABTS to CT exchange */
- bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
- bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
- if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
- LPFC_ABTS_UNSOL_INT) {
- /* ABTS sent by initiator to CT exchange, the
- * RX_ID field will be filled with the newly
- * allocated responder XRI.
- */
- bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
- iocbq->sli4_xritag);
- } else {
- /* ABTS sent by responder to CT exchange, the
- * RX_ID field will be filled with the responder
- * RX_ID from ABTS.
- */
- bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
- bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
- }
- bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
- bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
-
- /* Use CT=VPI */
- bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
- ndlp->nlp_DID);
- bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
- iocbq->iocb.ulpContext);
- bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
- bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
- phba->vpi_ids[phba->pport->vpi]);
- bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
- LPFC_WQE_LENLOC_NONE);
- /* Overwrite the pre-set comnd type with OTHER_COMMAND */
- command_type = OTHER_COMMAND;
- if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
- bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
- bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
- bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
- bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
- bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
- bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
- }
-
- break;
- case CMD_SEND_FRAME:
- bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
- bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
- bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
- bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
- bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
- bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
- bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
- bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
- bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
- return 0;
- case CMD_XRI_ABORTED_CX:
- case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
- case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
- case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
- case CMD_FCP_TRSP64_CX: /* Target mode rcv */
- case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2014 Invalid command 0x%x\n",
- iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- }
-
- if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
- bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
- else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
- bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
- else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
- bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
- iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
- LPFC_IO_DIF_INSERT);
- bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
- bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
- wqe->generic.wqe_com.abort_tag = abort_tag;
- bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
- bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
- bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
- bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- return 0;
-}
-
-/**
- * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
- * @phba: Pointer to HBA context object.
- * @ring_number: SLI ring number to issue wqe on.
- * @piocb: Pointer to command iocb.
- * @flag: Flag indicating if this command can be put into txq.
- *
- * __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to
- * send an iocb command to an HBA with SLI-4 interface spec.
- *
- * This function takes the hbalock before invoking the lockless version.
- * The function will return success after it successfully submit the wqe to
- * firmware or after adding to the txq.
- **/
-static int
-__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
- struct lpfc_iocbq *piocb, uint32_t flag)
-{
- unsigned long iflags;
- int rc;
-
- spin_lock_irqsave(&phba->hbalock, iflags);
- rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ int rc;
+ struct lpfc_io_buf *lpfc_cmd =
+ (struct lpfc_io_buf *)piocb->context1;
+ lpfc_prep_embed_io(phba, lpfc_cmd);
+ rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
return rc;
}
-/**
- * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
- * @phba: Pointer to HBA context object.
- * @ring_number: SLI ring number to issue wqe on.
- * @piocb: Pointer to command iocb.
- * @flag: Flag indicating if this command can be put into txq.
- *
- * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
- * an wqe command to an HBA with SLI-4 interface spec.
- *
- * This function is a lockless version. The function will return success
- * after it successfully submit the wqe to firmware or after adding to the
- * txq.
- **/
-static int
-__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
- struct lpfc_iocbq *piocb, uint32_t flag)
+void
+lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
- int rc;
- struct lpfc_io_buf *lpfc_cmd =
- (struct lpfc_io_buf *)piocb->context1;
- union lpfc_wqe128 *wqe = &piocb->wqe;
+ struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
+ union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
struct sli4_sge *sgl;
/* 128 byte wqe support here */
}
/* add the VMID tags as per switch response */
- if (unlikely(piocb->iocb_flag & LPFC_IO_VMID)) {
+ if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
if (phba->pport->vmid_priority_tagging) {
bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
wqe->words[31] = piocb->vmid_tag.app_id;
}
}
- rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
- return rc;
}
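Factoring lpfc_prep_embed_io() out of the s4 issue path decouples WQE
preparation from submission, so other SLI-4 submitters can reuse the same
preparation step. A sketch of the resulting pair, using the names in this
diff (not a verified call site):

	/* Prepare the embedded WQE for this io buffer, then post it to
	 * the hardware queue associated with the buffer.
	 */
	lpfc_prep_embed_io(phba, lpfc_cmd);
	rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, &lpfc_cmd->cur_iocbq);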
/**
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sglq *sglq;
- union lpfc_wqe128 wqe;
+ union lpfc_wqe128 *wqe;
struct lpfc_queue *wq;
struct lpfc_sli_ring *pring;
+ u32 ulp_command = get_job_cmnd(phba, piocb);
/* Get the WQ */
- if ((piocb->iocb_flag & LPFC_IO_FCP) ||
- (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ if ((piocb->cmd_flag & LPFC_IO_FCP) ||
+ (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
} else {
wq = phba->sli4_hba.els_wq;
*/
lockdep_assert_held(&pring->ring_lock);
-
+ wqe = &piocb->wqe;
if (piocb->sli4_xritag == NO_XRI) {
- if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
- piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ if (ulp_command == CMD_ABORT_XRI_CX)
sglq = NULL;
else {
- if (!list_empty(&pring->txq)) {
+ sglq = __lpfc_sli_get_els_sglq(phba, piocb);
+ if (!sglq) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
- pring, piocb);
+ pring,
+ piocb);
return IOCB_SUCCESS;
} else {
return IOCB_BUSY;
}
- } else {
- sglq = __lpfc_sli_get_els_sglq(phba, piocb);
- if (!sglq) {
- if (!(flag & SLI_IOCB_RET_IOCB)) {
- __lpfc_sli_ringtx_put(phba,
- pring,
- piocb);
- return IOCB_SUCCESS;
- } else
- return IOCB_BUSY;
- }
}
}
- } else if (piocb->iocb_flag & LPFC_IO_FCP) {
+ } else if (piocb->cmd_flag & LPFC_IO_FCP) {
/* These IO's already have an XRI and a mapped sgl. */
sglq = NULL;
}
if (sglq) {
piocb->sli4_lxritag = sglq->sli4_lxritag;
piocb->sli4_xritag = sglq->sli4_xritag;
- if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
+
+ /* ABTS sent by initiator to CT exchange, the
+ * RX_ID field will be filled with the newly
+ * allocated responder XRI.
+ */
+ if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
+ piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
+ bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+ piocb->sli4_xritag);
+
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
+ piocb->sli4_xritag);
+
+ if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
return IOCB_ERROR;
}
- if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+ if (lpfc_sli4_wq_put(wq, wqe))
return IOCB_ERROR;
- if (lpfc_sli4_wq_put(wq, &wqe))
- return IOCB_ERROR;
lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
return 0;
return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}
+static void
+__lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
+ u32 elscmd, u8 tmo, u8 expect_rsp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *cmd;
+
+ cmd = &cmdiocbq->iocb;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+
+ if (expect_rsp) {
+ cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+ cmd->un.elsreq64.remoteID = did; /* DID */
+ cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
+ cmd->ulpTimeout = tmo;
+ } else {
+ cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
+ cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
+ cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
+ }
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpClass = CLASS3;
+
+ /* If we have NPIV enabled, we want to send ELS traffic by VPI. */
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ if (expect_rsp) {
+ cmd->un.elsreq64.myID = vport->fc_myDID;
+
+ /* For ELS_REQUEST64_CR, use the VPI by default */
+ cmd->ulpContext = phba->vpi_ids[vport->vpi];
+ }
+
+ cmd->ulpCt_h = 0;
+ /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
+ if (elscmd == ELS_CMD_ECHO)
+ cmd->ulpCt_l = 0; /* context = invalid RPI */
+ else
+ cmd->ulpCt_l = 1; /* context = VPI */
+ }
+}
+
+static void
+__lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
+ u32 elscmd, u8 tmo, u8 expect_rsp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ union lpfc_wqe128 *wqe;
+ struct ulp_bde64_le *bde;
+
+ wqe = &cmdiocbq->wqe;
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* Word 0 - 2 BDE */
+ bde = (struct ulp_bde64_le *)&wqe->generic.bde;
+ bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
+ bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
+ bde->type_size = cpu_to_le32(cmd_size);
+ bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
+
+ if (expect_rsp) {
+ bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_CR);
+
+ /* Transfer length */
+ wqe->els_req.payload_len = cmd_size;
+ wqe->els_req.max_response_payload_len = FCELSSIZE;
+
+ /* DID */
+ bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
+ } else {
+ /* DID */
+ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
+
+ /* Transfer length */
+ wqe->xmit_els_rsp.response_payload_len = cmd_size;
+
+ bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
+ CMD_XMIT_ELS_RSP64_CX);
+ }
+
+ bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
+ bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
+
+ /* If we have NPIV enabled, we want to send ELS traffic by VPI.
+ * For SLI4, since the driver controls VPIs we also want to include
+ * all ELS pt2pt protocol traffic as well.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
+ (vport->fc_flag & FC_PT2PT)) {
+ if (expect_rsp) {
+ bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
+
+ /* For ELS_REQUEST64_CR, use the VPI by default */
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[vport->vpi]);
+ }
+
+ /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
+ if (elscmd == ELS_CMD_ECHO)
+ bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
+ else
+ bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
+ }
+}
+
+void
+lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
+ u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
+ u8 expect_rsp)
+{
+ phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
+ elscmd, tmo, expect_rsp);
+}
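The three pieces above (an _s3 variant, an _s4 variant, and a thin wrapper that calls through phba->__lpfc_sli_prep_els_req_rsp) follow the same per-revision jump-table dispatch the patch installs in lpfc_sli_api_table_setup() further down. Below is a minimal, self-contained sketch of that dispatch shape; the struct and function names are hypothetical stand-ins, not lpfc definitions.

    #include <stdio.h>

    struct fake_hba;
    typedef void (*prep_fn)(struct fake_hba *hba, int arg);

    struct fake_hba {
            int sli_rev;
            prep_fn prep_els;       /* resolved once at setup time */
    };

    static void prep_els_s3(struct fake_hba *hba, int arg)
    {
            (void)hba;
            printf("SLI-3 prep, arg=%d\n", arg);
    }

    static void prep_els_s4(struct fake_hba *hba, int arg)
    {
            (void)hba;
            printf("SLI-4 prep, arg=%d\n", arg);
    }

    static int api_table_setup(struct fake_hba *hba)
    {
            switch (hba->sli_rev) {
            case 3:
                    hba->prep_els = prep_els_s3;
                    break;
            case 4:
                    hba->prep_els = prep_els_s4;
                    break;
            default:
                    return -1;      /* unknown revision */
            }
            return 0;
    }

    int main(void)
    {
            struct fake_hba hba = { .sli_rev = 4 };

            if (!api_table_setup(&hba))
                    hba.prep_els(&hba, 42); /* callers never branch on sli_rev */
            return 0;
    }

The payoff is that hot-path callers never test the SLI revision; it is resolved once at probe time.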
+
+static void
+__lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
+ u16 rpi, u32 num_entry, u8 tmo)
+{
+ IOCB_t *cmd;
+
+ cmd = &cmdiocbq->iocb;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
+
+ cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+ cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
+ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+
+ cmd->ulpContext = rpi;
+ cmd->ulpClass = CLASS3;
+ cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpOwner = OWN_CHIP;
+ cmd->ulpTimeout = tmo;
+}
+
+static void
+__lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
+ u16 rpi, u32 num_entry, u8 tmo)
+{
+ union lpfc_wqe128 *cmdwqe;
+ struct ulp_bde64_le *bde, *bpl;
+ u32 xmit_len = 0, total_len = 0, size, type, i;
+
+ cmdwqe = &cmdiocbq->wqe;
+ memset(cmdwqe, 0, sizeof(*cmdwqe));
+
+ /* Calculate total_len and xmit_len */
+ bpl = (struct ulp_bde64_le *)bmp->virt;
+ for (i = 0; i < num_entry; i++) {
+ size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
+ total_len += size;
+ }
+ for (i = 0; i < num_entry; i++) {
+ size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
+ type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
+ if (type != ULP_BDE64_TYPE_BDE_64)
+ break;
+ xmit_len += size;
+ }
+
+ /* Words 0 - 2 */
+ bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
+ bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
+ bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
+ bde->type_size = cpu_to_le32(xmit_len);
+ bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BLP_64);
+
+ /* Word 3 */
+ cmdwqe->gen_req.request_payload_len = xmit_len;
+
+ /* Word 5 */
+ bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
+ bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
+ bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
+ bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
+
+ /* Word 7 */
+ bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
+ bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
+ bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
+ bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
+
+ /* Word 12 */
+ cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
+}
+
+void
+lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
+{
+ phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
+}
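__lpfc_sli_prep_gen_req_s4() derives two lengths from the buffer pointer list: xmit_len sums only the leading 64-bit data BDEs, while total_len sums every entry, so total_len - xmit_len becomes the maximum response payload. A self-contained sketch of that masking arithmetic follows, with illustrative mask values rather than the lpfc ULP_BDE64_* definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define BDE64_SIZE_MASK   0x00ffffffu  /* illustrative, not the lpfc value */
    #define BDE64_TYPE_MASK   0xff000000u
    #define BDE64_TYPE_BDE_64 0x00000000u

    int main(void)
    {
            /* two data BDEs followed by a response BDE of a different type */
            uint32_t type_size[] = { 0x00000200, 0x00000100, 0x40000400 };
            uint32_t xmit_len = 0, total_len = 0;
            unsigned int i, n = sizeof(type_size) / sizeof(type_size[0]);

            for (i = 0; i < n; i++)
                    total_len += type_size[i] & BDE64_SIZE_MASK;

            /* xmit_len only counts the leading request BDEs */
            for (i = 0; i < n; i++) {
                    if ((type_size[i] & BDE64_TYPE_MASK) != BDE64_TYPE_BDE_64)
                            break;
                    xmit_len += type_size[i] & BDE64_SIZE_MASK;
            }

            /* what the WQE would carry: request and max response lengths */
            printf("request_payload_len=%u max_response=%u\n",
                   xmit_len, total_len - xmit_len);
            return 0;
    }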
+
+static void
+__lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
+{
+ IOCB_t *icmd;
+
+ icmd = &cmdiocbq->iocb;
+ memset(icmd, 0, sizeof(*icmd));
+
+ icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+ icmd->un.xseq64.w5.hcsw.Fctl = LA;
+ if (last_seq)
+ icmd->un.xseq64.w5.hcsw.Fctl |= LS;
+ icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+ icmd->un.xseq64.w5.hcsw.Rctl = rctl;
+ icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+ icmd->ulpBdeCount = 1;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+
+ switch (cr_cx_cmd) {
+ case CMD_XMIT_SEQUENCE64_CR:
+ icmd->ulpContext = rpi;
+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
+ break;
+ case CMD_XMIT_SEQUENCE64_CX:
+ icmd->ulpContext = ox_id;
+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+__lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
+{
+ union lpfc_wqe128 *wqe;
+ struct ulp_bde64 *bpl;
+ struct ulp_bde64_le *bde;
+
+ wqe = &cmdiocbq->wqe;
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* Words 0 - 2 */
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) {
+ wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
+ wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
+ wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
+ } else {
+ bde = (struct ulp_bde64_le *)&wqe->xmit_sequence.bde;
+ bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
+ bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
+ bde->type_size = cpu_to_le32(bpl->tus.f.bdeSize);
+ bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
+ }
+
+ /* Word 5 */
+ bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
+ bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
+ bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
+ bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
+ bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
+
+ bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
+ CMD_XMIT_SEQUENCE64_WQE);
+
+ /* Word 7 */
+ bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
+
+ /* Word 9 */
+ bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
+
+ /* Word 12 */
+ if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
+ wqe->xmit_sequence.xmit_len = full_size;
+ else
+ wqe->xmit_sequence.xmit_len =
+ wqe->xmit_sequence.bde.tus.f.bdeSize;
+}
+
+void
+lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
+{
+ phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
+ rctl, last_seq, cr_cx_cmd);
+}
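__lpfc_sli_prep_xmit_seq64_s4() takes one of two BDE paths: LIBDFC/loopback jobs arrive with a descriptor already laid out for the WQE and are copied verbatim, while everything else gets a BDE built field by field. A small sketch of that split, under hypothetical types:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct demo_bde {
            uint32_t addr_low;
            uint32_t addr_high;
            uint32_t type_size;     /* size assumed in the low 24 bits */
    };

    int main(void)
    {
            /* descriptor a LIBDFC/loopback-style caller would hand over */
            struct demo_bde prebuilt = { 0x1000, 0x0, 0x200 };
            struct demo_bde wqe_bde;
            int preformatted = 0;   /* flip to 1 for the copy-verbatim path */

            if (preformatted) {
                    /* already in WQE layout: copy as-is */
                    memcpy(&wqe_bde, &prebuilt, sizeof(wqe_bde));
            } else {
                    /* build the descriptor field by field */
                    wqe_bde.addr_low = 0x2000;
                    wqe_bde.addr_high = 0x0;
                    wqe_bde.type_size = 0x100;
            }
            printf("type_size=0x%x\n", (unsigned int)wqe_bde.type_size);
            return 0;
    }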
+
+static void
+__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia)
+{
+ IOCB_t *icmd = NULL;
+
+ icmd = &cmdiocbq->iocb;
+ memset(icmd, 0, sizeof(*icmd));
+
+ /* Word 5 */
+ icmd->un.acxri.abortContextTag = ulp_context;
+ icmd->un.acxri.abortIoTag = iotag;
+
+ if (ia) {
+ /* Word 7 */
+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+ } else {
+ /* Word 3 */
+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+
+ /* Word 7 */
+ icmd->ulpClass = ulp_class;
+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
+ }
+
+ /* Word 7 */
+ icmd->ulpLe = 1;
+}
+
+static void
+__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia)
+{
+ union lpfc_wqe128 *wqe;
+
+ wqe = &cmdiocbq->wqe;
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* Word 3 */
+ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+ if (ia)
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+ else
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
+
+ /* Word 8 */
+ wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
+
+ /* Word 10 */
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+
+ /* Word 11 */
+ bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
+ bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+}
+
+void
+lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
+ bool ia)
+{
+ phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
+ cqid, ia);
+}
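Both abort prep variants key off the "ia" (implicit abort) flag: when the link is down an ABTS cannot go on the wire, so the exchange is closed locally (CLOSE_XRI) instead of aborted with a real ABTS (ABORT_XRI). A tiny sketch of the decision callers feed into lpfc_sli_prep_abort_xri(); the names here are hypothetical.

    #include <stdbool.h>
    #include <stdio.h>

    enum fake_cmd { CMD_CLOSE_XRI, CMD_ABORT_XRI };

    static enum fake_cmd pick_abort_cmd(bool link_up)
    {
            /* ia == true  -> CLOSE_XRI: no ABTS can be sent without a link */
            /* ia == false -> ABORT_XRI: real ABTS exchange on the wire    */
            return link_up ? CMD_ABORT_XRI : CMD_CLOSE_XRI;
    }

    int main(void)
    {
            printf("link up:   %s\n", pick_abort_cmd(true) == CMD_ABORT_XRI ?
                   "ABORT_XRI" : "CLOSE_XRI");
            printf("link down: %s\n", pick_abort_cmd(false) == CMD_ABORT_XRI ?
                   "ABORT_XRI" : "CLOSE_XRI");
            return 0;
    }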
+
/**
* lpfc_sli_api_table_setup - Set up sli api function jump table
* @phba: The hba struct for which this call is being executed.
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
+ phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
+ phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
+ phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
+ phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
break;
case LPFC_PCI_DEV_OC:
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
+ phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
+ phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
+ phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
+ phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
dev_grp);
return -ENODEV;
}
- phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
return 0;
}
{
struct lpfc_io_buf *lpfc_cmd;
- if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+ if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
if (unlikely(!phba->sli4_hba.hdwq))
return NULL;
/*
* for abort iocb hba_wqidx should already
* be setup based on what work queue we used.
*/
- if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
piocb->hba_wqidx = lpfc_cmd->hdwq_no;
}
int rc;
if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_sli_prep_wqe(phba, piocb);
+
eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
pring = lpfc_sli4_calc_ring(phba, piocb);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3095 Event Context not found, no "
"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
- iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
- vpi, rpi);
+ vpi, rpi, iocbq->iocb.ulpStatus,
+ iocbq->iocb.ulpContext);
}
/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
- uint16_t abort_iotag, abort_context;
- struct lpfc_iocbq *abort_iocb = NULL;
-
- if (irsp->ulpStatus) {
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
+ u8 cmnd = get_job_cmnd(phba, cmdiocb);
+
+ if (ulp_status) {
/*
* Assume that the port already completed and returned, or
* will return the iocb. Just Log the message.
*/
- abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
- abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
-
- spin_lock_irq(&phba->hbalock);
if (phba->sli_rev < LPFC_SLI_REV4) {
- if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
- irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
- spin_unlock_irq(&phba->hbalock);
+ if (cmnd == CMD_ABORT_XRI_CX &&
+ ulp_status == IOSTAT_LOCAL_REJECT &&
+ ulp_word4 == IOERR_ABORT_REQUESTED) {
goto release_iocb;
}
- if (abort_iotag != 0 &&
- abort_iotag <= phba->sli.last_iotag)
- abort_iocb =
- phba->sli.iocbq_lookup[abort_iotag];
- } else
- /* For sli4 the abort_tag is the XRI,
- * so the abort routine puts the iotag of the iocb
- * being aborted in the context field of the abort
- * IOCB.
- */
- abort_iocb = phba->sli.iocbq_lookup[abort_context];
+ }
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
"0327 Cannot abort els iocb x%px "
- "with tag %x context %x, abort status %x, "
- "abort code %x\n",
- abort_iocb, abort_iotag, abort_context,
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ "with io cmd xri %x abort tag : x%x, "
+ "abort status %x abort code %x\n",
+ cmdiocb, get_job_abtsiotag(phba, cmdiocb),
+ (phba->sli_rev == LPFC_SLI_REV4) ?
+ get_wqe_reqtag(cmdiocb) :
+ cmdiocb->iocb.un.acxri.abortContextTag,
+ ulp_status, ulp_word4);
- spin_unlock_irq(&phba->hbalock);
}
release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
struct lpfc_iocbq *rspiocb)
{
struct lpfc_nodelist *ndlp = NULL;
- IOCB_t *irsp = &rspiocb->iocb;
+ IOCB_t *irsp;
+ u32 ulp_command, ulp_status, ulp_word4, iotag;
+
+ ulp_command = get_job_cmnd(phba, cmdiocb);
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ iotag = irsp->ulpIoTag;
+ }
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"0139 Ignoring ELS cmd code x%x completion Data: "
"x%x x%x x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout);
+ ulp_command, ulp_status, ulp_word4, iotag);
+
/*
* Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
* if exchange is busy.
*/
- if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
+ if (ulp_command == CMD_GEN_REQUEST64_CR) {
ndlp = cmdiocb->context_un.ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
} else {
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_iocbq *abtsiocbp;
- IOCB_t *icmd = NULL;
- IOCB_t *iabt = NULL;
int retval = IOCB_ERROR;
unsigned long iflags;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
+ u32 ulp_command = get_job_cmnd(phba, cmdiocb);
+ u16 ulp_context, iotag;
+ bool ia;
/*
* There are certain command types we don't want to abort. And we
* don't want to abort commands that are already in the process of
* being aborted.
*/
- icmd = &cmdiocb->iocb;
- if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ if (ulp_command == CMD_ABORT_XRI_WQE ||
+ ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
return IOCB_ABORTING;
if (!pring) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
return retval;
}
*/
if ((vport->load_flag & FC_UNLOADING) &&
pring->ringno == LPFC_ELS_RING) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
return retval;
}
/* This signals the response to set the correct status
* before calling the completion handler
*/
- cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+ cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
- iabt = &abtsiocbp->iocb;
- iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
- iabt->un.acxri.abortContextTag = icmd->ulpContext;
if (phba->sli_rev == LPFC_SLI_REV4) {
- iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
- if (pring->ringno == LPFC_ELS_RING)
- iabt->un.acxri.abortContextTag = cmdiocb->iotag;
+ ulp_context = cmdiocb->sli4_xritag;
+ iotag = abtsiocbp->iotag;
} else {
- iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
+ iotag = cmdiocb->iocb.ulpIoTag;
if (pring->ringno == LPFC_ELS_RING) {
ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
- iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
+ ulp_context = ndlp->nlp_rpi;
+ } else {
+ ulp_context = cmdiocb->iocb.ulpContext;
}
}
- iabt->ulpLe = 1;
- iabt->ulpClass = icmd->ulpClass;
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
- if (cmdiocb->iocb_flag & LPFC_IO_FCP)
- abtsiocbp->iocb_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
- if (cmdiocb->iocb_flag & LPFC_IO_FOF)
- abtsiocbp->iocb_flag |= LPFC_IO_FOF;
if (phba->link_state < LPFC_LINK_UP ||
(phba->sli_rev == LPFC_SLI_REV4 &&
phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN))
- iabt->ulpCommand = CMD_CLOSE_XRI_CN;
+ ia = true;
else
- iabt->ulpCommand = CMD_ABORT_XRI_CN;
+ ia = false;
+
+ lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
+ cmdiocb->iocb.ulpClass,
+ LPFC_WQE_CQ_ID_DEFAULT, ia);
+
+ abtsiocbp->vport = vport;
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
+ if (cmdiocb->cmd_flag & LPFC_IO_FCP)
+ abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
+
+ if (cmdiocb->cmd_flag & LPFC_IO_FOF)
+ abtsiocbp->cmd_flag |= LPFC_IO_FOF;
if (cmpl)
- abtsiocbp->iocb_cmpl = cmpl;
+ abtsiocbp->cmd_cmpl = cmpl;
else
- abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
+ abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
- abtsiocbp->vport = vport;
if (phba->sli_rev == LPFC_SLI_REV4) {
abort_iotag_exit:
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0339 Abort xri x%x, original iotag x%x, "
- "abort cmd iotag x%x retval x%x\n",
- iabt->un.acxri.abortIoTag,
- iabt->un.acxri.abortContextTag,
- abtsiocbp->iotag, retval);
-
+ "0339 Abort IO XRI x%x, Original iotag x%x, "
+ "abort tag x%x Cmdjob : x%px Abortjob : x%px "
+ "retval x%x\n",
+ ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
+ cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
+ retval);
if (retval) {
- cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
__lpfc_sli_release_iocbq(phba, abtsiocbp);
}
lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
struct lpfc_vport *vport)
{
- IOCB_t *icmd = NULL;
+ u8 ulp_command;
/* No null ptr vports */
if (!iocbq || iocbq->vport != vport)
/* iocb must be for FCP IO, already exists on the TX cmpl queue,
* can't be premarked as driver aborted, nor be an ABORT iocb itself
*/
- icmd = &iocbq->iocb;
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
- (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
- (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN))
+ ulp_command = get_job_cmnd(vport->phba, iocbq);
+ if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
+ !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
+ (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
+ (ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_WQE))
return -EINVAL;
return 0;
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
- IOCB_t *icmd = NULL;
int sum, i;
unsigned long iflags;
+ u8 ulp_command;
spin_lock_irqsave(&phba->hbalock, iflags);
for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
if (!iocbq || iocbq->vport != vport)
continue;
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
+ if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
+ !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
continue;
/* Include counting outstanding aborts */
- icmd = &iocbq->iocb;
- if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN) {
+ ulp_command = get_job_cmnd(phba, iocbq);
+ if (ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_WQE) {
sum++;
continue;
}
- if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
- ctx_cmd) == 0)
- sum++;
- }
- spin_unlock_irqrestore(&phba->hbalock, iflags);
-
- return sum;
-}
-
-/**
- * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
- * @phba: Pointer to HBA context object
- * @cmdiocb: Pointer to command iocb object.
- * @wcqe: pointer to the complete wcqe
- *
- * This function is called when an aborted FCP iocb completes. This
- * function is called by the ring event handler with no lock held.
- * This function frees the iocb. It is called for sli-4 adapters.
- **/
-void
-lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *wcqe)
-{
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3017 ABORT_XRI_CN completing on rpi x%x "
- "original iotag x%x, abort cmd iotag x%x "
- "status 0x%x, reason 0x%x\n",
- cmdiocb->iocb.un.acxri.abortContextTag,
- cmdiocb->iocb.un.acxri.abortIoTag,
- cmdiocb->iotag,
- (bf_get(lpfc_wcqe_c_status, wcqe)
- & LPFC_IOCB_STATUS_MASK),
- wcqe->parameter);
- lpfc_sli_release_iocbq(phba, cmdiocb);
+ if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+ ctx_cmd) == 0)
+ sum++;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ return sum;
}
/**
struct lpfc_iocbq *rspiocb)
{
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3096 ABORT_XRI_CN completing on rpi x%x "
+ "3096 ABORT_XRI_CX completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
"status 0x%x, reason 0x%x\n",
+ (phba->sli_rev == LPFC_SLI_REV4) ?
+ cmdiocb->sli4_xritag :
cmdiocb->iocb.un.acxri.abortContextTag,
- cmdiocb->iocb.un.acxri.abortIoTag,
- cmdiocb->iotag, rspiocb->iocb.ulpStatus,
- rspiocb->iocb.un.ulpWord[4]);
+ get_job_abtsiotag(phba, cmdiocb),
+ cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
+ get_job_word4(phba, rspiocb));
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
int errcnt = 0, ret_val = 0;
unsigned long iflags;
int i;
- void *fcp_cmpl = NULL;
/* all I/Os are in process of being flushed */
if (phba->hba_flag & HBA_IOQ_FLUSH)
spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->sli_rev == LPFC_SLI_REV3) {
pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
- fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
} else if (phba->sli_rev == LPFC_SLI_REV4) {
pring = lpfc_sli4_calc_ring(phba, iocbq);
- fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
}
ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
- fcp_cmpl);
+ lpfc_sli_abort_fcp_cmpl);
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (ret_val != IOCB_SUCCESS)
errcnt++;
struct lpfc_hba *phba = vport->phba;
struct lpfc_io_buf *lpfc_cmd;
struct lpfc_iocbq *abtsiocbq;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
struct lpfc_iocbq *iocbq;
- IOCB_t *icmd;
int sum, i, ret_val;
unsigned long iflags;
struct lpfc_sli_ring *pring_s4 = NULL;
+ u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
+ bool ia;
spin_lock_irqsave(&phba->hbalock, iflags);
* If the iocbq is already being aborted, don't take a second
* action, but do count it.
*/
- if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
+ !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
spin_unlock(&lpfc_cmd->buf_lock);
continue;
}
- icmd = &iocbq->iocb;
- abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
- abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
- if (phba->sli_rev == LPFC_SLI_REV4)
- abtsiocbq->iocb.un.acxri.abortIoTag =
- iocbq->sli4_xritag;
- else
- abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
- abtsiocbq->iocb.ulpLe = 1;
- abtsiocbq->iocb.ulpClass = icmd->ulpClass;
- abtsiocbq->vport = vport;
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
- if (iocbq->iocb_flag & LPFC_IO_FCP)
- abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (iocbq->iocb_flag & LPFC_IO_FOF)
- abtsiocbq->iocb_flag |= LPFC_IO_FOF;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = abtsiocbq->iotag;
+ ulp_context = iocbq->sli4_xritag;
+ cqid = lpfc_cmd->hdwq->io_cq_map;
+ } else {
+ iotag = iocbq->iocb.ulpIoTag;
+ if (pring->ringno == LPFC_ELS_RING) {
+ ndlp = (struct lpfc_nodelist *)(iocbq->context1);
+ ulp_context = ndlp->nlp_rpi;
+ } else {
+ ulp_context = iocbq->iocb.ulpContext;
+ }
+ }
ndlp = lpfc_cmd->rdata->pnode;
if (lpfc_is_link_up(phba) &&
(ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
- abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+ ia = false;
else
- abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+ ia = true;
+
+ lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
+ iocbq->iocb.ulpClass, cqid,
+ ia);
+
+ abtsiocbq->vport = vport;
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
+ if (iocbq->cmd_flag & LPFC_IO_FCP)
+ abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
+ if (iocbq->cmd_flag & LPFC_IO_FOF)
+ abtsiocbq->cmd_flag |= LPFC_IO_FOF;
/* Setup callback routine and issue the command. */
- abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+ abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
/*
* Indicate the IO is being aborted by the driver and set
* the caller's flag into the aborted IO.
*/
- iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+ iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
if (phba->sli_rev == LPFC_SLI_REV4) {
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
wait_queue_head_t *pdone_q;
unsigned long iflags;
struct lpfc_io_buf *lpfc_cmd;
+ size_t offset = offsetof(struct lpfc_iocbq, wqe);
spin_lock_irqsave(&phba->hbalock, iflags);
- if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
+ if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
/*
* A time out has occurred for the iocb. If a time out
*/
spin_unlock_irqrestore(&phba->hbalock, iflags);
- cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
- cmdiocbq->wait_iocb_cmpl = NULL;
- if (cmdiocbq->iocb_cmpl)
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
+ cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
+ cmdiocbq->wait_cmd_cmpl = NULL;
+ if (cmdiocbq->cmd_cmpl)
+ (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, NULL);
else
lpfc_sli_release_iocbq(phba, cmdiocbq);
return;
}
- cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+ /* Copy the contents of the local rspiocb into the caller's buffer. */
+ cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
if (cmdiocbq->context2 && rspiocbq)
- memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
- &rspiocbq->iocb, sizeof(IOCB_t));
+ memcpy((char *)cmdiocbq->context2 + offset,
+ (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
/* Set the exchange busy flag for task management commands */
- if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
- !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+ if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
+ !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
cur_iocbq);
- if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
+ if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
else
lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
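The memcpy above uses offsetof(struct lpfc_iocbq, wqe) so that only the completion payload is copied into the caller's response buffer, leaving the caller's bookkeeping fields (iotag, list linkage, and so on) intact. A standalone demo of the technique, with a hypothetical stand-in struct:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct demo_iocbq {
            int iotag;              /* caller-owned, must survive the copy */
            int list_state;         /* caller-owned */
            unsigned int wqe[4];    /* completion payload starts here */
            unsigned int wcqe_status;
    };

    int main(void)
    {
            struct demo_iocbq caller = { .iotag = 7, .list_state = 1 };
            struct demo_iocbq rsp = { .wqe = { 1, 2, 3, 4 }, .wcqe_status = 99 };
            size_t offset = offsetof(struct demo_iocbq, wqe);

            /* copy only the tail of the struct, from "wqe" onward */
            memcpy((char *)&caller + offset, (char *)&rsp + offset,
                   sizeof(rsp) - offset);

            printf("iotag=%d (preserved) wcqe_status=%u (copied)\n",
                   caller.iotag, caller.wcqe_status);
            return 0;
    }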
* @piocbq: Pointer to command iocb.
* @flag: Flag to test.
*
- * This routine grabs the hbalock and then test the iocb_flag to
+ * This routine grabs the hbalock and then tests the cmd_flag to
* see if the passed in flag is set.
* Returns:
* 1 if flag is set.
int ret;
spin_lock_irqsave(&phba->hbalock, iflags);
- ret = piocbq->iocb_flag & flag;
+ ret = piocbq->cmd_flag & flag;
spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret;
* @timeout: Timeout in number of seconds.
*
* This function issues the iocb to firmware and waits for the
- * iocb to complete. The iocb_cmpl field of the shall be used
+ * iocb to complete. The cmd_cmpl field of the iocb shall be used
* to handle iocbs which time out. If the field is NULL, the
* function shall free the iocbq structure. If more clean up is
* needed, the caller is expected to provide a completion function
* that will provide the needed clean up. If the iocb command is
* not completed within timeout seconds, the function will either
- * free the iocbq structure (if iocb_cmpl == NULL) or execute the
- * completion function set in the iocb_cmpl field and then return
+ * free the iocbq structure (if cmd_cmpl == NULL) or execute the
+ * completion function set in the cmd_cmpl field and then return
* a status of IOCB_TIMEDOUT. The caller should not free the iocb
* resources if this function returns IOCB_TIMEDOUT.
* The function waits for the iocb completion using an
* This function assumes that the iocb completions occur while
* this function sleeps. So, this function cannot be called from
* the thread which processes iocb completions for this ring.
- * This function clears the iocb_flag of the iocb object before
+ * This function clears the cmd_flag of the iocb object before
* issuing the iocb and the iocb completion handler sets this
* flag and wakes this thread when the iocb completes.
* The contents of the response iocb will be copied to prspiocbq
unsigned long iflags;
bool iocb_completed = true;
- if (phba->sli_rev >= LPFC_SLI_REV4)
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ lpfc_sli_prep_wqe(phba, piocb);
+
pring = lpfc_sli4_calc_ring(phba, piocb);
- else
+ } else
pring = &phba->sli.sli3_ring[ring_number];
/*
* If the caller has provided a response iocbq buffer, then context2
piocb->context2 = prspiocbq;
}
- piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
- piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
+ piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
+ piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
piocb->context_un.wait_queue = &done_q;
- piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
+ piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val))
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
spin_lock_irqsave(&phba->hbalock, iflags);
- if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
+ if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
/*
* IOCB timed out. Inform the wake iocb wait
*/
iocb_completed = false;
- piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
+ piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (iocb_completed) {
piocb->context2 = NULL;
piocb->context_un.wait_queue = NULL;
- piocb->iocb_cmpl = NULL;
+ piocb->cmd_cmpl = NULL;
return retval;
}
}
/**
- * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
- * @phba: pointer to lpfc hba data structure
- * @pIocbIn: pointer to the rspiocbq
- * @pIocbOut: pointer to the cmdiocbq
- * @wcqe: pointer to the complete wcqe
- *
- * This routine transfers the fields of a command iocbq to a response iocbq
- * by copying all the IOCB fields from command iocbq and transferring the
- * completion status information from the complete wcqe.
- **/
-static void
-lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
- struct lpfc_iocbq *pIocbIn,
- struct lpfc_iocbq *pIocbOut,
- struct lpfc_wcqe_complete *wcqe)
-{
- int numBdes, i;
- unsigned long iflags;
- uint32_t status, max_response;
- struct lpfc_dmabuf *dmabuf;
- struct ulp_bde64 *bpl, bde;
- size_t offset = offsetof(struct lpfc_iocbq, iocb);
-
- memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
- sizeof(struct lpfc_iocbq) - offset);
- /* Map WCQE parameters into irspiocb parameters */
- status = bf_get(lpfc_wcqe_c_status, wcqe);
- pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
- if (pIocbOut->iocb_flag & LPFC_IO_FCP)
- if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
- pIocbIn->iocb.un.fcpi.fcpi_parm =
- pIocbOut->iocb.un.fcpi.fcpi_parm -
- wcqe->total_data_placed;
- else
- pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- else {
- pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- switch (pIocbOut->iocb.ulpCommand) {
- case CMD_ELS_REQUEST64_CR:
- dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
- bpl = (struct ulp_bde64 *)dmabuf->virt;
- bde.tus.w = le32_to_cpu(bpl[1].tus.w);
- max_response = bde.tus.f.bdeSize;
- break;
- case CMD_GEN_REQUEST64_CR:
- max_response = 0;
- if (!pIocbOut->context3)
- break;
- numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
- sizeof(struct ulp_bde64);
- dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
- bpl = (struct ulp_bde64 *)dmabuf->virt;
- for (i = 0; i < numBdes; i++) {
- bde.tus.w = le32_to_cpu(bpl[i].tus.w);
- if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
- max_response += bde.tus.f.bdeSize;
- }
- break;
- default:
- max_response = wcqe->total_data_placed;
- break;
- }
- if (max_response < wcqe->total_data_placed)
- pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
- else
- pIocbIn->iocb.un.genreq64.bdl.bdeSize =
- wcqe->total_data_placed;
- }
-
- /* Convert BG errors for completion status */
- if (status == CQE_STATUS_DI_ERROR) {
- pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
-
- if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
- pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
- else
- pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
-
- pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
- if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_GUARD_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_APPTAG_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_REFTAG_ERR_MASK;
-
- /* Check to see if there was any good data before the error */
- if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_HI_WATER_MARK_PRESENT_MASK;
- pIocbIn->iocb.unsli3.sli3_bg.bghm =
- wcqe->total_data_placed;
- }
-
- /*
- * Set ALL the error bits to indicate we don't know what
- * type of error it is.
- */
- if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
- BGS_GUARD_ERR_MASK);
- }
-
- /* Pick up HBA exchange busy condition */
- if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-}
-
-/**
- * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
+ * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
* @phba: Pointer to HBA context object.
* @irspiocbq: Pointer to work-queue completion queue entry.
*
* Return: Pointer to the receive IOCBQ, NULL otherwise.
**/
static struct lpfc_iocbq *
-lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
- struct lpfc_iocbq *irspiocbq)
+lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
+ struct lpfc_iocbq *irspiocbq)
{
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *cmdiocbq;
return NULL;
wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
+ spin_lock_irqsave(&pring->ring_lock, iflags);
pring->stats.iocb_event++;
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
if (unlikely(!cmdiocbq)) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0386 ELS complete with no corresponding "
"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
return NULL;
}
- spin_lock_irqsave(&pring->ring_lock, iflags);
+ memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
+ memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
+
/* Put the iocb back on the txcmplq */
lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- /* Fake the irspiocbq and copy necessary response information */
- lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
return irspiocbq;
}
{
struct lpfc_sli_ring *pring = cq->pring;
struct lpfc_iocbq *cmdiocbq;
- struct lpfc_iocbq irspiocbq;
unsigned long iflags;
/* Check for response status */
/* Look up the FCP command IOCB and create pseudo response IOCB */
spin_lock_irqsave(&pring->ring_lock, iflags);
pring->stats.iocb_event++;
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
if (unlikely(!cmdiocbq)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0374 FCP complete with no corresponding "
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
- if (cmdiocbq->iocb_cmpl == NULL) {
- if (cmdiocbq->wqe_cmpl) {
- /* For FCP the flag is cleared in wqe_cmpl */
- if (!(cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
- cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
- /* Pass the cmd_iocb and the wcqe to the upper layer */
- (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
- return;
+ if (cmdiocbq->cmd_cmpl) {
+ /* For FCP the flag is cleared in cmd_cmpl */
+ if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
+ cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
+
+ /* Pass the cmd_iocb and the wcqe to the upper layer */
+ memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
+ sizeof(struct lpfc_wcqe_complete));
+ (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, cmdiocbq);
+ } else {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0375 FCP cmdiocb not callback function "
"iotag: (%d)\n",
bf_get(lpfc_wcqe_c_request_tag, wcqe));
- return;
}
-
- /* Only SLI4 non-IO commands stil use IOCB */
- /* Fake the irspiocb and copy necessary response information */
- lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
-
- if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-
- /* Pass the cmd_iocb and the rsp state to the upper layer */
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
/**
if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
- rsp_iocbq->iocb.ulpStatus,
- rsp_iocbq->iocb.un.ulpWord[4]);
+ get_job_ulpstatus(phba, rsp_iocbq),
+ get_job_word4(phba, rsp_iocbq));
}
/**
struct lpfc_nodelist *ndlp;
uint16_t oxid, rxid, xri, lxri;
uint32_t sid, fctl;
- IOCB_t *icmd;
+ union lpfc_wqe128 *icmd;
int rc;
if (!lpfc_is_link_up(phba))
if (!ctiocb)
return;
+ icmd = &ctiocb->wqe;
+
/* Extract the F_CTL field from FC_HDR */
fctl = sli4_fctl_from_fc_hdr(fc_hdr);
- icmd = &ctiocb->iocb;
- icmd->un.xseq64.bdl.bdeSize = 0;
- icmd->un.xseq64.bdl.ulpIoTag32 = 0;
- icmd->un.xseq64.w5.hcsw.Dfctl = 0;
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
- icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
-
- /* Fill in the rest of iocb fields */
- icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
- icmd->ulpBdeCount = 0;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
- icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
ctiocb->context1 = lpfc_nlp_get(ndlp);
if (!ctiocb->context1) {
lpfc_sli_release_iocbq(phba, ctiocb);
}
ctiocb->vport = phba->pport;
- ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+ ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
ctiocb->sli4_lxritag = NO_XRI;
ctiocb->sli4_xritag = NO_XRI;
+ ctiocb->abort_rctl = FC_RCTL_BA_ACC;
if (fctl & FC_FC_EX_CTX)
/* Exchange responder sent the abort so we
*/
if ((fctl & FC_FC_EX_CTX) &&
(lxri > lpfc_sli4_get_iocb_cnt(phba))) {
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
- bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
- bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
- bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ ctiocb->abort_rctl = FC_RCTL_BA_RJT;
+ bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
+ bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_INV_XID);
+ bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_UNABLE);
}
/* If BA_ABTS failed to abort a partially assembled receive sequence,
* the IOCB for a BA_RJT.
*/
if (aborted == false) {
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
- bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
- bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
- bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ ctiocb->abort_rctl = FC_RCTL_BA_RJT;
+ bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
+ bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_INV_XID);
+ bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_UNABLE);
}
if (fctl & FC_FC_EX_CTX) {
* of BA_ACC will use OX_ID from ABTS for the XRI_TAG
* field and RX_ID from ABTS for RX_ID field.
*/
- bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
+ ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
+ bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
} else {
/* ABTS sent by initiator to CT exchange, construction
* of BA_ACC will need to allocate a new XRI as for the
* XRI_TAG field.
*/
- bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
+ ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
}
- bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
- bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
+
+ /* OX_ID is invariable regardless of who sent the ABTS to the CT exchange */
+ bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
+ bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
+
+ /* Use CT=VPI */
+ bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
+ ndlp->nlp_DID);
+ bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
+
/* Xmit CT abts response on exchange <xid> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+ ctiocb->abort_rctl, oxid, phba->link_state);
+ lpfc_sli_prep_wqe(phba, ctiocb);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2925 Failed to issue CT ABTS RSP x%x on "
"xri x%x, Data x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+ ctiocb->abort_rctl, oxid,
phba->link_state);
lpfc_nlp_put(ndlp);
ctiocb->context1 = NULL;
struct fc_frame_header *fc_hdr;
uint32_t sid;
uint32_t len, tot_len;
- struct ulp_bde64 *pbde;
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* remove from receive buffer list */
first_iocbq = lpfc_sli_get_iocbq(vport->phba);
if (first_iocbq) {
/* Initialize the first IOCB. */
- first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
- first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
+ first_iocbq->wcqe_cmpl.total_data_placed = 0;
+ bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
+ IOSTAT_SUCCESS);
first_iocbq->vport = vport;
/* Check FC Header to see what TYPE of frame we are rcv'ing */
if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
- first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
- first_iocbq->iocb.un.rcvels.parmRo =
- sli4_did_from_fc_hdr(fc_hdr);
- first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
- } else
- first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
- first_iocbq->iocb.ulpContext = NO_XRI;
- first_iocbq->iocb.unsli3.rcvsli3.ox_id =
- be16_to_cpu(fc_hdr->fh_ox_id);
- /* iocbq is prepped for internal consumption. Physical vpi. */
- first_iocbq->iocb.unsli3.rcvsli3.vpi =
- vport->phba->vpi_ids[vport->vpi];
- /* put the first buffer into the first IOCBq */
+ bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
+ sli4_did_from_fc_hdr(fc_hdr));
+ }
+
+ bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
+ NO_XRI);
+ bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
+ be16_to_cpu(fc_hdr->fh_ox_id));
+
+ /* put the first buffer into the first iocb */
tot_len = bf_get(lpfc_rcqe_length,
- &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
first_iocbq->context2 = &seq_dmabuf->dbuf;
first_iocbq->context3 = NULL;
- first_iocbq->iocb.ulpBdeCount = 1;
+ /* Keep track of the BDE count */
+ first_iocbq->wcqe_cmpl.word3 = 1;
+
if (tot_len > LPFC_DATA_BUF_SIZE)
- first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
- LPFC_DATA_BUF_SIZE;
+ first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
+ LPFC_DATA_BUF_SIZE;
else
- first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
-
- first_iocbq->iocb.un.rcvels.remoteID = sid;
+ first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
- first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
+ first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
+ bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
+ sid);
}
iocbq = first_iocbq;
/*
}
if (!iocbq->context3) {
iocbq->context3 = d_buf;
- iocbq->iocb.ulpBdeCount++;
+ iocbq->wcqe_cmpl.word3++;
/* We need to get the size out of the right CQE */
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
len = bf_get(lpfc_rcqe_length,
&hbq_buf->cq_event.cqe.rcqe_cmpl);
- pbde = (struct ulp_bde64 *)
- &iocbq->iocb.unsli3.sli3Words[4];
- if (len > LPFC_DATA_BUF_SIZE)
- pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
- else
- pbde->tus.f.bdeSize = len;
-
- iocbq->iocb.unsli3.rcvsli3.acc_len += len;
+ iocbq->unsol_rcv_len = len;
+ iocbq->wcqe_cmpl.total_data_placed += len;
tot_len += len;
} else {
iocbq = lpfc_sli_get_iocbq(vport->phba);
if (!iocbq) {
if (first_iocbq) {
- first_iocbq->iocb.ulpStatus =
- IOSTAT_FCP_RSP_ERROR;
- first_iocbq->iocb.un.ulpWord[4] =
- IOERR_NO_RESOURCES;
+ bf_set(lpfc_wcqe_c_status,
+ &first_iocbq->wcqe_cmpl,
+ IOSTAT_FCP_RSP_ERROR);
+ first_iocbq->wcqe_cmpl.parameter =
+ IOERR_NO_RESOURCES;
}
lpfc_in_buf_free(vport->phba, d_buf);
continue;
&hbq_buf->cq_event.cqe.rcqe_cmpl);
iocbq->context2 = d_buf;
iocbq->context3 = NULL;
- iocbq->iocb.ulpBdeCount = 1;
+ iocbq->wcqe_cmpl.word3 = 1;
+
if (len > LPFC_DATA_BUF_SIZE)
- iocbq->iocb.un.cont64[0].tus.f.bdeSize =
- LPFC_DATA_BUF_SIZE;
+ iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
+ LPFC_DATA_BUF_SIZE;
else
- iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
+ iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
+ len;
tot_len += len;
- iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
-
- iocbq->iocb.un.rcvels.remoteID = sid;
+ iocbq->wcqe_cmpl.total_data_placed = tot_len;
+ bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
+ sid);
list_add_tail(&iocbq->list, &first_iocbq->list);
}
}
struct fc_frame_header *fc_hdr;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq = NULL;
- union lpfc_wqe *wqe;
+ union lpfc_wqe128 *pwqe;
struct lpfc_dmabuf *pcmd = NULL;
uint32_t frame_len;
int rc;
/* copyin the payload */
memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
- /* fill in BDE's for command */
- iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
- iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
- iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
- iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
-
iocbq->context2 = pcmd;
iocbq->vport = vport;
- iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
- iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+ iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
+ iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
+ iocbq->num_bdes = 0;
+
+ pwqe = &iocbq->wqe;
+ /* fill in BDE's for command */
+ pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
+ pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
+ pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
+ pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+
+ pwqe->send_frame.frame_len = frame_len;
+ pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
+ pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
+ pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
+ pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
+ pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
+ pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
+
+ pwqe->generic.wqe_com.word7 = 0;
+ pwqe->generic.wqe_com.word10 = 0;
+
+ bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
+ bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
+ bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
+ bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
+ bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
+ bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
+ bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
+ pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
+
+ iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
- /*
- * Setup rest of the iocb as though it were a WQE
- * Build the SEND_FRAME WQE
- */
- wqe = (union lpfc_wqe *)&iocbq->iocb;
-
- wqe->send_frame.frame_len = frame_len;
- wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
- wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
- wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
- wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
- wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
- wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
-
- iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
- iocbq->iocb.ulpLe = 1;
- iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
if (rc == IOCB_ERROR)
goto exit;
struct lpfc_iocbq *piocbq = NULL;
unsigned long iflags = 0;
char *fail_msg = NULL;
- struct lpfc_sglq *sglq;
- union lpfc_wqe128 wqe;
uint32_t txq_cnt = 0;
struct lpfc_queue *wq;
+ int ret = 0;
if (phba->link_flag & LS_MDS_LOOPBACK) {
/* MDS WQE are posted only to first WQ*/
txq_cnt);
break;
}
- sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
- if (!sglq) {
- __lpfc_sli_ringtx_put(phba, pring, piocbq);
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
- break;
- }
txq_cnt--;
- /* The xri and iocb resources secured,
- * attempt to issue request
- */
- piocbq->sli4_lxritag = sglq->sli4_lxritag;
- piocbq->sli4_xritag = sglq->sli4_xritag;
- if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
- fail_msg = "to convert bpl to sgl";
- else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
- fail_msg = "to convert iocb to wqe";
- else if (lpfc_sli4_wq_put(wq, &wqe))
- fail_msg = " - Wq is full";
- else
- lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
+ ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
+ if (ret && ret != IOCB_BUSY) {
+ fail_msg = " - Cannot send IO ";
+ piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ }
if (fail_msg) {
+ piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
/* Failed means we can't issue and need to cancel */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2822 IOCB failed %s iotag 0x%x "
- "xri 0x%x\n",
- fail_msg,
- piocbq->iotag, piocbq->sli4_xritag);
+ "xri 0x%x %d flg x%x\n",
+ fail_msg, piocbq->iotag,
+ piocbq->sli4_xritag, ret,
+ piocbq->cmd_flag);
list_add_tail(&piocbq->list, &completions);
fail_msg = NULL;
}
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ if (txq_cnt == 0 || ret == IOCB_BUSY)
+ break;
}
-
/* Cancel all the IOCBs that cannot be issued */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ IOERR_SLI_ABORTED);
return txq_cnt;
}
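After the rework, lpfc_drain_txq() funnels queued IOCBs through the regular __lpfc_sli_issue_iocb() path and stops draining on IOCB_BUSY instead of hand-building WQEs inline. A control-flow sketch of that loop under hypothetical queue contents and return codes:

    #include <stdio.h>

    enum { IO_OK = 0, IO_BUSY = 1, IO_ERROR = 2 };

    /* pretend submissions: the third request hits a full queue */
    static int issue_io(int tag)
    {
            return (tag == 3) ? IO_BUSY : IO_OK;
    }

    int main(void)
    {
            int txq[] = { 1, 2, 3, 4 };
            int n = sizeof(txq) / sizeof(txq[0]);
            int i, rc;

            for (i = 0; i < n; i++) {
                    rc = issue_io(txq[i]);
                    if (rc == IO_BUSY) {
                            /* leave this and later IOs queued; retry later */
                            printf("tag %d busy, stop draining\n", txq[i]);
                            break;
                    } else if (rc == IO_ERROR) {
                            printf("tag %d failed, cancel\n", txq[i]);
                    } else {
                            printf("tag %d issued\n", txq[i]);
                    }
            }
            return 0;
    }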
cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
if (cmd == CMD_XMIT_BLS_RSP64_WQE)
return sglq->sli4_xritag;
- numBdes = pwqeq->rsvd2;
+ numBdes = pwqeq->num_bdes;
if (numBdes) {
/* The addrHigh and addrLow fields within the WQE
* have not been byteswapped yet so there is no
uint32_t ret = 0;
/* NVME_LS and NVME_LS ABTS requests. */
- if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
+ if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
pring = phba->sli4_hba.nvmels_wq->pring;
lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
qp, wq_access);
}
/* NVME_FCREQ and NVME_ABTS requests */
- if (pwqe->iocb_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
+ if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
wq = qp->io_wq;
pring = wq->pring;
}
/* NVMET requests */
- if (pwqe->iocb_flag & LPFC_IO_NVMET) {
+ if (pwqe->cmd_flag & LPFC_IO_NVMET) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
wq = qp->io_wq;
pring = wq->pring;
return WQE_NORESOURCE;
/* Indicate the IO is being aborted by the driver. */
- cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+ cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
abtswqe = &abtsiocb->wqe;
memset(abtswqe, 0, sizeof(*abtswqe));
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
- abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (cmdiocb->iocb_flag & LPFC_IO_FCP)
- abtsiocb->iocb_flag |= LPFC_IO_FCP;
- if (cmdiocb->iocb_flag & LPFC_IO_NVME)
- abtsiocb->iocb_flag |= LPFC_IO_NVME;
- if (cmdiocb->iocb_flag & LPFC_IO_FOF)
- abtsiocb->iocb_flag |= LPFC_IO_FOF;
+ abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
+ if (cmdiocb->cmd_flag & LPFC_IO_FCP)
+ abtsiocb->cmd_flag |= LPFC_IO_FCP;
+ if (cmdiocb->cmd_flag & LPFC_IO_NVME)
+ abtsiocb->cmd_flag |= LPFC_IO_NVME;
+ if (cmdiocb->cmd_flag & LPFC_IO_FOF)
+ abtsiocb->cmd_flag |= LPFC_IO_FOF;
abtsiocb->vport = vport;
- abtsiocb->wqe_cmpl = cmpl;
+ abtsiocb->cmd_cmpl = cmpl;
lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
if (retval) {
- cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
__lpfc_sli_release_iocbq(phba, abtsiocb);
}
/* MUST zero fields if buffer is reused by another protocol */
lpfc_ncmd->nvmeCmd = NULL;
- lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
- lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
+ lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
if (phba->cfg_xpsgl && !phba->nvmet_support &&
!list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
+
+/**
+ * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
+ * @phba: phba object
+ * @job: job entry of the command to be posted.
+ *
+ * Fill in the common fields of the WQE for the given command.
+ *
+ * Return codes:
+ * None
+ **/
+void
+lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
+{
+ u8 cmnd;
+ u32 *pcmd;
+ u32 if_type = 0;
+ u32 fip, abort_tag;
+ struct lpfc_nodelist *ndlp = NULL;
+ union lpfc_wqe128 *wqe = &job->wqe;
+ struct lpfc_dmabuf *context2;
+ u32 els_id = LPFC_ELS_ID_DEFAULT;
+ u8 command_type = ELS_COMMAND_NON_FIP;
+
+ fip = phba->hba_flag & HBA_FIP_SUPPORT;
+ /* The fcp commands will set command type */
+ if (job->cmd_flag & LPFC_IO_FCP)
+ command_type = FCP_COMMAND;
+ else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
+ command_type = ELS_COMMAND_FIP;
+ else
+ command_type = ELS_COMMAND_NON_FIP;
+
+ abort_tag = job->iotag;
+ cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
+
+ switch (cmnd) {
+ case CMD_ELS_REQUEST64_WQE:
+ if (job->cmd_flag & LPFC_IO_LIBDFC)
+ ndlp = job->context_un.ndlp;
+ else
+ ndlp = (struct lpfc_nodelist *)job->context1;
+
+ /* CCP CCPE PV PRI in word10 were set in the memcpy */
+ if (command_type == ELS_COMMAND_FIP)
+ els_id = ((job->cmd_flag & LPFC_FIP_ELS_ID_MASK)
+ >> LPFC_FIP_ELS_ID_SHIFT);
+
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
+ context2 = (struct lpfc_dmabuf *)job->context2;
+ pcmd = (u32 *)context2->virt;
+ if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
+ *pcmd == ELS_CMD_SCR ||
+ *pcmd == ELS_CMD_RDF ||
+ *pcmd == ELS_CMD_EDC ||
+ *pcmd == ELS_CMD_RSCN_XMT ||
+ *pcmd == ELS_CMD_FDISC ||
+ *pcmd == ELS_CMD_LOGO ||
+ *pcmd == ELS_CMD_QFPA ||
+ *pcmd == ELS_CMD_UVEM ||
+ *pcmd == ELS_CMD_PLOGI)) {
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ bf_set(els_req64_sid, &wqe->els_req,
+ job->vport->fc_myDID);
+
+ if ((*pcmd == ELS_CMD_FLOGI) &&
+ !(phba->fc_topology ==
+ LPFC_TOPOLOGY_LOOP))
+ bf_set(els_req64_sid, &wqe->els_req, 0);
+
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[job->vport->vpi]);
+ } else if (pcmd) {
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ }
+ }
+
+ bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+
+ bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
+ bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+ break;
+ case CMD_XMIT_ELS_RSP64_WQE:
+ ndlp = (struct lpfc_nodelist *)job->context1;
+
+ /* word4 */
+ wqe->xmit_els_rsp.word4 = 0;
+
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
+ if (job->vport->fc_flag & FC_PT2PT) {
+ bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+ job->vport->fc_myDID);
+ if (job->vport->fc_myDID == Fabric_DID) {
+ bf_set(wqe_els_did,
+ &wqe->xmit_els_rsp.wqe_dest, 0);
+ }
+ }
+ }
+
+ bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
+ LPFC_WQE_LENLOC_WORD3);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
+
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+ job->vport->fc_myDID);
+ bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+
+ if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ phba->vpi_ids[job->vport->vpi]);
+ }
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_GEN_REQUEST64_WQE:
+ /* Word 10 */
+ bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_XMIT_SEQUENCE64_WQE:
+ if (phba->link_flag & LS_LOOPBACK_MODE)
+ bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
+
+ wqe->xmit_sequence.rsvd3 = 0;
+ bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_LENLOC_WORD12);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_XMIT_BLS_RSP64_WQE:
+ bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
+ bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+ bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
+ phba->vpi_ids[phba->pport->vpi]);
+ bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
+ /* Overwrite the pre-set command type with OTHER_COMMAND */
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_FCP_ICMND64_WQE: /* task mgmt commands */
+ case CMD_ABORT_XRI_WQE: /* abort iotag */
+ case CMD_SEND_FRAME: /* mds loopback */
+ /* cases already formatted for sli4 wqe - no chgs necessary */
+ return;
+ default:
+ dump_stack();
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "6207 Invalid command 0x%x\n",
+ cmnd);
+ break;
+ }
+
+ wqe->generic.wqe_com.abort_tag = abort_tag;
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
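
Almost every line of lpfc_sli_prep_wqe() goes through lpfc's bf_set()/bf_get() accessors, which pack and extract bit-fields from the 32-bit words of the 128-byte WQE via per-field _SHIFT/_MASK/_WORD constants. The following self-contained sketch models that technique; the demo_cmnd field, its width, and its word placement are invented for illustration and do not match lpfc's real WQE layout:

#include <stdint.h>
#include <stdio.h>

struct demo_wqe_com { uint32_t word7; };

/* Hypothetical per-field constants in the style of lpfc_hw4.h; the
 * field name, width, and word placement are invented for this demo. */
#define demo_cmnd_SHIFT 24
#define demo_cmnd_MASK  0x000000ffU
#define demo_cmnd_WORD  word7

/* Minimal model of lpfc's token-pasting bf_set()/bf_get() macros. */
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

int main(void)
{
	struct demo_wqe_com com = { 0 };

	bf_set(demo_cmnd, &com, 0x8a);	/* pack an 8-bit command code */
	printf("word7=0x%08x cmnd=0x%02x\n",
	       (unsigned int)com.word7,
	       (unsigned int)bf_get(demo_cmnd, &com));
	return 0;
}

Because each field's shift, mask, and word live behind one name, call sites such as bf_set(wqe_cmnd, &wqe->els_req.wqe_com, ...) stay layout-agnostic.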
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
LPFC_CTX_HOST
} lpfc_ctx_cmd;
-union lpfc_vmid_iocb_tag {
+union lpfc_vmid_tag {
uint32_t app_id;
uint8_t cs_ctl_vmid;
struct lpfc_vmid_context *vmid_context; /* UVEM context information */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
uint16_t hba_wqidx; /* index to HBA work queue */
struct lpfc_cq_event cq_event;
- struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */
uint64_t isr_timestamp;
union lpfc_wqe128 wqe; /* SLI-4 */
IOCB_t iocb; /* SLI-3 */
+ struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */
- uint8_t rsvd2;
+ u32 unsol_rcv_len; /* Receive len in unsol path */
+
+ uint8_t num_bdes;
+ uint8_t abort_bls; /* ABTS by initiator or responder */
+ u8 abort_rctl; /* ACC or RJT flag */
uint8_t priority; /* OAS priority */
uint8_t retry; /* retry counter for IOCB cmd - if needed */
- uint32_t iocb_flag;
+
+ u32 cmd_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */
#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */
struct lpfc_node_rrq *rrq;
} context_un;
- union lpfc_vmid_iocb_tag vmid_tag;
- void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
- void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
- void (*iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
- void (*wqe_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_wcqe_complete *);
+ union lpfc_vmid_tag vmid_tag;
+ void (*fabric_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
+ struct lpfc_iocbq *rsp);
+ void (*wait_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
+ struct lpfc_iocbq *rsp);
+ void (*cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
+ struct lpfc_iocbq *rsp);
};
#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.0.0.4"
+#define LPFC_DRIVER_VERSION "14.2.0.0"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2021 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2022 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
{
struct mac53c94_regs __iomem *regs = state->regs;
struct dbdma_regs __iomem *dma = state->dma;
- int x;
writeb(state->host->this_id | CF1_PAR_ENABLE, &regs->config1);
writeb(TIMO_VAL(250), &regs->sel_timeout); /* 250ms */
writeb(0, &regs->config3);
writeb(0, &regs->sync_period);
writeb(0, &regs->sync_offset);
- x = readb(&regs->interrupt);
+ (void)readb(&regs->interrupt);
writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control);
}
struct fsc_state *state = (struct fsc_state *) dev_id;
struct mac53c94_regs __iomem *regs = state->regs;
struct dbdma_regs __iomem *dma = state->dma;
- struct scsi_cmnd *cmd = state->current_req;
+ struct scsi_cmnd *const cmd = state->current_req;
+ struct mac53c94_cmd_priv *const mcmd = mac53c94_priv(cmd);
int nb, stat, seq, intr;
static int mac53c94_errors;
/* set DMA controller going if any data to transfer */
if ((stat & (STAT_MSG|STAT_CD)) == 0
&& (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) {
- nb = cmd->SCp.this_residual;
+ nb = mcmd->this_residual;
if (nb > 0xfff0)
nb = 0xfff0;
- cmd->SCp.this_residual -= nb;
+ mcmd->this_residual -= nb;
writeb(nb, &regs->count_lo);
writeb(nb >> 8, &regs->count_mid);
writeb(CMD_DMA_MODE + CMD_NOP, &regs->command);
cmd_done(state, DID_ERROR << 16);
return;
}
- if (cmd->SCp.this_residual != 0
+ if (mcmd->this_residual != 0
&& (stat & (STAT_MSG|STAT_CD)) == 0) {
/* Set up the count regs to transfer more */
- nb = cmd->SCp.this_residual;
+ nb = mcmd->this_residual;
if (nb > 0xfff0)
nb = 0xfff0;
- cmd->SCp.this_residual -= nb;
+ mcmd->this_residual -= nb;
writeb(nb, &regs->count_lo);
writeb(nb >> 8, &regs->count_mid);
writeb(CMD_DMA_MODE + CMD_NOP, &regs->command);
cmd_done(state, DID_ERROR << 16);
return;
}
- cmd->SCp.Status = readb(&regs->fifo);
- cmd->SCp.Message = readb(&regs->fifo);
+ mcmd->status = readb(&regs->fifo);
+ mcmd->message = readb(&regs->fifo);
writeb(CMD_ACCEPT_MSG, &regs->command);
state->phase = busfreeing;
break;
if (intr != INTR_DISCONNECT) {
printk(KERN_DEBUG "got intr %x when expected disconnect\n", intr);
}
- cmd_done(state, (DID_OK << 16) + (cmd->SCp.Message << 8)
- + cmd->SCp.Status);
+ cmd_done(state, (DID_OK << 16) + (mcmd->message << 8) + mcmd->status);
break;
default:
printk(KERN_DEBUG "don't know about phase %d\n", state->phase);
dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
dcmds[-1].command = cpu_to_le16(dma_cmd);
dcmds->command = cpu_to_le16(DBDMA_STOP);
- cmd->SCp.this_residual = total;
+ mac53c94_priv(cmd)->this_residual = total;
}
static struct scsi_host_template mac53c94_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.max_segment_size = 65535,
+ .cmd_size = sizeof(struct mac53c94_cmd_priv),
};
static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
#define CF4_TEST 0x02
#define CF4_BBTE 0x01
+struct mac53c94_cmd_priv {
+ int this_residual;
+ int status;
+ int message;
+};
+
+static inline struct mac53c94_cmd_priv *mac53c94_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
#endif /* _MAC53C94_H */
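
The conversions in this series all follow the same recipe: declare a per-command private struct, advertise its size through the host template's .cmd_size, and fetch it with scsi_cmd_priv(), which simply points just past the struct scsi_cmnd that the midlayer allocated. A minimal userspace model of that layout, with stand-in types rather than the real SCSI midlayer structures:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for struct scsi_cmnd and the driver's private struct. */
struct fake_scsi_cmnd { int tag; unsigned char cdb[16]; };
struct fake_cmd_priv  { int this_residual; int status; int message; };

/* Model of scsi_cmd_priv(): the private area starts right after the
 * command structure that the midlayer allocated. */
static void *fake_scsi_cmd_priv(struct fake_scsi_cmnd *cmd)
{
	return cmd + 1;
}

int main(void)
{
	/* Model of the midlayer honoring .cmd_size at allocation time. */
	struct fake_scsi_cmnd *cmd =
		calloc(1, sizeof(*cmd) + sizeof(struct fake_cmd_priv));
	struct fake_cmd_priv *priv;

	if (!cmd)
		return 1;
	priv = fake_scsi_cmd_priv(cmd);
	priv->this_residual = 512;
	printf("residual=%d\n", priv->this_residual);
	free(cmd);
	return 0;
}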
static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
- cmd->SCp.this_residual < setup_use_pdma)
+ int resid = NCR5380_to_ncmd(cmd)->this_residual;
+
+ if (hostdata->flags & FLAG_NO_PSEUDO_DMA || resid < setup_use_pdma)
return 0;
- return cmd->SCp.this_residual;
+ return resid;
}
static int macscsi_dma_residual(struct NCR5380_hostdata *hostdata)
.sg_tablesize = 1,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
.max_sectors = 128,
};
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <scsi/scsicam.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
#include "megaraid.h"
static void
mega_rundoneq (adapter_t *adapter)
{
- struct scsi_cmnd *cmd;
- struct list_head *pos;
-
- list_for_each(pos, &adapter->completed_list) {
- struct scsi_pointer* spos = (struct scsi_pointer *)pos;
-
- cmd = list_entry(spos, struct scsi_cmnd, SCp);
- scsi_done(cmd);
- }
+ struct megaraid_cmd_priv *cmd_priv;
+
+ list_for_each_entry(cmd_priv, &adapter->completed_list, entry)
+ scsi_done(megaraid_to_scsi_cmd(cmd_priv));
INIT_LIST_HEAD(&adapter->completed_list);
}
.eh_bus_reset_handler = megaraid_reset,
.eh_host_reset_handler = megaraid_reset,
.no_write_same = 1,
+ .cmd_size = sizeof(struct megaraid_cmd_priv),
};
static int
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <scsi/scsi_cmnd.h>
#define MEGARAID_VERSION \
"v2.00.4 (Release Date: Thu Feb 9 08:51:30 EST 2006)\n"
#define CACHED_IO 0
#define DIRECT_IO 1
+struct megaraid_cmd_priv {
+ struct list_head entry;
+};
+
-#define SCSI_LIST(scp) ((struct list_head *)(&(scp)->SCp))
+#define SCSI_LIST(scp) \
+ (&((struct megaraid_cmd_priv *)scsi_cmd_priv(scp))->entry)
+
+struct scsi_cmd_and_priv {
+ struct scsi_cmnd cmd;
+ struct megaraid_cmd_priv priv;
+};
+
+static inline struct scsi_cmnd *
+megaraid_to_scsi_cmd(struct megaraid_cmd_priv *cmd_priv)
+{
+ /* See also scsi_mq_setup_tags() */
+ BUILD_BUG_ON(sizeof(struct scsi_cmd_and_priv) !=
+ sizeof(struct scsi_cmnd) +
+ sizeof(struct megaraid_cmd_priv));
+ return &container_of(cmd_priv, struct scsi_cmd_and_priv, priv)->cmd;
+}
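
mega_rundoneq() now needs the reverse mapping: given a megaraid_cmd_priv linked on completed_list, recover the owning scsi_cmnd. Since the private area sits directly after the command (the layout modeled above), container_of() on a wrapper struct does it, and the BUILD_BUG_ON guards the no-padding assumption. A self-contained sketch of the same trick, again with stand-in types:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_scsi_cmnd { int tag; };
struct fake_cmd_priv  { int status; };

/* Mirrors megaraid's scsi_cmd_and_priv: the command immediately
 * followed by its private data, with no padding in between. */
struct fake_cmd_and_priv {
	struct fake_scsi_cmnd cmd;
	struct fake_cmd_priv priv;
};

static struct fake_scsi_cmnd *priv_to_cmd(struct fake_cmd_priv *priv)
{
	return &container_of(priv, struct fake_cmd_and_priv, priv)->cmd;
}

int main(void)
{
	struct fake_cmd_and_priv *both = calloc(1, sizeof(*both));

	if (!both)
		return 1;
	both->cmd.tag = 42;
	printf("tag=%d\n", priv_to_cmd(&both->priv)->tag);
	free(both);
	return 0;
}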
/*
* Each controller's soft state
#ifndef LSI_MEGARAID_SAS_H
#define LSI_MEGARAID_SAS_H
+#include <scsi/scsi_cmnd.h>
+
/*
* MegaRAID SAS Driver meta data
*/
};
};
+struct megasas_cmd_priv {
+ void *cmd_priv;
+ u8 status;
+};
+
+static inline struct megasas_cmd_priv *megasas_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
#define MAX_MGMT_ADAPTERS 1024
#define MAX_IOCTL_SGE 16
goto out_return_cmd;
cmd->scmd = scmd;
- scmd->SCp.ptr = (char *)cmd;
+ megasas_priv(scmd)->cmd_priv = cmd;
/*
* Issue the command to the FW
void
megasas_dump_fusion_io(struct scsi_cmnd *scmd)
{
- struct megasas_cmd_fusion *cmd;
+ struct megasas_cmd_fusion *cmd = megasas_priv(scmd)->cmd_priv;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
struct megasas_instance *instance;
- cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
instance = (struct megasas_instance *)scmd->device->host->hostdata;
scmd_printk(KERN_INFO, scmd,
.mq_poll = megasas_blk_mq_poll,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
+ .cmd_size = sizeof(struct megasas_cmd_priv),
};
/**
cmd->retry_for_fw_reset = 0;
if (cmd->scmd)
- cmd->scmd->SCp.ptr = NULL;
+ megasas_priv(cmd->scmd)->cmd_priv = NULL;
switch (hdr->cmd) {
case MFI_CMD_INVALID:
get_updated_dev_handle(instance,
&fusion->load_balance_info[device_id],
&io_info, local_map_ptr);
- scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
+ megasas_priv(scp)->status |= MEGASAS_LOAD_BALANCE_FLAG;
cmd->pd_r1_lb = io_info.pd_after_lb;
if (instance->adapter_type >= VENTURA_SERIES)
rctx_g35->span_arm = io_info.span_arm;
rctx->span_arm = io_info.span_arm;
} else
- scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+ megasas_priv(scp)->status &= ~MEGASAS_LOAD_BALANCE_FLAG;
if (instance->adapter_type >= VENTURA_SERIES)
cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
cmd->scmd = scp;
- scp->SCp.ptr = (char *)cmd;
+ megasas_priv(scp)->cmd_priv = cmd;
return 0;
}
if (instance->ldio_threshold &&
megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
atomic_dec(&instance->ldio_outstanding);
- scmd_local->SCp.ptr = NULL;
+ megasas_priv(scmd_local)->cmd_priv = NULL;
megasas_return_cmd_fusion(instance, cmd);
scsi_dma_unmap(scmd_local);
megasas_sdev_busy_dec(instance, scmd_local);
case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
/* Update load balancing info */
if (fusion->load_balance_info &&
- (cmd_fusion->scmd->SCp.Status &
+ (megasas_priv(cmd_fusion->scmd)->status &
MEGASAS_LOAD_BALANCE_FLAG)) {
device_id = MEGASAS_DEV_INDEX(scmd_local);
lbinfo = &fusion->load_balance_info[device_id];
atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
- cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+ megasas_priv(cmd_fusion->scmd)->status &=
+ ~MEGASAS_LOAD_BALANCE_FLAG;
}
fallthrough; /* and complete IO */
case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
if (instance->ldio_threshold &&
(megasas_cmd_type(scmd_local) == READ_WRITE_LDIO))
atomic_dec(&instance->ldio_outstanding);
- scmd_local->SCp.ptr = NULL;
+ megasas_priv(scmd_local)->cmd_priv = NULL;
megasas_return_cmd_fusion(instance, cmd_fusion);
scsi_dma_unmap(scmd_local);
megasas_sdev_busy_dec(instance, scmd_local);
if (instance->adapter_type >= VENTURA_SERIES) {
for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
memset(fusion->stream_detect_by_ld[j],
- 0, sizeof(struct LD_STREAM_DETECT));
- fusion->stream_detect_by_ld[j]->mru_bit_map
+ 0, sizeof(struct LD_STREAM_DETECT));
+ fusion->stream_detect_by_ld[j]->mru_bit_map
= MR_STREAM_BITMAP;
}
}
ms->current_req = NULL;
tp->current_req = NULL;
if (cmd) {
+ struct mesh_cmd_priv *mcmd = mesh_priv(cmd);
+
set_host_byte(cmd, ms->stat);
- set_status_byte(cmd, cmd->SCp.Status);
+ set_status_byte(cmd, mcmd->status);
if (ms->stat == DID_OK)
- scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+ scsi_msg_to_host_byte(cmd, mcmd->message);
if (DEBUG_TARGET(cmd)) {
printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
cmd->result, ms->data_ptr, scsi_bufflen(cmd));
}
#endif
}
- cmd->SCp.this_residual -= ms->data_ptr;
+ mcmd->this_residual -= ms->data_ptr;
scsi_done(cmd);
}
if (start_next) {
if (ms->n_msgin < msgin_length(ms))
goto reject;
if (cmd)
- cmd->SCp.Message = code;
+ mesh_priv(cmd)->message = code;
switch (code) {
case COMMAND_COMPLETE:
break;
if (cmd) {
int nseg;
- cmd->SCp.this_residual = scsi_bufflen(cmd);
+ mesh_priv(cmd)->this_residual = scsi_bufflen(cmd);
nseg = scsi_dma_map(cmd);
BUG_ON(nseg < 0);
break;
case statusing:
if (cmd) {
- cmd->SCp.Status = mr->fifo;
+ struct mesh_cmd_priv *mcmd = mesh_priv(cmd);
+
+ mcmd->status = mr->fifo;
if (DEBUG_TARGET(cmd))
printk(KERN_DEBUG "mesh: status is %x\n",
- cmd->SCp.Status);
+ mcmd->status);
}
ms->msgphase = msg_in;
break;
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.max_segment_size = 65535,
+ .cmd_size = sizeof(struct mesh_cmd_priv),
};
static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
#ifndef _MESH_H
#define _MESH_H
+struct mesh_cmd_priv {
+ int this_residual;
+ int message;
+ int status;
+};
+
+static inline struct mesh_cmd_priv *mesh_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
/*
* Registers in the MESH controller.
*/
#define MPI3_TEMP_SENSOR_LOCATION_DRAM (0x3)
#define MPI3_MFGPAGE_VENDORID_BROADCOM (0x1000)
#define MPI3_MFGPAGE_DEVID_SAS4116 (0x00a5)
-#define MPI3_MFGPAGE_DEVID_SAS4016 (0x00a7)
struct mpi3_man_page0 {
struct mpi3_config_page_header header;
u8 chip_revision[8];
__le32 reserved94;
__le32 reserved98;
u8 oem;
- u8 sub_oem;
+ u8 profile_identifier;
__le16 flags;
u8 board_mfg_day;
u8 board_mfg_month;
#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_RESET (0x0a)
#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET (0x0b)
#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_FAULT (0x0c)
-#define MPI3_MAN6_GPIO_FUNCTION_EPACK_ATTN (0x0d)
+#define MPI3_MAN6_GPIO_FUNCTION_PBLP_STATUS_CHANGE (0x0d)
#define MPI3_MAN6_GPIO_FUNCTION_EPACK_ONLINE (0x0e)
#define MPI3_MAN6_GPIO_FUNCTION_EPACK_FAULT (0x0f)
#define MPI3_MAN6_GPIO_FUNCTION_CTRL_TYPE (0x10)
#define MPI3_MAN6_GPIO_FUNCTION_LICENSE (0x11)
#define MPI3_MAN6_GPIO_FUNCTION_REFCLK_CONTROL (0x12)
#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET_CLAMP (0x13)
+#define MPI3_MAN6_GPIO_FUNCTION_AUXILIARY_POWER (0x14)
+#define MPI3_MAN6_GPIO_FUNCTION_RAID_DATA_CACHE_DIRTY (0x15)
+#define MPI3_MAN6_GPIO_FUNCTION_BOARD_FAN_CONTROL (0x16)
+#define MPI3_MAN6_GPIO_FUNCTION_BOARD_FAN_FAULT (0x17)
+#define MPI3_MAN6_GPIO_FUNCTION_POWER_BRAKE (0x18)
#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_MASK (0x01)
#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_ISTWI (0x00)
#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_RECEPTACLEID (0x01)
#define MPI3_MAN9_MAX_OUTSTANDING_REQS (65000)
#define MPI3_MAN9_MIN_TARGET_CMDS (0)
#define MPI3_MAN9_MAX_TARGET_CMDS (65535)
-#define MPI3_MAN9_MIN_SAS_TARGETS (0)
-#define MPI3_MAN9_MAX_SAS_TARGETS (65535)
-#define MPI3_MAN9_MIN_PCIE_TARGETS (0)
+#define MPI3_MAN9_MIN_NVME_TARGETS (0)
#define MPI3_MAN9_MIN_INITIATORS (0)
-#define MPI3_MAN9_MAX_INITIATORS (65535)
-#define MPI3_MAN9_MIN_ENCLOSURES (0)
+#define MPI3_MAN9_MIN_VDS (0)
+#define MPI3_MAN9_MIN_ENCLOSURES (1)
#define MPI3_MAN9_MAX_ENCLOSURES (65535)
#define MPI3_MAN9_MIN_ENCLOSURE_PHYS (0)
-#define MPI3_MAN9_MIN_NAMESPACE_COUNT (1)
#define MPI3_MAN9_MIN_EXPANDERS (0)
#define MPI3_MAN9_MAX_EXPANDERS (65535)
#define MPI3_MAN9_MIN_PCIE_SWITCHES (0)
+#define MPI3_MAN9_MIN_HOST_PD_DRIVES (0)
+#define MPI3_MAN9_ADV_HOST_PD_DRIVES (0)
+#define MPI3_MAN9_RAID_PD_DRIVES (0)
+#define MPI3_MAN9_DRIVER_DIAG_BUFFER (0)
+#define MPI3_MAN9_MIN_NAMESPACE_COUNT (1)
struct mpi3_man_page9 {
struct mpi3_config_page_header header;
u8 num_resources;
__le32 reserved00;
__le32 reserved04;
};
-
+struct mpi3_man11_board_fan_device_format {
+ u8 flags;
+ u8 reserved01;
+ u8 min_fan_speed;
+ u8 max_fan_speed;
+ __le32 reserved04;
+};
+#define MPI3_MAN11_BOARD_FAN_FLAGS_FAN_CTRLR_TYPE_MASK (0x07)
+#define MPI3_MAN11_BOARD_FAN_FLAGS_FAN_CTRLR_TYPE_AMC6821 (0x00)
union mpi3_man11_device_specific_format {
struct mpi3_man11_mux_device_format mux;
struct mpi3_man11_temp_sensor_device_format temp_sensor;
struct mpi3_man11_bkplane_mgmt_device_format bkplane_mgmt;
struct mpi3_man11_gas_gauge_device_format gas_gauge;
struct mpi3_man11_mgmt_ctrlr_device_format mgmt_controller;
+ struct mpi3_man11_board_fan_device_format board_fan;
__le32 words[2];
};
-
struct mpi3_man11_istwi_device_format {
u8 device_type;
u8 controller;
#define MPI3_MAN11_ISTWI_DEVTYPE_BACKPLANE_MGMT (0x05)
#define MPI3_MAN11_ISTWI_DEVTYPE_GAS_GAUGE (0x06)
#define MPI3_MAN11_ISTWI_DEVTYPE_MGMT_CONTROLLER (0x07)
+#define MPI3_MAN11_ISTWI_DEVTYPE_BOARD_FAN (0x08)
#define MPI3_MAN11_ISTWI_FLAGS_MUX_PRESENT (0x01)
#ifndef MPI3_MAN11_ISTWI_DEVICE_MAX
#define MPI3_MAN11_ISTWI_DEVICE_MAX (1)
#define MPI3_MAN13_PAGEVERSION (0x00)
struct mpi3_man_page14 {
struct mpi3_config_page_header header;
- __le16 flags;
- __le16 reserved0a;
+ __le32 reserved08;
u8 num_slot_groups;
u8 num_slots;
__le16 max_cert_chain_length;
__le32 sealed_slots;
+ __le32 populated_slots;
+ __le32 mgmt_pt_updatable_slots;
};
-
#define MPI3_MAN14_PAGEVERSION (0x00)
-#define MPI3_MAN14_FLAGS_AUTH_SESSION_REQ (0x01)
-#define MPI3_MAN14_FLAGS_AUTH_API_MASK (0x0e)
-#define MPI3_MAN14_FLAGS_AUTH_API_NONE (0x00)
-#define MPI3_MAN14_FLAGS_AUTH_API_CERBERUS (0x02)
-#define MPI3_MAN14_FLAGS_AUTH_API_SPDM (0x04)
+#define MPI3_MAN14_NUMSLOTS_MAX (32)
#ifndef MPI3_MAN15_VERSION_RECORD_MAX
#define MPI3_MAN15_VERSION_RECORD_MAX 1
#endif
#define MPI3_IOUNIT6_PAGEVERSION (0x00)
#define MPI3_IOUNIT6_FLAGS_ACT_CABLE_PWR_EXC (0x01)
-struct mpi3_io_unit_page7 {
- struct mpi3_config_page_header header;
- __le32 reserved08;
-};
-
-#define MPI3_IOUNIT7_PAGEVERSION (0x00)
#ifndef MPI3_IOUNIT8_DIGEST_MAX
#define MPI3_IOUNIT8_DIGEST_MAX (1)
#endif
#define MPI3_IOUNIT9_PAGEVERSION (0x00)
#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x01)
#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
+struct mpi3_io_unit_page10 {
+ struct mpi3_config_page_header header;
+ u8 flags;
+ u8 reserved09[3];
+ __le32 silicon_id;
+ u8 fw_version_minor;
+ u8 fw_version_major;
+ u8 hw_version_minor;
+ u8 hw_version_major;
+ u8 part_number[16];
+};
+#define MPI3_IOUNIT10_PAGEVERSION (0x00)
+#define MPI3_IOUNIT10_FLAGS_VALID (0x01)
+#define MPI3_IOUNIT10_FLAGS_ACTIVEID_MASK (0x02)
+#define MPI3_IOUNIT10_FLAGS_ACTIVEID_FIRST_REGION (0x00)
+#define MPI3_IOUNIT10_FLAGS_ACTIVEID_SECOND_REGION (0x02)
+#define MPI3_IOUNIT10_FLAGS_PBLP_EXPECTED (0x80)
+#ifndef MPI3_IOUNIT11_PROFILE_MAX
+#define MPI3_IOUNIT11_PROFILE_MAX (1)
+#endif
+struct mpi3_iounit11_profile {
+ u8 profile_identifier;
+ u8 reserved01[3];
+ __le16 max_vds;
+ __le16 max_host_pds;
+ __le16 max_adv_host_pds;
+ __le16 max_raid_pds;
+ __le16 max_nvme;
+ __le16 max_outstanding_requests;
+ __le16 subsystem_id;
+ __le16 reserved12;
+ __le32 reserved14[2];
+};
+struct mpi3_io_unit_page11 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_profiles;
+ u8 current_profile_identifier;
+ __le16 reserved0e;
+ struct mpi3_iounit11_profile profile[MPI3_IOUNIT11_PROFILE_MAX];
+};
+#define MPI3_IOUNIT11_PAGEVERSION (0x00)
struct mpi3_ioc_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
struct mpi3_config_page_header header;
__le32 coalescing_timeout;
u8 coalescing_depth;
- u8 pci_slot_num;
+ u8 obsolete;
__le16 reserved0e;
};
-
#define MPI3_IOC1_PAGEVERSION (0x00)
-#define MPI3_IOC1_PCISLOTNUM_UNKNOWN (0xff)
#ifndef MPI3_IOC2_EVENTMASK_WORDS
#define MPI3_IOC2_EVENTMASK_WORDS (4)
#endif
__le32 reserved14;
__le32 reserved18;
};
-
#define MPI3_DRIVER0_PAGEVERSION (0x00)
+#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
-#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
-#define MPI3_DRIVER0_BSDOPTS_EN_ADV_ADAPTER_CONFIG (0x00000008)
struct mpi3_driver_page1 {
struct mpi3_config_page_header header;
__le32 flags;
u8 raid_level;
__le16 device_info;
__le16 flags;
- __le16 reserved06;
- __le32 reserved08[2];
+ __le16 io_throttle_group;
+ __le16 io_throttle_group_low;
+ __le16 io_throttle_group_high;
+ __le32 reserved0c;
};
-
#define MPI3_DEVICE0_VD_STATE_OFFLINE (0x00)
#define MPI3_DEVICE0_VD_STATE_PARTIALLY_DEGRADED (0x01)
#define MPI3_DEVICE0_VD_STATE_DEGRADED (0x02)
#define MPI3_DEVICE0_VD_DEVICE_INFO_NVME (0x0004)
#define MPI3_DEVICE0_VD_DEVICE_INFO_SATA (0x0002)
#define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001)
+#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xf000)
#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_MASK (0x0003)
#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_NONE (0x0000)
#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_HOST (0x0001)
#define MPI3_DEVICE0_ASTATUS_NVME_BAR (0x4f)
#define MPI3_DEVICE0_ASTATUS_NVME_NS_DESCRIPTOR (0x50)
#define MPI3_DEVICE0_ASTATUS_NVME_INCOMPATIBLE_SETTINGS (0x51)
+#define MPI3_DEVICE0_ASTATUS_NVME_TOO_MANY_ERRORS (0x52)
#define MPI3_DEVICE0_ASTATUS_NVME_MAX (0x5f)
#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x80)
#define MPI3_DEVICE0_ASTATUS_VD_MAX (0x8f)
#define MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE (0x0080)
+#define MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED (0x0010)
#define MPI3_DEVICE0_FLAGS_HIDDEN (0x0008)
-#define MPI3_DEVICE0_FLAGS_ATT_METHOD_MASK (0x0006)
-#define MPI3_DEVICE0_FLAGS_ATT_METHOD_NOT_DIR_ATTACHED (0x0000)
-#define MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED (0x0002)
#define MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL (0x0004)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED (0x0002)
#define MPI3_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
#define MPI3_DEVICE0_QUEUE_DEPTH_NOT_APPLICABLE (0x0000)
struct mpi3_device1_sas_sata_format {
__le32 reserved00;
};
-
struct mpi3_device1_pcie_format {
__le16 vendor_id;
__le16 device_id;
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_READ (0x00080000)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_MASK (0x00030000)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_MASK (0x000000f0)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING (0x00000010)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_PROD_SPECIFIC (0x00000080)
#define MPI3_SCSIIO_METASGL_INDEX (3)
struct mpi3_scsi_io_reply {
__le16 host_tag;
u8 ioc_number;
u8 who_init;
__le16 max_msix_vectors;
- __le16 max_outstanding_request;
+ __le16 max_outstanding_requests;
__le16 product_id;
__le16 ioc_request_frame_size;
__le16 reply_frame_size;
u8 sge_modifier_shift;
u8 protocol_flags;
__le16 max_sas_initiators;
- __le16 reserved2a;
+ __le16 max_data_length;
__le16 max_sas_expanders;
__le16 max_enclosures;
__le16 min_dev_handle;
u8 max_host_pd_ns_count;
u8 max_adv_host_pd_ns_count;
u8 max_raidpd_ns_count;
- u8 reserved5f;
+ u8 max_devices_per_throttle_group;
+ __le16 io_throttle_data_length;
+ __le16 max_io_throttle_group;
+ __le16 io_throttle_low;
+ __le16 io_throttle_high;
};
-
#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000)
#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000)
-#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x10000000)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_MASK (0x00000600)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_FIXED_THRESHOLD (0x00000000)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_OUTSTANDING_IO (0x00000200)
#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_CAPABLE (0x00000100)
#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_ENABLED (0x00000080)
#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_ENABLED (0x00000040)
#define MPI3_IOCFACTS_PROTOCOL_NVME (0x0004)
#define MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
#define MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
+#define MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED (0x0000)
#define MPI3_IOCFACTS_FLAGS_SIGNED_NVDATA_REQUIRED (0x00010000)
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00)
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000f)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
+#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000)
struct mpi3_mgmt_passthrough_request {
__le16 host_tag;
u8 ioc_use_only02;
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_COALESCE_DISABLE (0x02)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
#define MPI3_EVENT_LOG_DATA (0x01)
#define MPI3_EVENT_CHANGE (0x02)
#define MPI3_EVENT_GPIO_INTERRUPT (0x04)
-#define MPI3_EVENT_TEMP_THRESHOLD (0x05)
#define MPI3_EVENT_CABLE_MGMT (0x06)
#define MPI3_EVENT_DEVICE_ADDED (0x07)
#define MPI3_EVENT_DEVICE_INFO_CHANGED (0x08)
u8 gpio_num;
u8 reserved01[3];
};
-
-struct mpi3_event_data_temp_threshold {
- __le16 status;
- u8 sensor_num;
- u8 reserved03;
- __le16 current_temperature;
- __le16 reserved06;
- __le32 reserved08;
- __le32 reserved0c;
-};
-
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_FATAL_THRESHOLD_EXCEEDED (0x0004)
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_CRITICAL_THRESHOLD_EXCEEDED (0x0002)
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_WARNING_THRESHOLD_EXCEEDED (0x0001)
struct mpi3_event_data_cable_management {
__le32 active_cable_power_requirement;
u8 status;
#define MPI3_CTRL_OP_LOOKUP_MAPPING (0x02)
#define MPI3_CTRL_OP_UPDATE_TIMESTAMP (0x04)
#define MPI3_CTRL_OP_GET_TIMESTAMP (0x05)
+#define MPI3_CTRL_OP_GET_IOC_CHANGE_COUNT (0x06)
+#define MPI3_CTRL_OP_CHANGE_PROFILE (0x07)
#define MPI3_CTRL_OP_REMOVE_DEVICE (0x10)
#define MPI3_CTRL_OP_CLOSE_PERSISTENT_CONNECTION (0x11)
#define MPI3_CTRL_OP_HIDDEN_ACK (0x12)
#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS (0x13)
-#define MPI3_CTRL_OP_SAS_SEND_PRIMITIVE (0x20)
+#define MPI3_CTRL_OP_SEND_SAS_PRIMITIVE (0x20)
#define MPI3_CTRL_OP_SAS_PHY_CONTROL (0x21)
#define MPI3_CTRL_OP_READ_INTERNAL_BUS (0x23)
#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS (0x24)
#define MPI3_CTRL_OP_PCIE_LINK_CONTROL (0x30)
#define MPI3_CTRL_OP_LOOKUP_MAPPING_PARAM8_LOOKUP_METHOD_INDEX (0x00)
#define MPI3_CTRL_OP_UPDATE_TIMESTAMP_PARAM64_TIMESTAMP_INDEX (0x00)
+#define MPI3_CTRL_OP_CHANGE_PROFILE_PARAM8_PROFILE_ID_INDEX (0x00)
#define MPI3_CTRL_OP_REMOVE_DEVICE_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_CLOSE_PERSIST_CONN_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_HIDDEN_ACK_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS_PARAM16_DEVHANDLE_INDEX (0x00)
-#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PHY_INDEX (0x00)
-#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PRIMSEQ_INDEX (0x01)
-#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM32_PRIMITIVE_INDEX (0x00)
+#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM8_PHY_INDEX (0x00)
+#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM8_PRIMSEQ_INDEX (0x01)
+#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM32_PRIMITIVE_INDEX (0x00)
#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX (0x00)
#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX (0x01)
#define MPI3_CTRL_OP_READ_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00)
#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_PERSISTENT_ID_INDEX (1)
#define MPI3_CTRL_LOOKUP_METHOD_VALUE16_DEVH_INDEX (0)
#define MPI3_CTRL_GET_TIMESTAMP_VALUE64_TIMESTAMP_INDEX (0)
+#define MPI3_CTRL_GET_IOC_CHANGE_COUNT_VALUE16_CHANGECOUNT_INDEX (0)
#define MPI3_CTRL_READ_INTERNAL_BUS_VALUE32_VALUE_INDEX (0)
#define MPI3_CTRL_PRIMFLAGS_SINGLE (0x01)
#define MPI3_CTRL_PRIMFLAGS_TRIPLE (0x03)
__le16 dev_handle;
__le16 encapsulated_command_length;
__le16 flags;
- __le32 reserved10[4];
+ __le32 data_length;
+ __le32 reserved14[3];
__le32 command[MPI3_NVME_ENCAP_CMD_MAX];
};
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (22)
-#define MPI3_VERSION_DEV (0)
+#define MPI3_VERSION_UNIT (23)
+#define MPI3_VERSION_DEV (1)
+#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
__le16 producer_index;
__le16 reserved02;
#define MPI3_SGE_FLAGS_END_OF_BUFFER (0x04)
#define MPI3_SGE_FLAGS_DLAS_MASK (0x03)
#define MPI3_SGE_FLAGS_DLAS_SYSTEM (0x00)
-#define MPI3_SGE_FLAGS_DLAS_IOC_DDR (0x01)
+#define MPI3_SGE_FLAGS_DLAS_IOC_UDP (0x01)
#define MPI3_SGE_FLAGS_DLAS_IOC_CTL (0x02)
#define MPI3_SGE_EXT_OPER_EEDP (0x00)
#define MPI3_EEDPFLAGS_INCR_PRI_REF_TAG (0x8000)
#define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020)
#define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008)
#define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007)
-#define MPI3_EEDPFLAGS_EEDP_OP_NOOP (0x0000)
#define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001)
#define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002)
#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003)
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2021 Broadcom Inc.
+ * Copyright (C) 2017-2022 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
extern struct list_head mrioc_list;
extern int prot_mask;
-#define MPI3MR_DRIVER_VERSION "8.0.0.61.0"
-#define MPI3MR_DRIVER_RELDATE "20-December-2021"
+#define MPI3MR_DRIVER_VERSION "8.0.0.68.0"
+#define MPI3MR_DRIVER_RELDATE "10-February-2022"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
* @send_ack: Event acknowledgment required or not
* @process_evt: Bottomhalf processing required or not
* @evt_ctx: Event context to send in Ack
+ * @pending_at_sml: waiting for device add/remove API to complete
+ * @discard: discard this event
* @ref_count: kref count
* @event_data: Actual MPI3 event data
*/
bool send_ack;
bool process_evt;
u32 evt_ctx;
+ bool pending_at_sml;
+ bool discard;
struct kref ref_count;
char event_data[] __aligned(4);
};
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2021 Broadcom Inc.
+ * Copyright (C) 2017-2022 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2021 Broadcom Inc.
+ * Copyright (C) 2017-2022 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
case MPI3_EVENT_GPIO_INTERRUPT:
desc = "GPIO Interrupt";
break;
- case MPI3_EVENT_TEMP_THRESHOLD:
- desc = "Temperature Threshold";
- break;
case MPI3_EVENT_CABLE_MGMT:
desc = "Cable Management";
break;
MPI3MR_MAX_SEG_LIST_SIZE,
mrioc->req_qinfo[q_idx].q_segment_list,
mrioc->req_qinfo[q_idx].q_segment_list_dma);
- mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
+ mrioc->req_qinfo[q_idx].q_segment_list = NULL;
}
} else
size = mrioc->req_qinfo[q_idx].segment_qd *
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
mrioc->facts.protocol_flags = facts_data->protocol_flags;
mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
- mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request);
+ mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_TEMP_THRESHOLD);
retval = mpi3mr_issue_event_notification(mrioc);
if (retval)
memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
- mpi3mr_cleanup_fwevt_list(mrioc);
mpi3mr_flush_host_io(mrioc);
+ mpi3mr_cleanup_fwevt_list(mrioc);
mpi3mr_invalidate_devhandles(mrioc);
if (mrioc->prepare_for_reset) {
mrioc->prepare_for_reset = 0;
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2021 Broadcom Inc.
+ * Copyright (C) 2017-2022 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
return fwevt;
}
+/**
+ * mpi3mr_cancel_work - cancel firmware event
+ * @fwevt: fwevt object which needs to be canceled
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
+{
+ /*
+ * Wait on the fwevt to complete. If this returns 1, then
+ * the event was never executed.
+ *
+ * If it did execute, we wait for it to finish, and the put will
+ * happen from mpi3mr_process_fwevt()
+ */
+ if (cancel_work_sync(&fwevt->work)) {
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ mpi3mr_fwevt_put(fwevt);
+ /*
+ * Put fwevt reference count to neutralize
+ * kref_init increment
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+}
+
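
The two back-to-back puts reflect the two references a queued event holds: one from kref_init() at allocation and one for sitting on the workqueue. When cancel_work_sync() reports that the work never ran, neither of the normal drop points (the dequeue path or mpi3mr_process_fwevt()) will ever execute, so both references must be dropped here. Schematically, with a bare counter standing in for the kref:

#include <stdbool.h>
#include <stdio.h>

struct fake_fwevt { int refs; };

static void fwevt_put(struct fake_fwevt *e)
{
	if (--e->refs == 0)
		printf("event freed\n");
}

int main(void)
{
	struct fake_fwevt evt = { .refs = 1 };	/* kref_init() at allocation */
	bool was_pending;

	evt.refs++;			/* reference held by the workqueue */

	/* cancel_work_sync() returned true: the work never ran, so the
	 * dequeue path and the process routine will never drop these. */
	was_pending = true;
	if (was_pending) {
		fwevt_put(&evt);	/* the workqueue's reference */
		fwevt_put(&evt);	/* the kref_init() reference */
	}
	return 0;
}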
/**
* mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
* @mrioc: Adapter instance reference
!mrioc->fwevt_worker_thread)
return;
- while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)) ||
- (fwevt = mrioc->current_event)) {
+ while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
+ mpi3mr_cancel_work(fwevt);
+
+ if (mrioc->current_event) {
+ fwevt = mrioc->current_event;
/*
- * Wait on the fwevt to complete. If this returns 1, then
- * the event was never executed, and we need a put for the
- * reference the work had on the fwevt.
- *
- * If it did execute, we wait for it to finish, and the put will
- * happen from mpi3mr_process_fwevt()
+ * Don't call cancel_work_sync() for the fwevt work if
+ * the controller reset was triggered while processing
+ * that same fwevt work, or while the worker thread is
+ * waiting for the device add/remove APIs to complete;
+ * otherwise we would deadlock.
*/
- if (cancel_work_sync(&fwevt->work)) {
- /*
- * Put fwevt reference count after
- * dequeuing it from worker queue
- */
- mpi3mr_fwevt_put(fwevt);
- /*
- * Put fwevt reference count to neutralize
- * kref_init increment
- */
- mpi3mr_fwevt_put(fwevt);
+ if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
+ fwevt->discard = 1;
+ return;
}
+
+ mpi3mr_cancel_work(fwevt);
}
}
return tgtdev;
}
+/**
+ * mpi3mr_print_device_event_notice - print notice related to post processing of
+ * device event after controller reset.
+ *
+ * @mrioc: Adapter instance reference
+ * @device_add: true for device add event and false for device removal event
+ *
+ * Return: None.
+ */
+static void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
+ bool device_add)
+{
+ ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
+ (device_add ? "addition" : "removal"));
+ ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
+ ioc_notice(mrioc, "are matched with attached devices for correctness\n");
+}
+
/**
* mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
* @mrioc: Adapter instance reference
}
if (tgtdev->starget) {
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
scsi_remove_target(&tgtdev->starget->dev);
tgtdev->host_exposed = 0;
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard) {
+ mpi3mr_print_device_event_notice(mrioc, false);
+ return;
+ }
+ }
}
ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
__func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}
if (!tgtdev->host_exposed && !mrioc->reset_in_progress) {
tgtdev->host_exposed = 1;
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
scsi_scan_target(&mrioc->shost->shost_gendev, 0,
tgtdev->perst_id,
SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
if (!tgtdev->starget)
tgtdev->host_exposed = 0;
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard) {
+ mpi3mr_print_device_event_notice(mrioc, true);
+ goto out;
+ }
+ }
}
out:
if (tgtdev)
mpi3mr_sastopochg_evt_debug(mrioc, event_data);
for (i = 0; i < event_data->num_entries; i++) {
+ if (fwevt->discard)
+ return;
handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
if (!handle)
continue;
mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
for (i = 0; i < event_data->num_entries; i++) {
+ if (fwevt->discard)
+ return;
handle =
le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
if (!handle)
static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
struct mpi3mr_fwevt *fwevt)
{
- mrioc->current_event = fwevt;
mpi3mr_fwevt_del_from_list(mrioc, fwevt);
+ mrioc->current_event = fwevt;
if (mrioc->stop_drv_processing)
goto out;
u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto clear_drv_cmd;
+
ioc_info(mrioc,
"%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
__func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
kfree(delayed_dev_rmhs);
return;
}
+
+clear_drv_cmd:
drv_cmd->state = MPI3MR_CMD_NOTUSED;
drv_cmd->callback = NULL;
drv_cmd->retry_count = 0;
struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
int retval;
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto clear_drv_cmd;
+
if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
if (retval) {
pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
mrioc->name);
- goto out_failed;
+ goto clear_drv_cmd;
}
return;
-out_failed:
+clear_drv_cmd:
drv_cmd->state = MPI3MR_CMD_NOTUSED;
drv_cmd->callback = NULL;
drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
struct delayed_evt_ack_node *delayed_evtack = NULL;
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto clear_drv_cmd;
+
if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
dprint_event_th(mrioc,
"immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
kfree(delayed_evtack);
return;
}
+clear_drv_cmd:
drv_cmd->state = MPI3MR_CMD_NOTUSED;
drv_cmd->callback = NULL;
clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
mrioc->facts.shutdown_timeout = shutdown_timeout;
}
-/**
- * mpi3mr_tempthreshold_evt_th - Temp threshold event tophalf
- * @mrioc: Adapter instance reference
- * @event_reply: event data
- *
- * Displays temperature threshold event details and fault code
- * if any is hit due to temperature exceeding threshold.
- *
- * Return: Nothing
- */
-static void mpi3mr_tempthreshold_evt_th(struct mpi3mr_ioc *mrioc,
- struct mpi3_event_notification_reply *event_reply)
-{
- struct mpi3_event_data_temp_threshold *evtdata =
- (struct mpi3_event_data_temp_threshold *)event_reply->event_data;
-
- ioc_err(mrioc, "Temperature threshold levels %s%s%s exceeded for sensor: %d !!! Current temperature in Celsius: %d\n",
- (le16_to_cpu(evtdata->status) & 0x1) ? "Warning " : " ",
- (le16_to_cpu(evtdata->status) & 0x2) ? "Critical " : " ",
- (le16_to_cpu(evtdata->status) & 0x4) ? "Fatal " : " ", evtdata->sensor_num,
- le16_to_cpu(evtdata->current_temperature));
- mpi3mr_print_fault_info(mrioc);
-}
-
/**
* mpi3mr_cablemgmt_evt_th - Cable management event tophalf
* @mrioc: Adapter instance reference
mpi3mr_energypackchg_evt_th(mrioc, event_reply);
break;
}
- case MPI3_EVENT_TEMP_THRESHOLD:
- {
- mpi3mr_tempthreshold_evt_th(mrioc, event_reply);
- break;
- }
case MPI3_EVENT_CABLE_MGMT:
{
mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
scmd->result = DID_OK << 16;
goto out_success;
}
+
+ scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
if (stgt_priv_data->pend_count) {
sdev_printk(KERN_INFO, scmd->device,
"%s: target has %d pending commands, target reset is failed\n",
- mrioc->name, sdev_priv_data->pend_count);
+ mrioc->name, stgt_priv_data->pend_count);
goto out;
}
snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
"%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
- mrioc->fwevt_worker_name, WQ_MEM_RECLAIM);
+ mrioc->fwevt_worker_name, 0);
if (!mrioc->fwevt_worker_thread) {
ioc_err(mrioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
pdev, pci_name(pdev), device_state);
pci_save_state(pdev);
- pci_set_power_state(pdev, device_state);
mpi3mr_cleanup_resources(mrioc);
+ pci_set_power_state(pdev, device_state);
return 0;
}
U16 Event; /*0x14 */
U16 Reserved4; /*0x16 */
U32 EventContext; /*0x18 */
- U32 EventData[1]; /*0x1C */
+ U32 EventData[]; /*0x1C */
} MPI2_EVENT_NOTIFICATION_REPLY, *PTR_MPI2_EVENT_NOTIFICATION_REPLY,
Mpi2EventNotificationReply_t,
*pMpi2EventNotificationReply_t;
U8 Reserved1; /*0x01 */
U16 Reserved2; /*0x02 */
U32 Reserved3; /*0x04 */
- U32 HostData[1]; /*0x08 */
+ U32 HostData[]; /*0x08 */
} MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t;
U32 Reserved8; /*0x18 */
U32 Reserved9; /*0x1C */
U32 Reserved10; /*0x20 */
- U32 HostData[1]; /*0x24 */
+ U32 HostData[]; /*0x24 */
} MPI2_SEND_HOST_MESSAGE_REQUEST,
*PTR_MPI2_SEND_HOST_MESSAGE_REQUEST,
Mpi2SendHostMessageRequest_t,
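
These hunks replace the old one-element-array idiom (EventData[1], HostData[1]) with C99 flexible array members, so sizeof the struct counts only the fixed header and array-bounds checkers see the true extent of the trailing data. The allocation pattern is the standard one, sketched here with a generic struct rather than the MPI2 types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct event_reply {
	uint16_t event;
	uint16_t reserved;
	uint32_t event_context;
	uint32_t event_data[];	/* flexible array member, was [1] */
};

int main(void)
{
	size_t n = 4;	/* event-specific payload length, in dwords */
	struct event_reply *reply =
		malloc(sizeof(*reply) + n * sizeof(reply->event_data[0]));
	size_t i;

	if (!reply)
		return 1;
	for (i = 0; i < n; i++)
		reply->event_data[i] = (uint32_t)i;
	/* sizeof(*reply) covers only the fixed header, unlike the old
	 * one-element-array version, which over-counted by one dword. */
	printf("header=%zu bytes, payload=%zu dwords\n", sizeof(*reply), n);
	free(reply);
	return 0;
}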
/* Get the SG list pointer and info. */
sges_left = scsi_dma_map(scmd);
- if (sges_left < 0) {
- sdev_printk(KERN_ERR, scmd->device,
- "scsi_dma_map failed: request for %d bytes!\n",
- scsi_bufflen(scmd));
+ if (sges_left < 0)
return 1;
- }
/* Check if we need to build a native SG list. */
if (!base_is_prp_possible(ioc, pcie_device,
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
- if (sges_left < 0) {
- sdev_printk(KERN_ERR, scmd->device,
- "scsi_dma_map failed: request for %d bytes!\n",
- scsi_bufflen(scmd));
+ if (sges_left < 0)
return -ENOMEM;
- }
sg_local = &mpi_request->SGL;
sges_in_segment = ioc->max_sges_in_main_message;
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
- if (sges_left < 0) {
- sdev_printk(KERN_ERR, scmd->device,
- "scsi_dma_map failed: request for %d bytes!\n",
- scsi_bufflen(scmd));
+ if (sges_left < 0)
return -ENOMEM;
- }
sg_local = &mpi_request->SGL;
sges_in_segment = (ioc->request_sz -
*/
static int
-mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
+mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
{
- long reply_pool_end_address;
+ dma_addr_t end_address;
- reply_pool_end_address = reply_pool_start_address + pool_sz;
+ end_address = start_address + pool_sz - 1;
- if (upper_32_bits(reply_pool_start_address) ==
- upper_32_bits(reply_pool_end_address))
+ if (upper_32_bits(start_address) == upper_32_bits(end_address))
return 1;
else
return 0;
}
if (!mpt3sas_check_same_4gb_region(
- (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) {
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) {
ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
ioc->pcie_sg_lookup[i].pcie_sgl,
(unsigned long long)
GFP_KERNEL, &ctr->chain_buffer_dma);
if (!ctr->chain_buffer)
return -EAGAIN;
- if (!mpt3sas_check_same_4gb_region((long)
- ctr->chain_buffer, ioc->chain_segment_sz)) {
+ if (!mpt3sas_check_same_4gb_region(
+ ctr->chain_buffer_dma, ioc->chain_segment_sz)) {
ioc_err(ioc,
"Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
ctr->chain_buffer,
GFP_KERNEL, &ioc->sense_dma);
if (!ioc->sense)
return -EAGAIN;
- if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
+ if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) {
dinitprintk(ioc, pr_err(
"Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
ioc->sense, (unsigned long long) ioc->sense_dma));
&ioc->reply_dma);
if (!ioc->reply)
return -EAGAIN;
- if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
+ if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) {
dinitprintk(ioc, pr_err(
"Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
ioc->reply, (unsigned long long) ioc->reply_dma));
GFP_KERNEL, &ioc->reply_free_dma);
if (!ioc->reply_free)
return -EAGAIN;
- if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
+ if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) {
dinitprintk(ioc,
pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
GFP_KERNEL, &ioc->reply_post_free_array_dma);
if (!ioc->reply_post_free_array)
return -EAGAIN;
- if (!mpt3sas_check_same_4gb_region((long)ioc->reply_post_free_array,
+ if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma,
reply_post_free_array_sz)) {
dinitprintk(ioc, pr_err(
"Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
* resources and set DMA mask to 32 and allocate.
*/
if (!mpt3sas_check_same_4gb_region(
- (long)ioc->reply_post[i].reply_post_free, sz)) {
+ ioc->reply_post[i].reply_post_free_dma, sz)) {
dinitprintk(ioc,
ioc_err(ioc, "bad Replypost free pool(0x%p)"
"reply_post_free_dma = (0x%llx)\n",
#include <asm/mvme147hw.h>
#include <asm/irq.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
#include "mvme147.h"
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct Scsi_Host *instance = cmd->device->host;
struct WD33C93_hostdata *hdata = shost_priv(instance);
unsigned char flags = 0x01;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ unsigned long addr = virt_to_bus(scsi_pointer->ptr);
/* setup dma direction */
if (!dir_in)
if (dir_in) {
/* invalidate any cache */
- cache_clear(addr, cmd->SCp.this_residual);
+ cache_clear(addr, scsi_pointer->this_residual);
} else {
/* push any dirty cache */
- cache_push(addr, cmd->SCp.this_residual);
+ cache_push(addr, scsi_pointer->this_residual);
}
/* start DMA */
- m147_pcc->dma_bcr = cmd->SCp.this_residual | (1 << 24);
+ m147_pcc->dma_bcr = scsi_pointer->this_residual | (1 << 24);
m147_pcc->dma_dadr = addr;
m147_pcc->dma_cntrl = flags;
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static struct Scsi_Host *mvme147_shost;
SENSE_DATA = 2,
};
-/* define task management IU */
-struct mvs_tmf_task{
- u8 tmf;
- u16 tag_of_task_to_be_managed;
-};
#endif
.lldd_control_phy = mvs_phy_control,
.lldd_abort_task = mvs_abort_task,
- .lldd_abort_task_set = mvs_abort_task_set,
- .lldd_clear_aca = mvs_clear_aca,
- .lldd_clear_task_set = mvs_clear_task_set,
+ .lldd_abort_task_set = sas_abort_task_set,
+ .lldd_clear_task_set = sas_clear_task_set,
.lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
.lldd_lu_reset = mvs_lu_reset,
.lldd_query_task = mvs_query_task,
{
unsigned int rc, nhost = 0;
struct mvs_info *mvi;
- struct mvs_prv_info *mpi;
irq_handler_t irq_handler = mvs_interrupt;
struct Scsi_Host *shost = NULL;
const struct mvs_chip_info *chip;
}
nhost++;
} while (nhost < chip->n_host);
- mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha);
#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ {
+ struct mvs_prv_info *mpi = SHOST_TO_SAS_HA(shost)->lldd_ha;
+
tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
(unsigned long)SHOST_TO_SAS_HA(shost));
+ }
#endif
mvs_post_sas_ha_init(shost, chip);
static ssize_t driver_version_show(struct device *cdev,
struct device_attribute *attr, char *buffer)
{
- return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
+ return sysfs_emit(buffer, "%s\n", DRV_VERSION);
}
static DEVICE_ATTR_RO(driver_version);
static ssize_t interrupt_coalescing_show(struct device *cdev,
struct device_attribute *attr, char *buffer)
{
- return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
+ return sysfs_emit(buffer, "%d\n", interrupt_coalescing);
}
static DEVICE_ATTR_RW(interrupt_coalescing);
static int mvs_task_prep_ssp(struct mvs_info *mvi,
struct mvs_task_exec_info *tei, int is_tmf,
- struct mvs_tmf_task *tmf)
+ struct sas_tmf_task *tmf)
{
struct sas_task *task = tei->task;
struct mvs_cmd_hdr *hdr = tei->hdr;
#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
- struct mvs_tmf_task *tmf, int *pass)
+ struct sas_tmf_task *tmf, int *pass)
{
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = dev->lldd_dev;
slot->port = tei.port;
task->lldd_task = slot;
list_add_tail(&slot->entry, &tei.port->list);
- spin_lock(&task->task_state_lock);
- task->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock(&task->task_state_lock);
mvi_dev->running_req++;
++(*pass);
return rc;
}
-static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
- struct completion *completion, int is_tmf,
- struct mvs_tmf_task *tmf)
+int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
struct mvs_info *mvi = NULL;
u32 rc = 0;
u32 pass = 0;
unsigned long flags = 0;
+ struct sas_tmf_task *tmf = task->tmf;
+ int is_tmf = !!task->tmf;
mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
return rc;
}
-int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
-{
- return mvs_task_exec(task, gfp_flags, NULL, 0, NULL);
-}
-
static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
{
u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
mvs_dev_gone_notify(dev);
}
-static void mvs_task_done(struct sas_task *task)
-{
- if (!del_timer(&task->slow_task->timer))
- return;
- complete(&task->slow_task->completion);
-}
-
-static void mvs_tmf_timedout(struct timer_list *t)
-{
- struct sas_task_slow *slow = from_timer(slow, t, timer);
- struct sas_task *task = slow->task;
-
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- complete(&task->slow_task->completion);
-}
-
-#define MVS_TASK_TIMEOUT 20
-static int mvs_exec_internal_tmf_task(struct domain_device *dev,
- void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
-{
- int res, retry;
- struct sas_task *task = NULL;
-
- for (retry = 0; retry < 3; retry++) {
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = dev;
- task->task_proto = dev->tproto;
-
- memcpy(&task->ssp_task, parameter, para_len);
- task->task_done = mvs_task_done;
-
- task->slow_task->timer.function = mvs_tmf_timedout;
- task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
- add_timer(&task->slow_task->timer);
-
- res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf);
-
- if (res) {
- del_timer(&task->slow_task->timer);
- mv_printk("executing internal task failed:%d\n", res);
- goto ex_err;
- }
-
- wait_for_completion(&task->slow_task->completion);
- res = TMF_RESP_FUNC_FAILED;
- /* Even TMF timed out, return direct. */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
- goto ex_err;
- }
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_SAM_STAT_GOOD) {
- res = TMF_RESP_FUNC_COMPLETE;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_UNDERRUN) {
- /* no error, but return the number of bytes of
- * underrun */
- res = task->task_status.residual;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_OVERRUN) {
- mv_dprintk("blocked task error.\n");
- res = -EMSGSIZE;
- break;
- } else {
- mv_dprintk(" task to dev %016llx response: 0x%x "
- "status 0x%x\n",
- SAS_ADDR(dev->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- sas_free_task(task);
- task = NULL;
-
- }
- }
-ex_err:
- BUG_ON(retry == 3 && task != NULL);
- sas_free_task(task);
- return res;
-}
-
-static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
- u8 *lun, struct mvs_tmf_task *tmf)
-{
- struct sas_ssp_task ssp_task;
- if (!(dev->tproto & SAS_PROTOCOL_SSP))
- return TMF_RESP_FUNC_ESUPP;
-
- memcpy(ssp_task.LUN, lun, 8);
-
- return mvs_exec_internal_tmf_task(dev, &ssp_task,
- sizeof(ssp_task), tmf);
-}
-
-
/* Standard mandates link reset for ATA (type 0)
and hard reset for SSP (type 1), only for RECOVERY */
static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
{
unsigned long flags;
int rc = TMF_RESP_FUNC_FAILED;
- struct mvs_tmf_task tmf_task;
struct mvs_device * mvi_dev = dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
- tmf_task.tmf = TMF_LU_RESET;
mvi_dev->dev_status = MVS_DEV_EH;
- rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+ rc = sas_lu_reset(dev, lun);
if (rc == TMF_RESP_FUNC_COMPLETE) {
spin_lock_irqsave(&mvi->lock, flags);
mvs_release_task(mvi, dev);
int mvs_query_task(struct sas_task *task)
{
u32 tag;
- struct scsi_lun lun;
- struct mvs_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
- int_to_scsilun(cmnd->device->lun, &lun);
rc = mvs_find_tag(mvi, task, &tag);
if (rc == 0) {
rc = TMF_RESP_FUNC_FAILED;
return rc;
}
- tmf_task.tmf = TMF_QUERY_TASK;
- tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
-
- rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+ rc = sas_query_task(task, tag);
switch (rc) {
/* The task is still in Lun, release it then */
case TMF_RESP_FUNC_SUCC:
/* mandated by SAM-3; still need to free task/slot info */
int mvs_abort_task(struct sas_task *task)
{
- struct scsi_lun lun;
- struct mvs_tmf_task tmf_task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
struct mvs_info *mvi;
spin_unlock_irqrestore(&task->task_state_lock, flags);
mvi_dev->dev_status = MVS_DEV_EH;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
-
- int_to_scsilun(cmnd->device->lun, &lun);
rc = mvs_find_tag(mvi, task, &tag);
if (rc == 0) {
mv_printk("No such tag in %s\n", __func__);
return rc;
}
- tmf_task.tmf = TMF_ABORT_TASK;
- tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
-
- rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+ rc = sas_abort_task(task, tag);
/* if successful, clear the task and callback forwards.*/
if (rc == TMF_RESP_FUNC_COMPLETE) {
return rc;
}
-int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
-{
- int rc;
- struct mvs_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_ABORT_TASK_SET;
- rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
-
- return rc;
-}
-
-int mvs_clear_aca(struct domain_device *dev, u8 *lun)
-{
- int rc = TMF_RESP_FUNC_FAILED;
- struct mvs_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_CLEAR_ACA;
- rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
-
- return rc;
-}
-
-int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
-{
- int rc = TMF_RESP_FUNC_FAILED;
- struct mvs_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_CLEAR_TASK_SET;
- rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
-
- return rc;
-}
-
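Taken together, the deletions above remove the driver's private TMF plumbing: slow-task allocation, the 20-second timer, and the three-attempt retry loop now live in libsas, which exports per-function helpers (sas_abort_task(), sas_query_task(), sas_lu_reset(), and friends) that LLDDs call directly. A hedged sketch of how a handler reduces, with the surrounding cleanup left illustrative:

        #include <scsi/libsas.h>

        static int my_lu_reset(struct domain_device *dev, u8 *lun)
        {
                /* libsas builds the LOGICAL UNIT RESET TMF and handles
                 * the timeout and retry policy that drivers like mvsas
                 * used to open-code. */
                int rc = sas_lu_reset(dev, lun);

                if (rc == TMF_RESP_FUNC_COMPLETE) {
                        /* driver-specific release of outstanding slots
                         * goes here */
                }
                return rc;
        }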
static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
u32 slot_idx, int err)
{
static void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
u8 key, u8 asc, u8 asc_q)
{
- iu->datapres = 2;
+ iu->datapres = SAS_DATAPRES_SENSE_DATA;
iu->response_data_len = 0;
iu->sense_data_len = 17;
iu->status = 02;
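SAS_DATAPRES_SENSE_DATA replaces the bare 2; DATAPRES is a spec-defined two-bit field of the SSP response IU. For reference, the values mirror the SAM/SAS definition (spelled below as I believe the kernel enum spells them):

        enum {
                SAS_DATAPRES_NO_DATA            = 0,    /* neither buffer valid */
                SAS_DATAPRES_RESPONSE_DATA      = 1,    /* response data present */
                SAS_DATAPRES_SENSE_DATA         = 2,    /* sense data present */
        };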
mvi_dev = dev->lldd_dev;
spin_lock(&task->task_state_lock);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags |= SAS_TASK_STATE_DONE;
/* race condition */
aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags);
int mvs_abort_task(struct sas_task *task);
-int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
-int mvs_clear_aca(struct domain_device *dev, u8 *lun);
-int mvs_clear_task_set(struct domain_device *dev, u8 * lun);
void mvs_port_formed(struct asd_sas_phy *sas_phy);
void mvs_port_deformed(struct asd_sas_phy *sas_phy);
int mvs_dev_found(struct domain_device *dev);
{
struct scsi_cmnd *scmd = cmd->scmd;
- cmd->scmd->SCp.ptr = NULL;
+ mvumi_priv(cmd->scmd)->cmd_priv = NULL;
scmd->result = ob_frame->req_status;
switch (ob_frame->req_status) {
goto out_return_cmd;
cmd->scmd = scmd;
- scmd->SCp.ptr = (char *) cmd;
+ mvumi_priv(scmd)->cmd_priv = cmd;
mhba->instancet->fire_cmd(mhba, cmd);
spin_unlock_irqrestore(shost->host_lock, irq_flags);
return 0;
static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
{
- struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
+ struct mvumi_cmd *cmd = mvumi_priv(scmd)->cmd_priv;
struct Scsi_Host *host = scmd->device->host;
struct mvumi_hba *mhba = shost_priv(host);
unsigned long flags;
atomic_dec(&mhba->fw_outstanding);
scmd->result = (DID_ABORT << 16);
- scmd->SCp.ptr = NULL;
+ mvumi_priv(scmd)->cmd_priv = NULL;
if (scsi_bufflen(scmd)) {
dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
scsi_sg_count(scmd),
.bios_param = mvumi_bios_param,
.dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
+ .cmd_size = sizeof(struct mvumi_cmd_priv),
};
static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
unsigned char cmd_status;
};
+struct mvumi_cmd_priv {
+ struct mvumi_cmd *cmd_priv;
+};
+
+static inline struct mvumi_cmd_priv *mvumi_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
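This mvumi hunk is the canonical shape of the scsi_cmnd::SCp removal that runs through the rest of this section: the driver declares its own per-command struct, advertises its size via .cmd_size so the midlayer allocates it alongside every scsi_cmnd, and reaches it with scsi_cmd_priv(). A minimal self-contained sketch with hypothetical names:

        #include <scsi/scsi_cmnd.h>
        #include <scsi/scsi_host.h>

        struct my_cmd_priv {
                void *scratch;          /* driver-private per-command state */
        };

        static struct scsi_host_template my_template = {
                .name           = "my-driver",
                /* midlayer allocates this much extra space per command */
                .cmd_size       = sizeof(struct my_cmd_priv),
        };

        static void my_setup_cmd(struct scsi_cmnd *cmd)
        {
                struct my_cmd_priv *priv = scsi_cmd_priv(cmd);

                priv->scratch = NULL;   /* fresh for every command */
        }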
/*
* the function type of the in bound frame
*/
* Deal with DMA mapping/unmapping.
*/
-/* To keep track of the dma mapping (sg/single) that has been set */
-#define __data_mapped SCp.phase
-#define __data_mapping SCp.have_data_in
-
static void __unmap_scsi_data(struct device *dev, struct scsi_cmnd *cmd)
{
- switch(cmd->__data_mapped) {
+ struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd);
+
+ switch(cmd_priv->data_mapped) {
case 2:
scsi_dma_unmap(cmd);
break;
}
- cmd->__data_mapped = 0;
+ cmd_priv->data_mapped = 0;
}
static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
{
+ struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd);
int use_sg;
use_sg = scsi_dma_map(cmd);
if (!use_sg)
return 0;
- cmd->__data_mapped = 2;
- cmd->__data_mapping = use_sg;
+ cmd_priv->data_mapped = 2;
+ cmd_priv->data_mapping = use_sg;
return use_sg;
}
static int ncr53c8xx_queue_command_lck(struct scsi_cmnd *cmd)
{
+ struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd);
void (*done)(struct scsi_cmnd *) = scsi_done;
struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb;
unsigned long flags;
#endif
cmd->host_scribble = NULL;
- cmd->__data_mapped = 0;
- cmd->__data_mapping = 0;
+ cmd_priv->data_mapped = 0;
+ cmd_priv->data_mapping = 0;
spin_lock_irqsave(&np->smp_lock, flags);
u_long flags = 0;
int i;
+ WARN_ON_ONCE(tpnt->cmd_size < sizeof(struct ncr_cmd_priv));
+
if (!tpnt->name)
tpnt->name = SCSI_NCR_DRIVER_NAME;
if (!tpnt->shost_groups)
u8 differential;
};
+/* To keep track of the dma mapping (sg/single) that has been set */
+struct ncr_cmd_priv {
+ int data_mapped;
+ int data_mapping;
+};
+
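The WARN_ON_ONCE() added to ncr_attach() exists because this core is shared by several frontend drivers, each registering its own template; every frontend must now reserve at least the core's per-command area. A sketch of what a (hypothetical) frontend's template needs:

        #include <scsi/scsi_host.h>

        static struct scsi_host_template my_frontend_template = {
                .name           = "my-ncr-frontend",   /* hypothetical */
                /* at least the shared core's per-command area, or the
                 * WARN_ON_ONCE() in ncr_attach() fires at probe time */
                .cmd_size       = sizeof(struct ncr_cmd_priv),
        };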
extern struct Scsi_Host *ncr_attach(struct scsi_host_template *tpnt, int unit, struct ncr_device *device);
extern void ncr53c8xx_release(struct Scsi_Host *host);
irqreturn_t ncr53c8xx_intr(int irq, void *dev_id);
.eh_abort_handler = nsp32_eh_abort,
.eh_host_reset_handler = nsp32_eh_host_reset,
/* .highmem_io = 1, */
+ .cmd_size = sizeof(struct nsp32_cmd_priv),
};
#include "nsp32_io.h"
show_command(SCpnt);
data->CurrentSC = SCpnt;
- SCpnt->SCp.Status = SAM_STAT_CHECK_CONDITION;
+ nsp32_priv(SCpnt)->status = SAM_STAT_CHECK_CONDITION;
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
- SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt);
- SCpnt->SCp.this_residual = scsi_bufflen(SCpnt);
- SCpnt->SCp.buffer = NULL;
- SCpnt->SCp.buffers_residual = 0;
-
/* initialize data */
data->msgout_len = 0;
data->msgin_len = 0;
case BUSPHASE_STATUS:
nsp32_dbg(NSP32_DEBUG_INTR, "fifo/status");
- SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
+ nsp32_priv(SCpnt)->status = nsp32_read1(base, SCSI_CSB_IN);
break;
default:
/* MsgIn 00: Command Complete */
nsp32_dbg(NSP32_DEBUG_BUSFREE, "command complete");
- SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
+ nsp32_priv(SCpnt)->status = nsp32_read1(base, SCSI_CSB_IN);
nsp32_dbg(NSP32_DEBUG_BUSFREE,
"normal end stat=0x%x resid=0x%x\n",
- SCpnt->SCp.Status, scsi_get_resid(SCpnt));
+ nsp32_priv(SCpnt)->status, scsi_get_resid(SCpnt));
SCpnt->result = (DID_OK << 16) |
- (SCpnt->SCp.Status << 0);
+ (nsp32_priv(SCpnt)->status << 0);
nsp32_scsi_done(SCpnt);
/* All operation is done */
return TRUE;
} else if (execph & MSGIN_04_VALID) {
/* MsgIn 04: Disconnect */
- SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
+ nsp32_priv(SCpnt)->status = nsp32_read1(base, SCSI_CSB_IN);
nsp32_dbg(NSP32_DEBUG_BUSFREE, "disconnect");
return TRUE;
/* Unexpected bus free */
nsp32_msg(KERN_WARNING, "unexpected bus free occurred");
- /* DID_ERROR? */
- //SCpnt->result = (DID_OK << 16) | (SCpnt->SCp.Status << 0);
SCpnt->result = DID_ERROR << 16;
nsp32_scsi_done(SCpnt);
return TRUE;
---PERIOD-- ---OFFSET-- */
#define TO_SYNCREG(period, offset) (((period) & 0x0f) << 4 | ((offset) & 0x0f))
+struct nsp32_cmd_priv {
+ enum sam_status status;
+};
+
+static inline struct nsp32_cmd_priv *nsp32_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
typedef struct _nsp32_target {
unsigned char syncreg; /* value for SYNCREG */
unsigned char ackwidth; /* value for ACKWIDTH */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/ioport.h>
-#include <scsi/scsi.h>
#include <linux/major.h>
#include <linux/blkdev.h>
-#include <scsi/scsi_ioctl.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_tcq.h>
#include "aha152x.h"
#include <pcmcia/cistpl.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <../drivers/scsi/scsi.h>
-#include <scsi/scsi_host.h>
-
#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <pcmcia/cistpl.h>
module_param(free_ports, bool, 0);
MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0 (=no))");
+static struct scsi_pointer *nsp_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static struct scsi_host_template nsp_driver_template = {
.proc_name = "nsp_cs",
.show_info = nsp_show_info,
.this_id = NSP_INITIATOR_ID,
.sg_tablesize = SG_ALL,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
scsi_done(SCpnt);
}
-static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt)
+static int nsp_queuecommand_lck(struct scsi_cmnd *const SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
#ifdef NSP_DEBUG
/*unsigned int host_id = SCpnt->device->host->this_id;*/
/*unsigned int base = SCpnt->device->host->io_port;*/
data->CurrentSC = SCpnt;
- SCpnt->SCp.Status = SAM_STAT_CHECK_CONDITION;
- SCpnt->SCp.Message = 0;
- SCpnt->SCp.have_data_in = IO_UNKNOWN;
- SCpnt->SCp.sent_command = 0;
- SCpnt->SCp.phase = PH_UNDETERMINED;
+ scsi_pointer->Status = SAM_STAT_CHECK_CONDITION;
+ scsi_pointer->Message = 0;
+ scsi_pointer->have_data_in = IO_UNKNOWN;
+ scsi_pointer->sent_command = 0;
+ scsi_pointer->phase = PH_UNDETERMINED;
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
/* setup scratch area
SCp.buffers_residual : left buffers in list
SCp.phase : current state of the command */
if (scsi_bufflen(SCpnt)) {
- SCpnt->SCp.buffer = scsi_sglist(SCpnt);
- SCpnt->SCp.ptr = BUFFER_ADDR;
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
- SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
+ scsi_pointer->buffer = scsi_sglist(SCpnt);
+ scsi_pointer->ptr = BUFFER_ADDR(SCpnt);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ scsi_pointer->buffers_residual = scsi_sg_count(SCpnt) - 1;
} else {
- SCpnt->SCp.ptr = NULL;
- SCpnt->SCp.this_residual = 0;
- SCpnt->SCp.buffer = NULL;
- SCpnt->SCp.buffers_residual = 0;
+ scsi_pointer->ptr = NULL;
+ scsi_pointer->this_residual = 0;
+ scsi_pointer->buffer = NULL;
+ scsi_pointer->buffers_residual = 0;
}
- if (nsphw_start_selection(SCpnt) == FALSE) {
+ if (!nsphw_start_selection(SCpnt)) {
nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "selection fail");
SCpnt->result = DID_BUS_BUSY << 16;
nsp_scsi_done(SCpnt);
/*
* setup PIO FIFO transfer mode and enable/disable to data out
*/
-static void nsp_setup_fifo(nsp_hw_data *data, int enabled)
+static void nsp_setup_fifo(nsp_hw_data *data, bool enabled)
{
unsigned int base = data->BaseAddress;
unsigned char transfer_mode_reg;
//nsp_dbg(NSP_DEBUG_DATA_IO, "enabled=%d", enabled);
- if (enabled != FALSE) {
+ if (enabled) {
transfer_mode_reg = TRANSFER_GO | BRAIND;
} else {
transfer_mode_reg = 0;
/*
* Initialize Ninja hardware
*/
-static int nsphw_init(nsp_hw_data *data)
+static void nsphw_init(nsp_hw_data *data)
{
unsigned int base = data->BaseAddress;
SCSI_RESET_IRQ_EI );
nsp_write(base, IRQCONTROL, IRQCONTROL_ALLCLEAR);
- nsp_setup_fifo(data, FALSE);
-
- return TRUE;
+ nsp_setup_fifo(data, false);
}
/*
* Start selection phase
*/
-static int nsphw_start_selection(struct scsi_cmnd *SCpnt)
+static bool nsphw_start_selection(struct scsi_cmnd *const SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
unsigned int host_id = SCpnt->device->host->this_id;
unsigned int base = SCpnt->device->host->io_port;
unsigned char target = scmd_id(SCpnt);
phase = nsp_index_read(base, SCSIBUSMON);
if(phase != BUSMON_BUS_FREE) {
//nsp_dbg(NSP_DEBUG_RESELECTION, "bus busy");
- return FALSE;
+ return false;
}
/* start arbitration */
//nsp_dbg(NSP_DEBUG_RESELECTION, "start arbit");
- SCpnt->SCp.phase = PH_ARBSTART;
+ scsi_pointer->phase = PH_ARBSTART;
nsp_index_write(base, SETARBIT, ARBIT_GO);
time_out = 1000;
if (!(arbit & ARBIT_WIN)) {
//nsp_dbg(NSP_DEBUG_RESELECTION, "arbit fail");
nsp_index_write(base, SETARBIT, ARBIT_FLAG_CLEAR);
- return FALSE;
+ return false;
}
/* assert select line */
//nsp_dbg(NSP_DEBUG_RESELECTION, "assert SEL line");
- SCpnt->SCp.phase = PH_SELSTART;
+ scsi_pointer->phase = PH_SELSTART;
udelay(3); /* wait 2.4us */
nsp_index_write(base, SCSIDATALATCH, BIT(host_id) | BIT(target));
nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_BSY | SCSI_ATN);
nsp_start_timer(SCpnt, 1000/51);
data->SelectionTimeOut = 1;
- return TRUE;
+ return true;
}
struct nsp_sync_table {
sync->SyncRegister = 0;
sync->AckWidth = 0;
- return FALSE;
+ return false;
}
sync->SyncRegister = (sync_table->chip_period << SYNCREG_PERIOD_SHIFT) |
nsp_dbg(NSP_DEBUG_SYNC, "sync_reg=0x%x, ack_width=0x%x", sync->SyncRegister, sync->AckWidth);
- return TRUE;
+ return true;
}
/*
* transfer SCSI message
*/
-static int nsp_xfer(struct scsi_cmnd *SCpnt, int phase)
+static int nsp_xfer(struct scsi_cmnd *const SCpnt, int phase)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
unsigned int base = SCpnt->device->host->io_port;
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
char *buf = data->MsgBuffer;
}
/* if last byte, negate ATN */
- if (len == 1 && SCpnt->SCp.phase == PH_MSG_OUT) {
+ if (len == 1 && scsi_pointer->phase == PH_MSG_OUT) {
nsp_index_write(base, SCSIBUSCTRL, AUTODIRECTION | ACKENB);
}
/*
* get extra SCSI data from fifo
*/
-static int nsp_dataphase_bypass(struct scsi_cmnd *SCpnt)
+static int nsp_dataphase_bypass(struct scsi_cmnd *const SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
unsigned int count;
//nsp_dbg(NSP_DEBUG_DATA_IO, "in");
- if (SCpnt->SCp.have_data_in != IO_IN) {
+ if (scsi_pointer->have_data_in != IO_IN) {
return 0;
}
* data phase skip only occurs in case of SCSI_LOW_READ
*/
nsp_dbg(NSP_DEBUG_DATA_IO, "use bypass quirk");
- SCpnt->SCp.phase = PH_DATA;
+ scsi_pointer->phase = PH_DATA;
nsp_pio_read(SCpnt);
- nsp_setup_fifo(data, FALSE);
+ nsp_setup_fifo(data, false);
return 0;
}
/*
* accept reselection
*/
-static int nsp_reselected(struct scsi_cmnd *SCpnt)
+static void nsp_reselected(struct scsi_cmnd *SCpnt)
{
unsigned int base = SCpnt->device->host->io_port;
unsigned int host_id = SCpnt->device->host->this_id;
bus_reg = nsp_index_read(base, SCSIBUSCTRL) & ~(SCSI_BSY | SCSI_ATN);
nsp_index_write(base, SCSIBUSCTRL, bus_reg);
nsp_index_write(base, SCSIBUSCTRL, bus_reg | AUTODIRECTION | ACKENB);
-
- return TRUE;
}
/*
/*
* read data in DATA IN phase
*/
-static void nsp_pio_read(struct scsi_cmnd *SCpnt)
+static void nsp_pio_read(struct scsi_cmnd *const SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
unsigned int base = SCpnt->device->host->io_port;
unsigned long mmio_base = SCpnt->device->host->base;
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
ocount = data->FifoCount;
nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p resid=%d ocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d",
- SCpnt, scsi_get_resid(SCpnt), ocount, SCpnt->SCp.ptr,
- SCpnt->SCp.this_residual, SCpnt->SCp.buffer,
- SCpnt->SCp.buffers_residual);
+ SCpnt, scsi_get_resid(SCpnt), ocount, scsi_pointer->ptr,
+ scsi_pointer->this_residual, scsi_pointer->buffer,
+ scsi_pointer->buffers_residual);
time_out = 1000;
while ((time_out-- != 0) &&
- (SCpnt->SCp.this_residual > 0 || SCpnt->SCp.buffers_residual > 0 ) ) {
+ (scsi_pointer->this_residual > 0 ||
+ scsi_pointer->buffers_residual > 0)) {
stat = nsp_index_read(base, SCSIBUSMON);
stat &= BUSMON_PHASE_MASK;
res = nsp_fifo_count(SCpnt) - ocount;
- //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x ocount=0x%x res=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, ocount, res);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x ocount=0x%x res=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, ocount, res);
if (res == 0) { /* no new data in the FIFO yet? */
if (stat == BUSPHASE_DATA_IN) { /* phase changed? */
- //nsp_dbg(NSP_DEBUG_DATA_IO, " wait for data this=%d", SCpnt->SCp.this_residual);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, " wait for data this=%d", scsi_pointer->this_residual);
continue;
} else {
nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x", stat);
continue;
}
- res = min(res, SCpnt->SCp.this_residual);
+ res = min(res, scsi_pointer->this_residual);
switch (data->TransferMode) {
case MODE_IO32:
res &= ~(BIT(1)|BIT(0)); /* align 4 */
- nsp_fifo32_read(base, SCpnt->SCp.ptr, res >> 2);
+ nsp_fifo32_read(base, scsi_pointer->ptr, res >> 2);
break;
case MODE_IO8:
- nsp_fifo8_read (base, SCpnt->SCp.ptr, res );
+ nsp_fifo8_read(base, scsi_pointer->ptr, res);
break;
case MODE_MEM32:
res &= ~(BIT(1)|BIT(0)); /* align 4 */
- nsp_mmio_fifo32_read(mmio_base, SCpnt->SCp.ptr, res >> 2);
+ nsp_mmio_fifo32_read(mmio_base, scsi_pointer->ptr,
+ res >> 2);
break;
default:
}
nsp_inc_resid(SCpnt, -res);
- SCpnt->SCp.ptr += res;
- SCpnt->SCp.this_residual -= res;
+ scsi_pointer->ptr += res;
+ scsi_pointer->this_residual -= res;
ocount += res;
- //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this_residual=0x%x ocount=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, ocount);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this_residual=0x%x ocount=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, ocount);
/* go to next scatter list if available */
- if (SCpnt->SCp.this_residual == 0 &&
- SCpnt->SCp.buffers_residual != 0 ) {
+ if (scsi_pointer->this_residual == 0 &&
+ scsi_pointer->buffers_residual != 0) {
//nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next timeout=%d", time_out);
- SCpnt->SCp.buffers_residual--;
- SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
- SCpnt->SCp.ptr = BUFFER_ADDR;
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ scsi_pointer->buffers_residual--;
+ scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
+ scsi_pointer->ptr = BUFFER_ADDR(SCpnt);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
time_out = 1000;
- //nsp_dbg(NSP_DEBUG_DATA_IO, "page: 0x%p, off: 0x%x", SCpnt->SCp.buffer->page, SCpnt->SCp.buffer->offset);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "page: 0x%p, off: 0x%x", scsi_pointer->buffer->page, scsi_pointer->buffer->offset);
}
}
if (time_out < 0) {
nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d",
- scsi_get_resid(SCpnt), SCpnt->SCp.this_residual,
- SCpnt->SCp.buffers_residual);
+ scsi_get_resid(SCpnt), scsi_pointer->this_residual,
+ scsi_pointer->buffers_residual);
}
nsp_dbg(NSP_DEBUG_DATA_IO, "read ocount=0x%x", ocount);
nsp_dbg(NSP_DEBUG_DATA_IO, "r cmd=%d resid=0x%x\n", data->CmdId,
*/
static void nsp_pio_write(struct scsi_cmnd *SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
unsigned int base = SCpnt->device->host->io_port;
unsigned long mmio_base = SCpnt->device->host->base;
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
ocount = data->FifoCount;
nsp_dbg(NSP_DEBUG_DATA_IO, "in fifocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d resid=0x%x",
- data->FifoCount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual,
- SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual,
+ data->FifoCount, scsi_pointer->ptr, scsi_pointer->this_residual,
+ scsi_pointer->buffer, scsi_pointer->buffers_residual,
scsi_get_resid(SCpnt));
time_out = 1000;
while ((time_out-- != 0) &&
- (SCpnt->SCp.this_residual > 0 || SCpnt->SCp.buffers_residual > 0)) {
+ (scsi_pointer->this_residual > 0 ||
+ scsi_pointer->buffers_residual > 0)) {
stat = nsp_index_read(base, SCSIBUSMON);
stat &= BUSMON_PHASE_MASK;
nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x, res=%d\n", stat, res);
/* Put back pointer */
nsp_inc_resid(SCpnt, res);
- SCpnt->SCp.ptr -= res;
- SCpnt->SCp.this_residual += res;
- ocount -= res;
+ scsi_pointer->ptr -= res;
+ scsi_pointer->this_residual += res;
+ ocount -= res;
break;
}
continue;
}
- res = min(SCpnt->SCp.this_residual, WFIFO_CRIT);
+ res = min(scsi_pointer->this_residual, WFIFO_CRIT);
- //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x res=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, res);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x res=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, res);
switch (data->TransferMode) {
case MODE_IO32:
res &= ~(BIT(1)|BIT(0)); /* align 4 */
- nsp_fifo32_write(base, SCpnt->SCp.ptr, res >> 2);
+ nsp_fifo32_write(base, scsi_pointer->ptr, res >> 2);
break;
case MODE_IO8:
- nsp_fifo8_write (base, SCpnt->SCp.ptr, res );
+ nsp_fifo8_write(base, scsi_pointer->ptr, res);
break;
case MODE_MEM32:
res &= ~(BIT(1)|BIT(0)); /* align 4 */
- nsp_mmio_fifo32_write(mmio_base, SCpnt->SCp.ptr, res >> 2);
+ nsp_mmio_fifo32_write(mmio_base, scsi_pointer->ptr,
+ res >> 2);
break;
default:
}
nsp_inc_resid(SCpnt, -res);
- SCpnt->SCp.ptr += res;
- SCpnt->SCp.this_residual -= res;
- ocount += res;
+ scsi_pointer->ptr += res;
+ scsi_pointer->this_residual -= res;
+ ocount += res;
/* go to next scatter list if available */
- if (SCpnt->SCp.this_residual == 0 &&
- SCpnt->SCp.buffers_residual != 0 ) {
+ if (scsi_pointer->this_residual == 0 &&
+ scsi_pointer->buffers_residual != 0) {
//nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next");
- SCpnt->SCp.buffers_residual--;
- SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
- SCpnt->SCp.ptr = BUFFER_ADDR;
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ scsi_pointer->buffers_residual--;
+ scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
+ scsi_pointer->ptr = BUFFER_ADDR(SCpnt);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
time_out = 1000;
}
}
}
/* setup pdma fifo */
- nsp_setup_fifo(data, TRUE);
+ nsp_setup_fifo(data, true);
/* clear ack counter */
data->FifoCount = 0;
unsigned int base;
unsigned char irq_status, irq_phase, phase;
struct scsi_cmnd *tmpSC;
+ struct scsi_pointer *scsi_pointer;
unsigned char target, lun;
unsigned int *sync_neg;
int i, tmp;
if(data->CurrentSC != NULL) {
tmpSC = data->CurrentSC;
- tmpSC->result = (DID_RESET << 16) |
- ((tmpSC->SCp.Message & 0xff) << 8) |
- ((tmpSC->SCp.Status & 0xff) << 0);
+ scsi_pointer = nsp_priv(tmpSC);
+ tmpSC->result = (DID_RESET << 16) |
+ ((scsi_pointer->Message & 0xff) << 8) |
+ ((scsi_pointer->Status & 0xff) << 0);
nsp_scsi_done(tmpSC);
}
return IRQ_HANDLED;
}
tmpSC = data->CurrentSC;
+ scsi_pointer = nsp_priv(tmpSC);
target = tmpSC->device->id;
lun = tmpSC->device->lun;
sync_neg = &(data->Sync[target].SyncNegotiation);
if (irq_phase & RESELECT_IRQ) {
nsp_dbg(NSP_DEBUG_INTR, "reselect");
nsp_write(base, IRQCONTROL, IRQCONTROL_RESELECT_CLEAR);
- if (nsp_reselected(tmpSC) != FALSE) {
- return IRQ_HANDLED;
- }
+ nsp_reselected(tmpSC);
+ return IRQ_HANDLED;
}
if ((irq_phase & (PHASE_CHANGE_IRQ | LATCHED_BUS_FREE)) == 0) {
//show_phase(tmpSC);
- switch(tmpSC->SCp.phase) {
+ switch (scsi_pointer->phase) {
case PH_SELSTART:
// *sync_neg = SYNC_NOT_YET;
if ((phase & BUSMON_BSY) == 0) {
/* attention assert */
//nsp_dbg(NSP_DEBUG_INTR, "attention assert");
data->SelectionTimeOut = 0;
- tmpSC->SCp.phase = PH_SELECTED;
+ scsi_pointer->phase = PH_SELECTED;
nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN);
udelay(1);
nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN | AUTODIRECTION | ACKENB);
//nsp_dbg(NSP_DEBUG_INTR, "start scsi seq");
/* normal disconnect */
- if (((tmpSC->SCp.phase == PH_MSG_IN) || (tmpSC->SCp.phase == PH_MSG_OUT)) &&
- (irq_phase & LATCHED_BUS_FREE) != 0 ) {
+ if ((scsi_pointer->phase == PH_MSG_IN ||
+ scsi_pointer->phase == PH_MSG_OUT) &&
+ (irq_phase & LATCHED_BUS_FREE) != 0) {
nsp_dbg(NSP_DEBUG_INTR, "normal disconnect irq_status=0x%x, phase=0x%x, irq_phase=0x%x", irq_status, phase, irq_phase);
//*sync_neg = SYNC_NOT_YET;
/* all command complete and return status */
- if (tmpSC->SCp.Message == COMMAND_COMPLETE) {
- tmpSC->result = (DID_OK << 16) |
- ((tmpSC->SCp.Message & 0xff) << 8) |
- ((tmpSC->SCp.Status & 0xff) << 0);
+ if (scsi_pointer->Message == COMMAND_COMPLETE) {
+ tmpSC->result = (DID_OK << 16) |
+ ((scsi_pointer->Message & 0xff) << 8) |
+ ((scsi_pointer->Status & 0xff) << 0);
nsp_dbg(NSP_DEBUG_INTR, "command complete result=0x%x", tmpSC->result);
nsp_scsi_done(tmpSC);
return IRQ_HANDLED;
}
- tmpSC->SCp.phase = PH_COMMAND;
+ scsi_pointer->phase = PH_COMMAND;
nsp_nexus(tmpSC);
case BUSPHASE_DATA_OUT:
nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_OUT");
- tmpSC->SCp.phase = PH_DATA;
- tmpSC->SCp.have_data_in = IO_OUT;
+ scsi_pointer->phase = PH_DATA;
+ scsi_pointer->have_data_in = IO_OUT;
nsp_pio_write(tmpSC);
case BUSPHASE_DATA_IN:
nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_IN");
- tmpSC->SCp.phase = PH_DATA;
- tmpSC->SCp.have_data_in = IO_IN;
+ scsi_pointer->phase = PH_DATA;
+ scsi_pointer->have_data_in = IO_IN;
nsp_pio_read(tmpSC);
nsp_dataphase_bypass(tmpSC);
nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_STATUS");
- tmpSC->SCp.phase = PH_STATUS;
+ scsi_pointer->phase = PH_STATUS;
- tmpSC->SCp.Status = nsp_index_read(base, SCSIDATAWITHACK);
- nsp_dbg(NSP_DEBUG_INTR, "message=0x%x status=0x%x", tmpSC->SCp.Message, tmpSC->SCp.Status);
+ scsi_pointer->Status = nsp_index_read(base, SCSIDATAWITHACK);
+ nsp_dbg(NSP_DEBUG_INTR, "message=0x%x status=0x%x",
+ scsi_pointer->Message, scsi_pointer->Status);
break;
goto timer_out;
}
- tmpSC->SCp.phase = PH_MSG_OUT;
+ scsi_pointer->phase = PH_MSG_OUT;
//*sync_neg = SYNC_NOT_YET;
data->MsgLen = i = 0;
- data->MsgBuffer[i] = IDENTIFY(TRUE, lun); i++;
+ data->MsgBuffer[i] = IDENTIFY(true, lun); i++;
if (*sync_neg == SYNC_NOT_YET) {
data->Sync[target].SyncPeriod = 0;
goto timer_out;
}
- tmpSC->SCp.phase = PH_MSG_IN;
+ scsi_pointer->phase = PH_MSG_IN;
nsp_message_in(tmpSC);
/**/
i += (1 + data->MsgBuffer[i+1]);
}
}
- tmpSC->SCp.Message = tmp;
+ scsi_pointer->Message = tmp;
- nsp_dbg(NSP_DEBUG_INTR, "message=0x%x len=%d", tmpSC->SCp.Message, data->MsgLen);
+ nsp_dbg(NSP_DEBUG_INTR, "message=0x%x len=%d",
+ scsi_pointer->Message, data->MsgLen);
show_message(data);
break;
nsp_dbg(NSP_DEBUG_INIT, "I/O[0x%x+0x%x] IRQ %d",
data->BaseAddress, data->NumAddress, data->IrqNumber);
- if(nsphw_init(data) == FALSE) {
- goto cs_failed;
- }
+ nsphw_init(data);
host = nsp_detect(&nsp_driver_template);
static int nsp_bus_reset (nsp_hw_data *data);
/* */
-static int nsphw_init (nsp_hw_data *data);
-static int nsphw_start_selection(struct scsi_cmnd *SCpnt);
+static void nsphw_init (nsp_hw_data *data);
+static bool nsphw_start_selection(struct scsi_cmnd *SCpnt);
static void nsp_start_timer (struct scsi_cmnd *SCpnt, int time);
static int nsp_fifo_count (struct scsi_cmnd *SCpnt);
static void nsp_pio_read (struct scsi_cmnd *SCpnt);
unsigned char mask);
static int nsp_xfer (struct scsi_cmnd *SCpnt, int phase);
static int nsp_dataphase_bypass (struct scsi_cmnd *SCpnt);
-static int nsp_reselected (struct scsi_cmnd *SCpnt);
+static void nsp_reselected (struct scsi_cmnd *SCpnt);
static struct Scsi_Host *nsp_detect(struct scsi_host_template *sht);
/* Interrupt handler */
};
/* scatter-gather table */
-# define BUFFER_ADDR ((char *)((sg_virt(SCpnt->SCp.buffer))))
+#define BUFFER_ADDR(SCpnt) ((char *)(sg_virt(nsp_priv(SCpnt)->buffer)))
#endif /*__nsp_cs__*/
/* end */
static void show_phase(struct scsi_cmnd *SCpnt)
{
- int i = SCpnt->SCp.phase;
+ int i = nsp_scsi_pointer(SCpnt)->phase;
char *ph[] = {
"PH_UNDETERMINED",
#include <linux/string.h>
#include <linux/ioport.h>
#include <asm/io.h>
-#include <scsi/scsi.h>
#include <linux/major.h>
#include <linux/blkdev.h>
-#include <scsi/scsi_ioctl.h>
#include <linux/interrupt.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_tcq.h>
#include "../qlogicfas408.h"
#include <pcmcia/cistpl.h>
int fast_pio;
};
+static struct scsi_pointer *sym53c500_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
enum Phase {
idle,
data_out,
struct sym53c500_data *data =
(struct sym53c500_data *)dev->hostdata;
struct scsi_cmnd *curSC = data->current_SC;
+ struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(curSC);
int fast_pio = data->fast_pio;
spin_lock_irqsave(dev->host_lock, flags);
if (int_reg & 0x20) { /* Disconnect */
DEB(printk("SYM53C500: disconnect intr received\n"));
- if (curSC->SCp.phase != message_in) { /* Unexpected disconnect */
+ if (scsi_pointer->phase != message_in) { /* Unexpected disconnect */
curSC->result = DID_NO_CONNECT << 16;
} else { /* Command complete, return status and message */
- curSC->result = (curSC->SCp.Status & 0xff)
- | ((curSC->SCp.Message & 0xff) << 8) | (DID_OK << 16);
+ curSC->result = (scsi_pointer->Status & 0xff) |
+ ((scsi_pointer->Message & 0xff) << 8) |
+ (DID_OK << 16);
}
goto idle_out;
}
struct scatterlist *sg;
int i;
- curSC->SCp.phase = data_out;
+ scsi_pointer->phase = data_out;
VDEB(printk("SYM53C500: Data-Out phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
struct scatterlist *sg;
int i;
- curSC->SCp.phase = data_in;
+ scsi_pointer->phase = data_in;
VDEB(printk("SYM53C500: Data-In phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
break;
case 0x02: /* COMMAND */
- curSC->SCp.phase = command_ph;
+ scsi_pointer->phase = command_ph;
printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n");
break;
case 0x03: /* STATUS */
- curSC->SCp.phase = status_ph;
+ scsi_pointer->phase = status_ph;
VDEB(printk("SYM53C500: Status phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
outb(INIT_CMD_COMPLETE, port_base + CMD_REG);
case 0x06: /* MESSAGE-OUT */
DEB(printk("SYM53C500: Message-Out phase\n"));
- curSC->SCp.phase = message_out;
+ scsi_pointer->phase = message_out;
outb(SET_ATN, port_base + CMD_REG); /* Reject the message */
outb(MSG_ACCEPT, port_base + CMD_REG);
break;
case 0x07: /* MESSAGE-IN */
VDEB(printk("SYM53C500: Message-In phase\n"));
- curSC->SCp.phase = message_in;
+ scsi_pointer->phase = message_in;
- curSC->SCp.Status = inb(port_base + SCSI_FIFO);
- curSC->SCp.Message = inb(port_base + SCSI_FIFO);
+ scsi_pointer->Status = inb(port_base + SCSI_FIFO);
+ scsi_pointer->Message = inb(port_base + SCSI_FIFO);
VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f));
- DEB(printk("Status = %02x Message = %02x\n", curSC->SCp.Status, curSC->SCp.Message));
+ DEB(printk("Status = %02x Message = %02x\n",
+ scsi_pointer->Status, scsi_pointer->Message));
- if (curSC->SCp.Message == SAVE_POINTERS || curSC->SCp.Message == DISCONNECT) {
+ if (scsi_pointer->Message == SAVE_POINTERS ||
+ scsi_pointer->Message == DISCONNECT) {
outb(SET_ATN, port_base + CMD_REG); /* Reject message */
DEB(printk("Discarding SAVE_POINTERS message\n"));
}
return IRQ_HANDLED;
idle_out:
- curSC->SCp.phase = idle;
+ scsi_pointer->phase = idle;
scsi_done(curSC);
goto out;
}
static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
{
+ struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(SCpnt);
int i;
int port_base = SCpnt->device->host->io_port;
struct sym53c500_data *data =
VDEB(printk("\n"));
data->current_SC = SCpnt;
- data->current_SC->SCp.phase = command_ph;
- data->current_SC->SCp.Status = 0;
- data->current_SC->SCp.Message = 0;
+ scsi_pointer->phase = command_ph;
+ scsi_pointer->Status = 0;
+ scsi_pointer->Message = 0;
/* We are locked here already by the mid layer */
REG0(port_base);
.can_queue = 1,
.this_id = 7,
.sg_tablesize = 32,
- .shost_groups = SYM53C500_shost_groups
+ .shost_groups = SYM53C500_shost_groups,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data)
DECLARE_COMPLETION_ONSTACK(completion);
u8 *ioctlbuffer;
struct fw_control_info *fwControl;
- u32 partitionSize, partitionSizeTmp;
+ __be32 partitionSizeTmp;
+ u32 partitionSize;
u32 loopNumber, loopcount;
struct pm8001_fw_image_header *image_hdr;
u32 sizeRead = 0;
u32 ret = 0;
u32 length = 1024 * 16 + sizeof(*payload) - 1;
+ u32 fc_len;
+ u8 *read_buf;
if (pm8001_ha->fw_image->size < 28) {
pm8001_ha->fw_status = FAIL_FILE_SIZE;
image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data;
while (sizeRead < pm8001_ha->fw_image->size) {
partitionSizeTmp =
- *(u32 *)((u8 *)&image_hdr->image_length + sizeRead);
+ *(__be32 *)((u8 *)&image_hdr->image_length + sizeRead);
partitionSize = be32_to_cpu(partitionSizeTmp);
loopcount = DIV_ROUND_UP(partitionSize + HEADER_LEN,
IOCTL_BUF_SIZE);
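Typing partitionSizeTmp as __be32 documents that the firmware image stores lengths big-endian and lets sparse verify the be32_to_cpu() conversion. A small sketch of the idiom (the struct shape here is illustrative, not the driver's real header):

        #include <linux/types.h>
        #include <asm/byteorder.h>

        struct fw_part_hdr {
                __be32 image_length;    /* stored big-endian in the image */
        };

        static u32 fw_part_len(const struct fw_part_hdr *hdr)
        {
                /* keeping the field __be32 makes a missing conversion a
                 * sparse warning instead of a silent endianness bug */
                return be32_to_cpu(hdr->image_length);
        }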
fwControl->retcode = 0;/* OUT */
fwControl->offset = loopNumber * IOCTL_BUF_SIZE;/*OUT */
- /* for the last chunk of data in case file size is not even with
- 4k, load only the rest*/
- if (((loopcount-loopNumber) == 1) &&
- ((partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE)) {
- fwControl->len =
- (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
- memcpy((u8 *)fwControl->buffer,
- (u8 *)pm8001_ha->fw_image->data + sizeRead,
- (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE);
- sizeRead +=
- (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
- } else {
- memcpy((u8 *)fwControl->buffer,
- (u8 *)pm8001_ha->fw_image->data + sizeRead,
- IOCTL_BUF_SIZE);
- sizeRead += IOCTL_BUF_SIZE;
- }
-
- pm8001_ha->nvmd_completion = &completion;
- ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload);
- if (ret) {
- pm8001_ha->fw_status = FAIL_OUT_MEMORY;
- goto out;
- }
- wait_for_completion(&completion);
- if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) {
- pm8001_ha->fw_status = fwControl->retcode;
- ret = -EFAULT;
- goto out;
- }
+ /*
+ * For the last chunk, when the file size is not a
+ * multiple of 4k, load only the remainder.
+ */
+
+ read_buf = (u8 *)pm8001_ha->fw_image->data + sizeRead;
+ fc_len = (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
+
+ if (loopcount - loopNumber == 1 && fc_len) {
+ fwControl->len = fc_len;
+ memcpy((u8 *)fwControl->buffer, read_buf, fc_len);
+ sizeRead += fc_len;
+ } else {
+ memcpy((u8 *)fwControl->buffer, read_buf, IOCTL_BUF_SIZE);
+ sizeRead += IOCTL_BUF_SIZE;
+ }
+
+ pm8001_ha->nvmd_completion = &completion;
+ ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload);
+ if (ret) {
+ pm8001_ha->fw_status = FAIL_OUT_MEMORY;
+ goto out;
+ }
+ wait_for_completion(&completion);
+ if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) {
+ pm8001_ha->fw_status = fwControl->retcode;
+ ret = -EFAULT;
+ goto out;
+ }
}
}
out:
* pm8001_mpi_build_cmd - build a command in the inbound message queue and
* update the PI so the FW fetches the message from the IOMB.
* @pm8001_ha: our hba card information
- * @circularQ: the inbound queue we want to transfer to HBA.
+ * @q_index: the index in the inbound queue we want to transfer to HBA.
* @opCode: the operation code represents commands which LLDD and fw recognized.
* @payload: the command payload of each operation command.
* @nb: size in bytes of the command payload
* @responseQueue: queue to interrupt on w/ command response (if any)
*/
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
- struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, size_t nb,
+ u32 q_index, u32 opCode, void *payload, size_t nb,
u32 responseQueue)
{
u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
void *pMessage;
unsigned long flags;
- int q_index = circularQ - pm8001_ha->inbnd_q_tbl;
+ struct inbound_queue_table *circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
int rv;
u32 htag = le32_to_cpu(*(__le32 *)payload);
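Every caller in the hunks that follow changes shape the same way: instead of resolving &pm8001_ha->inbnd_q_tbl[0] itself, the caller passes the index and the table stays private to the MPI layer. A self-contained sketch of a converted call site (opc/payload supplied by the caller, as in the hunks below):

        /* shape of a converted call site */
        static int send_payload_sketch(struct pm8001_hba_info *pm8001_ha,
                                       u32 opc, void *payload, size_t nb)
        {
                /* queue 0 selected by index; the inbound_q_tbl lookup
                 * now happens inside pm8001_mpi_build_cmd() */
                return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, payload, nb, 0);
        }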
case IO_XFER_ERROR_BREAK:
{ /* This one stashes the sas_task instead */
struct sas_task *t = (struct sas_task *)pm8001_dev;
- u32 tag;
struct pm8001_ccb_info *ccb;
struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
unsigned long flags, flags1;
/* Search for a possible ccb that matches the task */
for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
ccb = &pm8001_ha->ccb_info[i];
- tag = ccb->ccb_tag;
- if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+ if ((ccb->ccb_tag != PM8001_INVALID_TAG) &&
+ (ccb->task == t))
break;
}
if (!ccb) {
atomic_dec(&pm8001_dev->running_req);
spin_lock_irqsave(&t->task_state_lock, flags1);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags1);
pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, pw->handler, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags1);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
case IO_XFER_OPEN_RETRY_TIMEOUT:
{ /* This one stashes the sas_task instead */
struct sas_task *t = (struct sas_task *)pm8001_dev;
- u32 tag;
struct pm8001_ccb_info *ccb;
struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
unsigned long flags, flags1;
/* Search for a possible ccb that matches the task */
for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
ccb = &pm8001_ha->ccb_info[i];
- tag = ccb->ccb_tag;
- if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+ if ((ccb->ccb_tag != PM8001_INVALID_TAG) &&
+ (ccb->task == t))
break;
}
if (!ccb) {
struct task_status_struct *ts;
struct sas_task *task;
int i;
- u32 tag, device_id;
+ u32 device_id;
for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
ccb = &pm8001_ha->ccb_info[i];
task = ccb->task;
ts = &task->task_status;
- tag = ccb->ccb_tag;
- /* check if tag is NULL */
- if (!tag) {
- pm8001_dbg(pm8001_ha, FAIL,
- "tag Null\n");
- continue;
- }
+
if (task != NULL) {
dev = task->dev;
if (!dev) {
continue;
}
/*complete sas task and update to top layer */
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
ts->resp = SAS_TASK_COMPLETE;
task->task_done(task);
- } else if (tag != 0xFFFFFFFF) {
+ } else if (ccb->ccb_tag != PM8001_INVALID_TAG) {
/* complete the internal commands/non-sas task */
pm8001_dev = ccb->device;
if (pm8001_dev->dcompletion) {
pm8001_dev->dcompletion = NULL;
}
complete(pm8001_ha->nvmd_completion);
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
}
/* Deregister all the device ids */
static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_ha_dev)
{
- int res;
- u32 ccb_tag;
struct pm8001_ccb_info *ccb;
- struct sas_task *task = NULL;
+ struct sas_task *task;
struct task_abort_req task_abort;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_ABORT;
int ret;
- if (!pm8001_ha_dev) {
- pm8001_dbg(pm8001_ha, FAIL, "dev is null\n");
- return;
- }
+ pm8001_ha_dev->id |= NCQ_ABORT_ALL_FLAG;
+ pm8001_ha_dev->id &= ~NCQ_READ_LOG_FLAG;
task = sas_alloc_slow_task(GFP_ATOMIC);
-
if (!task) {
pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n");
return;
task->task_done = pm8001_task_done;
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res)
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task);
+ if (!ccb) {
+ sas_free_task(task);
return;
-
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_ha_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
-
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ }
memset(&task_abort, 0, sizeof(task_abort));
task_abort.abort_all = cpu_to_le32(1);
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- task_abort.tag = cpu_to_le32(ccb_tag);
-
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
- sizeof(task_abort), 0);
- if (ret)
- pm8001_tag_free(pm8001_ha, ccb_tag);
+ task_abort.tag = cpu_to_le32(ccb->ccb_tag);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort,
+ sizeof(task_abort), 0);
+ if (ret) {
+ sas_free_task(task);
+ pm8001_ccb_free(pm8001_ha, ccb);
+ }
}
static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
{
struct sata_start_req sata_cmd;
int res;
- u32 ccb_tag;
struct pm8001_ccb_info *ccb;
struct sas_task *task = NULL;
struct host_to_dev_fis fis;
struct domain_device *dev;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
task = sas_alloc_slow_task(GFP_ATOMIC);
-
if (!task) {
pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n");
return;
}
task->task_done = pm8001_task_done;
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res) {
- sas_free_task(task);
- pm8001_dbg(pm8001_ha, FAIL, "cannot allocate tag !!!\n");
- return;
- }
-
- /* allocate domain device by ourselves as libsas
- * is not going to provide any
- */
+ /*
+ * Allocate domain device by ourselves as libsas is not going to
+ * provide any.
+ */
dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
if (!dev) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
pm8001_dbg(pm8001_ha, FAIL,
"Domain device cannot be allocated\n");
return;
task->dev = dev;
task->dev->lldd_dev = pm8001_ha_dev;
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_ha_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task);
+ if (!ccb) {
+ sas_free_task(task);
+ kfree(dev);
+ return;
+ }
+
pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
- memset(&sata_cmd, 0, sizeof(sata_cmd));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
-
/* construct read log FIS */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.fis_type = 0x27;
fis.lbal = 0x10;
fis.sector_count = 0x1;
- sata_cmd.tag = cpu_to_le32(ccb_tag);
+ memset(&sata_cmd, 0, sizeof(sata_cmd));
+ sata_cmd.tag = cpu_to_le32(ccb->ccb_tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
+ sata_cmd.ncqtag_atap_dir_m = cpu_to_le32((0x1 << 7) | (0x5 << 9));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
- sizeof(sata_cmd), 0);
+ res = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
if (res) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
kfree(dev);
}
}
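The FIS assembled here is a READ LOG EXT of log page 10h, the NCQ command error log a drive must have read before it will accept new queued commands after an NCQ error. A generic sketch of such a FIS using standard ATA values; only fis_type, lbal, and sector_count are visible in the hunk, so the remaining fields below are common ATA knowledge rather than this driver's exact code:

        #include <linux/ata.h>
        #include <linux/string.h>

        struct h2d_fis_sketch {                 /* illustrative FIS layout */
                u8 fis_type, flags, command, features;
                u8 lbal, lbam, lbah, device;
                u8 lbal_exp, lbam_exp, lbah_exp, features_exp;
                u8 sector_count, sector_count_exp, res1, control;
        };

        static void build_read_log_fis(struct h2d_fis_sketch *fis)
        {
                memset(fis, 0, sizeof(*fis));
                fis->fis_type = 0x27;                   /* register, host to device */
                fis->flags = 0x80;                      /* C bit: command update */
                fis->command = ATA_CMD_READ_LOG_EXT;    /* 0x2f */
                fis->lbal = 0x10;                       /* log address: NCQ error log */
                fis->sector_count = 1;                  /* one 512-byte log page */
        }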
psspPayload->ssp_resp_iu.status);
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
t->task_done(t);
}
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
t->task_done(t);
}
param = le32_to_cpu(psataPayload->param);
tag = le32_to_cpu(psataPayload->tag);
- if (!tag) {
- pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
- return;
- }
-
ccb = &pm8001_ha->ccb_info[tag];
t = ccb->task;
pm8001_dev = ccb->device;
ts->stat = SAS_SAM_STAT_GOOD;
/* check if response is for SEND READ LOG */
if (pm8001_dev &&
- (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
- /* set new bit for abort_all */
- pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
- /* clear bit for read log */
- pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+ (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
pm8001_send_abort_all(pm8001_ha, pm8001_dev);
/* Free the tag */
pm8001_tag_free(pm8001_ha, tag);
len = sizeof(struct pio_setup_fis);
pm8001_dbg(pm8001_ha, IO,
"PIO read len = %d\n", len);
- } else if (t->ata_task.use_ncq) {
+ } else if (t->ata_task.use_ncq &&
+ t->data_dir != DMA_NONE) {
len = sizeof(struct set_dev_bits_fis);
pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
len);
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
IO_DS_NON_OPERATIONAL);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
IO_DS_IN_ERROR);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* in order to force CPU ordering */
- t->task_done(t);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
u32 device_id = le32_to_cpu(pPayload->device_id);
u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
- pm8001_dbg(pm8001_ha, MSG, "Set device id = 0x%x state from 0x%x to 0x%x status = 0x%x!\n",
+
+ pm8001_dbg(pm8001_ha, MSG,
+ "Set device id = 0x%x state from 0x%x to 0x%x status = 0x%x!\n",
device_id, pds, nds, status);
complete(pm8001_dev->setds_completion);
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 tag = le32_to_cpu(pPayload->tag);
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
+
complete(pm8001_ha->nvmd_completion);
pm8001_dbg(pm8001_ha, MSG, "Set nvm data complete!\n");
if ((dlen_status & NVMD_STAT) != 0) {
pm8001_dbg(pm8001_ha, FAIL, "Set nvm data error %x\n",
dlen_status);
}
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
void
/* We should free the tag on failure as well; the requesting path
* does not free it anywhere.
*/
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
return;
}
if (ir_tds_bn_dps_das_nvm & IPMode) {
*/
complete(pm8001_ha->nvmd_completion);
pm8001_dbg(pm8001_ha, MSG, "Get nvmd data complete!\n");
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct hw_event_ack_req payload;
u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
- struct inbound_queue_table *circularQ;
-
memset((u8 *)&payload, 0, sizeof(payload));
- circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
payload.tag = cpu_to_le32(1);
payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
((phyId & 0x0F) << 4) | (port_id & 0x0F));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ pm8001_mpi_build_cmd(pm8001_ha, Qnum, opc, &payload, sizeof(payload), 0);
}
static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
break;
}
complete(pm8001_dev->dcompletion);
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, htag);
+ pm8001_ccb_free(pm8001_ha, ccb);
return 0;
}
(struct fw_flash_Update_resp *)(piomb + 4);
u32 tag = le32_to_cpu(ppayload->tag);
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
+
status = le32_to_cpu(ppayload->status);
switch (status) {
case FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
break;
}
kfree(ccb->fw_control_context);
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
complete(pm8001_ha->nvmd_completion);
return 0;
}
status = le32_to_cpu(pPayload->status);
tag = le32_to_cpu(pPayload->tag);
- if (!tag) {
- pm8001_dbg(pm8001_ha, FAIL, " TAG NULL. RETURNING !!!\n");
- return -1;
- }
scp = le32_to_cpu(pPayload->scp);
ccb = &pm8001_ha->ccb_info[tag];
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();
if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) {
- pm8001_tag_free(pm8001_ha, tag);
sas_free_task(t);
- /* clear the flag */
- pm8001_dev->id &= 0xBFFFFFFF;
- } else
+ pm8001_dev->id &= ~NCQ_ABORT_ALL_FLAG;
+ } else {
t->task_done(t);
+ }
return 0;
}
u32 req_len, resp_len;
struct smp_req smp_cmd;
u32 opc;
- struct inbound_queue_table *circularQ;
memset(&smp_cmd, 0, sizeof(smp_cmd));
/*
}
opc = OPC_INB_SMP_REQUEST;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
smp_cmd.long_smp_req.long_req_addr =
cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
smp_cmd.long_smp_req.long_resp_size =
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &smp_cmd, sizeof(smp_cmd), 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc,
+ &smp_cmd, sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct ssp_ini_io_start_req ssp_cmd;
u32 tag = ccb->ccb_tag;
- int ret;
u64 phys_addr;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd,
- sizeof(ssp_cmd), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &ssp_cmd,
+ sizeof(ssp_cmd), 0);
}
static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
u32 tag = ccb->ccb_tag;
- int ret;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
u64 phys_addr;
u32 ATAP = 0x0;
u32 dir;
- struct inbound_queue_table *circularQ;
unsigned long flags;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
memset(&sata_cmd, 0, sizeof(sata_cmd));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
- if (task->data_dir == DMA_NONE) {
+
+ if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
ATAP = 0x04; /* no data*/
pm8001_dbg(pm8001_ha, IO, "no data\n");
} else if (likely(!task->ata_task.device_control_reg_update)) {
- if (task->ata_task.dma_xfer) {
+ if (task->ata_task.use_ncq &&
+ dev->sata_dev.class != ATA_DEV_ATAPI) {
+ ATAP = 0x07; /* FPDMA */
+ pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
+ } else if (task->ata_task.dma_xfer) {
ATAP = 0x06; /* DMA */
pm8001_dbg(pm8001_ha, IO, "DMA\n");
} else {
ATAP = 0x05; /* PIO*/
pm8001_dbg(pm8001_ha, IO, "PIO\n");
}
- if (task->ata_task.use_ncq &&
- dev->sata_dev.class != ATA_DEV_ATAPI) {
- ATAP = 0x07; /* FPDMA */
- pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
- }
}
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_SAM_STAT_GOOD;
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags &
SAS_TASK_STATE_ABORTED))) {
"task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n",
task, ts->resp,
ts->stat);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- pm8001_ccb_task_free_done(pm8001_ha, task,
- ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return 0;
}
}
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
- sizeof(sata_cmd), 0);
- return ret;
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
}
/**
pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
{
struct phy_start_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 tag = 0x01;
u32 opcode = OPC_INB_PHYSTART;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
/*
memcpy(payload.sas_identify.sas_addr,
pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+ sizeof(payload), 0);
}
/**
u8 phy_id)
{
struct phy_stop_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 tag = 0x01;
u32 opcode = OPC_INB_PHYSTOP;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+ sizeof(payload), 0);
}
/*
struct reg_dev_req payload;
u32 opc;
u32 stp_sspsmp_sata = 0x4;
- struct inbound_queue_table *circularQ;
u32 linkrate, phy_id;
- int rc, tag = 0xdeadbeef;
+ int rc;
struct pm8001_ccb_info *ccb;
u8 retryFlag = 0x1;
u16 firstBurstSize = 0;
struct domain_device *dev = pm8001_dev->sas_device;
struct domain_device *parent_dev = dev->parent;
struct pm8001_port *port = dev->port->lldd_port;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&payload, 0, sizeof(payload));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- return rc;
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->device = pm8001_dev;
- ccb->ccb_tag = tag;
- payload.tag = cpu_to_le32(tag);
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL);
+ if (!ccb)
+ return -SAS_QUEUE_FULL;
+
+ payload.tag = cpu_to_le32(ccb->ccb_tag);
if (flag == 1)
stp_sspsmp_sata = 0x02; /*direct attached sata */
else {
cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
+ if (rc)
+ pm8001_ccb_free(pm8001_ha, ccb);
+
return rc;
}
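
This hunk is the template the rest of the series follows: the tag and its ccb are taken in one step with pm8001_ccb_alloc(), and every failure path after that point must hand them back with pm8001_ccb_free(). A minimal sketch of the pattern; example_send_request and its opc/payload parameters are hypothetical, while the two helpers are the ones introduced later in this series:

static int example_send_request(struct pm8001_hba_info *pm8001_ha,
				struct pm8001_device *pm8001_dev,
				u32 opc, void *payload, size_t len)
{
	struct pm8001_ccb_info *ccb;
	int rc;

	/* one call allocates the tag and initializes the ccb */
	ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL);
	if (!ccb)
		return -SAS_QUEUE_FULL;

	/* real callers also stamp ccb->ccb_tag into the payload here */
	rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, payload, len, 0);
	if (rc)
		pm8001_ccb_free(pm8001_ha, ccb);	/* undo on failure */

	return rc;
}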
{
struct dereg_dev_req payload;
u32 opc = OPC_INB_DEREG_DEV_HANDLE;
- int ret;
- struct inbound_queue_table *circularQ;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(1);
payload.device_id = cpu_to_le32(device_id);
pm8001_dbg(pm8001_ha, MSG, "unregister device device_id = %d\n",
device_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
}
/**
u32 phyId, u32 phy_op)
{
struct local_phy_ctl_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+
memset(&payload, 0, sizeof(payload));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(1);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
}
static u32 pm8001_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
}
static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
- u32 dev_id, u8 flag, u32 task_tag, u32 cmd_tag)
+ u32 dev_id, enum sas_internal_abort type, u32 task_tag, u32 cmd_tag)
{
struct task_abort_req task_abort;
- struct inbound_queue_table *circularQ;
- int ret;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&task_abort, 0, sizeof(task_abort));
- if (ABORT_SINGLE == (flag & ABORT_MASK)) {
+ if (type == SAS_INTERNAL_ABORT_SINGLE) {
task_abort.abort_all = 0;
task_abort.device_id = cpu_to_le32(dev_id);
task_abort.tag_to_abort = cpu_to_le32(task_tag);
- task_abort.tag = cpu_to_le32(cmd_tag);
- } else if (ABORT_ALL == (flag & ABORT_MASK)) {
+ } else if (type == SAS_INTERNAL_ABORT_DEV) {
task_abort.abort_all = cpu_to_le32(1);
task_abort.device_id = cpu_to_le32(dev_id);
- task_abort.tag = cpu_to_le32(cmd_tag);
+ } else {
+ pm8001_dbg(pm8001_ha, EH, "unknown type (%d)\n", type);
+ return -EIO;
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
- sizeof(task_abort), 0);
- return ret;
+
+ task_abort.tag = cpu_to_le32(cmd_tag);
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort,
+ sizeof(task_abort), 0);
}
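
send_task_abort() now keys off the generic libsas internal-abort type instead of the driver-private ABORT_* flag bits. For reference, a sketch of the enum as declared in include/scsi/libsas.h around this series (from memory, so verify against the tree):

enum sas_internal_abort {
	SAS_INTERNAL_ABORT_SINGLE,	/* abort one tagged command */
	SAS_INTERNAL_ABORT_DEV,		/* abort everything on a device */
};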
/*
* pm8001_chip_abort_task - SAS abort task when error or exception happened.
*/
int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
+ struct pm8001_ccb_info *ccb)
{
- u32 opc, device_id;
+ struct sas_task *task = ccb->task;
+ struct sas_internal_abort_task *abort = &task->abort_task;
+ struct pm8001_device *pm8001_dev = ccb->device;
int rc = TMF_RESP_FUNC_FAILED;
+ u32 opc, device_id;
+
pm8001_dbg(pm8001_ha, EH, "cmd_tag = %x, abort task tag = 0x%x\n",
- cmd_tag, task_tag);
+ ccb->ccb_tag, abort->tag);
if (pm8001_dev->dev_type == SAS_END_DEVICE)
opc = OPC_INB_SSP_ABORT;
else if (pm8001_dev->dev_type == SAS_SATA_DEV)
else
opc = OPC_INB_SMP_ABORT;/* SMP */
device_id = pm8001_dev->device_id;
- rc = send_task_abort(pm8001_ha, opc, device_id, flag,
- task_tag, cmd_tag);
+ rc = send_task_abort(pm8001_ha, opc, device_id, abort->type,
+ abort->tag, ccb->ccb_tag);
if (rc != TMF_RESP_FUNC_COMPLETE)
pm8001_dbg(pm8001_ha, EH, "rc= %d\n", rc);
return rc;
* @tmf: task management function.
*/
int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
+ struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
{
struct sas_task *task = ccb->task;
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
u32 opc = OPC_INB_SSPINITMSTART;
- struct inbound_queue_table *circularQ;
struct ssp_ini_tm_start_req sspTMCmd;
- int ret;
memset(&sspTMCmd, 0, sizeof(sspTMCmd));
sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id);
- sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed);
+ sspTMCmd.relate_tag = cpu_to_le32((u32)tmf->tag_of_task_to_be_managed);
sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
if (pm8001_ha->chip_id != chip_8001)
- sspTMCmd.ds_ads_m = 0x08;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd,
- sizeof(sspTMCmd), 0);
- return ret;
+ sspTMCmd.ds_ads_m = cpu_to_le32(0x08);
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sspTMCmd,
+ sizeof(sspTMCmd), 0);
}
int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
u32 opc = OPC_INB_GET_NVMD_DATA;
u32 nvmd_type;
int rc;
- u32 tag;
struct pm8001_ccb_info *ccb;
- struct inbound_queue_table *circularQ;
struct get_nvm_data_req nvmd_req;
struct fw_control_ex *fw_control_context;
struct pm8001_ioctl_payload *ioctl_payload = payload;
return -ENOMEM;
fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
fw_control_context->len = ioctl_payload->rd_length;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&nvmd_req, 0, sizeof(nvmd_req));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc) {
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL);
+ if (!ccb) {
kfree(fw_control_context);
- return rc;
+ return -SAS_QUEUE_FULL;
}
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->ccb_tag = tag;
ccb->fw_control_context = fw_control_context;
- nvmd_req.tag = cpu_to_le32(tag);
+
+ nvmd_req.tag = cpu_to_le32(ccb->ccb_tag);
switch (nvmd_type) {
case TWI_DEVICE: {
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
- sizeof(nvmd_req), 0);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &nvmd_req,
+ sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
return rc;
}
u32 opc = OPC_INB_SET_NVMD_DATA;
u32 nvmd_type;
int rc;
- u32 tag;
struct pm8001_ccb_info *ccb;
- struct inbound_queue_table *circularQ;
struct set_nvm_data_req nvmd_req;
struct fw_control_ex *fw_control_context;
struct pm8001_ioctl_payload *ioctl_payload = payload;
fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
if (!fw_control_context)
return -ENOMEM;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
&ioctl_payload->func_specific,
ioctl_payload->wr_length);
memset(&nvmd_req, 0, sizeof(nvmd_req));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc) {
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL);
+ if (!ccb) {
kfree(fw_control_context);
- return -EBUSY;
+ return -SAS_QUEUE_FULL;
}
- ccb = &pm8001_ha->ccb_info[tag];
ccb->fw_control_context = fw_control_context;
- ccb->ccb_tag = tag;
- nvmd_req.tag = cpu_to_le32(tag);
+
+ nvmd_req.tag = cpu_to_le32(ccb->ccb_tag);
switch (nvmd_type) {
case TWI_DEVICE: {
u32 twi_addr, twi_page_size;
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &nvmd_req,
sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
return rc;
}
{
struct fw_flash_Update_req payload;
struct fw_flash_updata_info *info;
- struct inbound_queue_table *circularQ;
- int ret;
u32 opc = OPC_INB_FW_FLASH_UPDATE;
memset(&payload, 0, sizeof(struct fw_flash_Update_req));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
info = fw_flash_updata_info;
payload.tag = cpu_to_le32(tag);
payload.cur_image_len = cpu_to_le32(info->cur_image_len);
cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
payload.sgl_addr_hi =
cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
}
int
struct fw_control_info *fw_control;
struct fw_control_ex *fw_control_context;
int rc;
- u32 tag;
struct pm8001_ccb_info *ccb;
void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr;
dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr;
fw_control_context->virtAddr = buffer;
fw_control_context->phys_addr = phys_addr;
fw_control_context->len = fw_control->len;
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc) {
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL);
+ if (!ccb) {
kfree(fw_control_context);
- return -EBUSY;
+ return -SAS_QUEUE_FULL;
}
- ccb = &pm8001_ha->ccb_info[tag];
ccb->fw_control_context = fw_control_context;
- ccb->ccb_tag = tag;
+
rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info,
- tag);
+ ccb->ccb_tag);
+ if (rc) {
+ kfree(fw_control_context);
+ pm8001_ccb_free(pm8001_ha, ccb);
+ }
+
return rc;
}
struct pm8001_device *pm8001_dev, u32 state)
{
struct set_dev_state_req payload;
- struct inbound_queue_table *circularQ;
struct pm8001_ccb_info *ccb;
int rc;
- u32 tag;
u32 opc = OPC_INB_SET_DEVICE_STATE;
+
memset(&payload, 0, sizeof(payload));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- return -1;
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->ccb_tag = tag;
- ccb->device = pm8001_dev;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
- payload.tag = cpu_to_le32(tag);
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL);
+ if (!ccb)
+ return -SAS_QUEUE_FULL;
+
+ payload.tag = cpu_to_le32(ccb->ccb_tag);
payload.device_id = cpu_to_le32(pm8001_dev->device_id);
payload.nds = cpu_to_le32(state);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
- return rc;
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
+ if (rc)
+ pm8001_ccb_free(pm8001_ha, ccb);
+
+ return rc;
}
static int
pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
{
struct sas_re_initialization_req payload;
- struct inbound_queue_table *circularQ;
struct pm8001_ccb_info *ccb;
int rc;
- u32 tag;
u32 opc = OPC_INB_SAS_RE_INITIALIZE;
+
memset(&payload, 0, sizeof(payload));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- return -ENOMEM;
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->ccb_tag = tag;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
- payload.tag = cpu_to_le32(tag);
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL);
+ if (!ccb)
+ return -SAS_QUEUE_FULL;
+
+ payload.tag = cpu_to_le32(ccb->ccb_tag);
payload.SSAHOLT = cpu_to_le32(0xd << 25);
payload.sata_hol_tmo = cpu_to_le32(80);
payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
if (rc)
- pm8001_tag_free(pm8001_ha, tag);
- return rc;
+ pm8001_ccb_free(pm8001_ha, ccb);
+ return rc;
}
const struct pm8001_dispatch pm8001_8001_dispatch = {
u32 reserved[11];
} __attribute__((packed, aligned(4)));
-/* These flags used for SSP SMP & SATA Abort */
-#define ABORT_MASK 0x3
-#define ABORT_SINGLE 0x0
-#define ABORT_ALL 0x1
-
/**
* brief the data structure of SSP SATA SMP Abort Response
* use to describe SSP SMP & SATA Abort Response ( 64 bytes)
.lldd_control_phy = pm8001_phy_control,
.lldd_abort_task = pm8001_abort_task,
- .lldd_abort_task_set = pm8001_abort_task_set,
- .lldd_clear_aca = pm8001_clear_aca,
+ .lldd_abort_task_set = sas_abort_task_set,
.lldd_clear_task_set = pm8001_clear_task_set,
.lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset,
.lldd_lu_reset = pm8001_lu_reset,
.lldd_query_task = pm8001_query_task,
.lldd_port_formed = pm8001_port_formed,
+ .lldd_tmf_exec_complete = pm8001_setds_completion,
+ .lldd_tmf_aborted = pm8001_tmf_aborted,
};
/**
goto err_out;
}
pm8001_ha->ccb_info[i].task = NULL;
- pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
+ pm8001_ha->ccb_info[i].ccb_tag = PM8001_INVALID_TAG;
pm8001_ha->ccb_info[i].device = NULL;
++pm8001_ha->tags_num;
}
+
return 0;
err_out_noccb:
struct pm8001_hba_info *pm8001_ha;
int rc;
u8 i = 0, j;
- u32 device_state;
DECLARE_COMPLETION_ONSTACK(completion);
+
pm8001_ha = sha->lldd_ha;
- device_state = pdev->current_state;
- pm8001_info(pm8001_ha, "pdev=0x%p, slot=%s, resuming from previous operating state [D%d]\n",
- pdev, pm8001_ha->name, device_state);
+ pm8001_info(pm8001_ha,
+ "pdev=0x%p, slot=%s, resuming from previous operating state [D%d]\n",
+ pdev, pm8001_ha->name, pdev->current_state);
rc = pci_go_44(pdev);
if (rc)
* @pm8001_ha: our hba struct
* @tag_out: the found empty tag .
*/
-inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
+int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
unsigned int tag;
void *bitmap = pm8001_ha->tags;
}
{
struct sas_phy *phy = sas_phy->phy;
- uint32_t *qp = (uint32_t *)(((char *)
- pm8001_ha->io_mem[2].memvirtaddr)
- + 0x1034 + (0x4000 * (phy_id & 3)));
-
- phy->invalid_dword_count = qp[0];
- phy->running_disparity_error_count = qp[1];
- phy->loss_of_dword_sync_count = qp[3];
- phy->phy_reset_problem_count = qp[4];
+ u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr
+ + 0x1034 + (0x4000 * (phy_id & 3));
+
+ phy->invalid_dword_count = readl(qp);
+ phy->running_disparity_error_count = readl(&qp[1]);
+ phy->loss_of_dword_sync_count = readl(&qp[3]);
+ phy->phy_reset_problem_count = readl(&qp[4]);
}
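
The conversion above is more than style: memory mapped through ioremap() carries the __iomem annotation and must be accessed with the MMIO accessors rather than plain pointer loads. A minimal sketch of the rule, with an illustrative offset:

#include <linux/io.h>

/*
 * MMIO goes through readl()/writel(): the accesses are ordered,
 * endian-correct, and sparse can check the __iomem annotation.
 */
static u32 example_read_phy_counter(void __iomem *base, unsigned int off)
{
	return readl(base + off);
}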
if (pm8001_ha->chip_id == chip_8001)
pm8001_bar4_shift(pm8001_ha, 0);
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
struct ata_queued_cmd *qc = task->uldd_task;
- if (qc) {
- if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ ||
- qc->tf.command == ATA_CMD_FPDMA_RECV ||
- qc->tf.command == ATA_CMD_FPDMA_SEND ||
- qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
- *tag = qc->tag;
- return 1;
- }
+
+ if (qc && ata_is_ncq(qc->tf.protocol)) {
+ *tag = qc->tag;
+ return 1;
}
+
return 0;
}
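
Rather than enumerating every FPDMA opcode, the rewrite asks libata whether the queued command's taskfile protocol is NCQ. A minimal sketch of the helper's use; ata_is_ncq() comes from include/linux/libata.h:

#include <linux/libata.h>

static bool example_is_ncq(const struct ata_queued_cmd *qc)
{
	/* NCQ-ness is a property of the protocol, not the command opcode */
	return ata_is_ncq(qc->tf.protocol);
}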
return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
}
+/**
+ * pm8001_task_prep_internal_abort - the dispatcher function, prepare data
+ * for an internal abort task
+ * @pm8001_ha: our hba card information
+ * @ccb: the ccb attached to the internal abort task
+ */
+static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb);
+}
+
/**
* pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
* @pm8001_ha: our hba card information
* @tmf: the task management IU
*/
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
+ struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
{
return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}
#define DEV_IS_GONE(pm8001_dev) \
((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
+
+
+static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ struct sas_task *task = ccb->task;
+ enum sas_protocol task_proto = task->task_proto;
+ struct sas_tmf_task *tmf = task->tmf;
+ int is_tmf = !!tmf;
+
+ switch (task_proto) {
+ case SAS_PROTOCOL_SMP:
+ return pm8001_task_prep_smp(pm8001_ha, ccb);
+ case SAS_PROTOCOL_SSP:
+ if (is_tmf)
+ return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf);
+ return pm8001_task_prep_ssp(pm8001_ha, ccb);
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ return pm8001_task_prep_ata(pm8001_ha, ccb);
+ case SAS_PROTOCOL_INTERNAL_ABORT:
+ return pm8001_task_prep_internal_abort(pm8001_ha, ccb);
+ default:
+ dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n",
+ task_proto);
+ }
+
+ return -EINVAL;
+}
+
/**
- * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
+ * pm8001_queue_command - entry point for the upper layer; all IO commands
+ * sent to the HBA come through this interface.
 * @task: the task to be executed.
- * @gfp_flags: gfp_flags.
- * @is_tmf: if it is task management task.
- * @tmf: the task management IU
+ * @gfp_flags: gfp_flags
*/
-static int pm8001_task_exec(struct sas_task *task,
- gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
+int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
+ struct task_status_struct *ts = &task->task_status;
+ enum sas_protocol task_proto = task->task_proto;
struct domain_device *dev = task->dev;
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ bool internal_abort = sas_is_internal_abort(task);
struct pm8001_hba_info *pm8001_ha;
- struct pm8001_device *pm8001_dev;
struct pm8001_port *port = NULL;
- struct sas_task *t = task;
struct pm8001_ccb_info *ccb;
- u32 tag = 0xdeadbeef, rc = 0, n_elem = 0;
- unsigned long flags = 0;
- enum sas_protocol task_proto = t->task_proto;
+ unsigned long flags;
+ u32 n_elem = 0;
+ int rc = 0;
- if (!dev->port) {
- struct task_status_struct *tsm = &t->task_status;
- tsm->resp = SAS_TASK_UNDELIVERED;
- tsm->stat = SAS_PHY_DOWN;
+ if (!internal_abort && !dev->port) {
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
if (dev->dev_type != SAS_SATA_DEV)
- t->task_done(t);
+ task->task_done(task);
return 0;
}
- pm8001_ha = pm8001_find_ha_by_dev(task->dev);
- if (pm8001_ha->controller_fatal_error) {
- struct task_status_struct *ts = &t->task_status;
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+ if (pm8001_ha->controller_fatal_error) {
ts->resp = SAS_TASK_UNDELIVERED;
- t->task_done(t);
+ task->task_done(task);
return 0;
}
+
pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");
+
spin_lock_irqsave(&pm8001_ha->lock, flags);
- do {
- dev = t->dev;
- pm8001_dev = dev->lldd_dev;
- port = &pm8001_ha->port[sas_find_local_port_id(dev)];
- if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
- if (sas_protocol_ata(task_proto)) {
- struct task_status_struct *ts = &t->task_status;
- ts->resp = SAS_TASK_UNDELIVERED;
- ts->stat = SAS_PHY_DOWN;
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- t->task_done(t);
- spin_lock_irqsave(&pm8001_ha->lock, flags);
- continue;
- } else {
- struct task_status_struct *ts = &t->task_status;
- ts->resp = SAS_TASK_UNDELIVERED;
- ts->stat = SAS_PHY_DOWN;
- t->task_done(t);
- continue;
- }
- }
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- goto err_out;
- ccb = &pm8001_ha->ccb_info[tag];
-
- if (!sas_protocol_ata(task_proto)) {
- if (t->num_scatter) {
- n_elem = dma_map_sg(pm8001_ha->dev,
- t->scatter,
- t->num_scatter,
- t->data_dir);
- if (!n_elem) {
- rc = -ENOMEM;
- goto err_out_tag;
- }
- }
+ pm8001_dev = dev->lldd_dev;
+ port = &pm8001_ha->port[sas_find_local_port_id(dev)];
+
+ if (!internal_abort &&
+ (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) {
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ if (sas_protocol_ata(task_proto)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ task->task_done(task);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
} else {
- n_elem = t->num_scatter;
+ task->task_done(task);
}
+ rc = -ENODEV;
+ goto err_out;
+ }
- t->lldd_task = ccb;
- ccb->n_elem = n_elem;
- ccb->ccb_tag = tag;
- ccb->task = t;
- ccb->device = pm8001_dev;
- switch (task_proto) {
- case SAS_PROTOCOL_SMP:
- atomic_inc(&pm8001_dev->running_req);
- rc = pm8001_task_prep_smp(pm8001_ha, ccb);
- break;
- case SAS_PROTOCOL_SSP:
- atomic_inc(&pm8001_dev->running_req);
- if (is_tmf)
- rc = pm8001_task_prep_ssp_tm(pm8001_ha,
- ccb, tmf);
- else
- rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
- break;
- case SAS_PROTOCOL_SATA:
- case SAS_PROTOCOL_STP:
- atomic_inc(&pm8001_dev->running_req);
- rc = pm8001_task_prep_ata(pm8001_ha, ccb);
- break;
- default:
- dev_printk(KERN_ERR, pm8001_ha->dev,
- "unknown sas_task proto: 0x%x\n", task_proto);
- rc = -EINVAL;
- break;
- }
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
+ if (!ccb) {
+ rc = -SAS_QUEUE_FULL;
+ goto err_out;
+ }
- if (rc) {
- pm8001_dbg(pm8001_ha, IO, "rc is %x\n", rc);
- atomic_dec(&pm8001_dev->running_req);
- goto err_out_tag;
+ if (!sas_protocol_ata(task_proto)) {
+ if (task->num_scatter) {
+ n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
+ task->num_scatter, task->data_dir);
+ if (!n_elem) {
+ rc = -ENOMEM;
+ goto err_out_ccb;
+ }
}
- /* TODO: select normal or high priority */
- spin_lock(&t->task_state_lock);
- t->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock(&t->task_state_lock);
- } while (0);
- rc = 0;
- goto out_done;
-
-err_out_tag:
- pm8001_tag_free(pm8001_ha, tag);
+ } else {
+ n_elem = task->num_scatter;
+ }
+
+ task->lldd_task = ccb;
+ ccb->n_elem = n_elem;
+
+ atomic_inc(&pm8001_dev->running_req);
+
+ rc = pm8001_deliver_command(pm8001_ha, ccb);
+ if (rc) {
+ atomic_dec(&pm8001_dev->running_req);
+ if (!sas_protocol_ata(task_proto) && n_elem)
+ dma_unmap_sg(pm8001_ha->dev, task->scatter,
+ task->num_scatter, task->data_dir);
+err_out_ccb:
+ pm8001_ccb_free(pm8001_ha, ccb);
+
err_out:
- dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
- if (!sas_protocol_ata(task_proto))
- if (n_elem)
- dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
- t->data_dir);
-out_done:
+ pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc);
+ }
+
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- return rc;
-}
-/**
- * pm8001_queue_command - register for upper layer used, all IO commands sent
- * to HBA are from this interface.
- * @task: the task to be execute.
- * @gfp_flags: gfp_flags
- */
-int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
-{
- return pm8001_task_exec(task, gfp_flags, 0, NULL);
+ return rc;
}
/**
* pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb.
* @pm8001_ha: our hba card information
- * @ccb: the ccb which attached to ssp task
- * @task: the task to be free.
- * @ccb_idx: ccb index.
+ * @ccb: the ccb attached to the task to free
*/
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
- struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
+ struct pm8001_ccb_info *ccb)
{
+ struct sas_task *task = ccb->task;
struct ata_queued_cmd *qc;
struct pm8001_device *pm8001_dev;
- if (!ccb->task)
+ if (!task)
return;
- if (!sas_protocol_ata(task->task_proto))
- if (ccb->n_elem)
- dma_unmap_sg(pm8001_ha->dev, task->scatter,
- task->num_scatter, task->data_dir);
+
+ if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
+ dma_unmap_sg(pm8001_ha->dev, task->scatter,
+ task->num_scatter, task->data_dir);
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
}
if (sas_protocol_ata(task->task_proto)) {
- // For SCSI/ATA commands uldd_task points to ata_queued_cmd
+ /* For SCSI/ATA commands uldd_task points to ata_queued_cmd */
qc = task->uldd_task;
pm8001_dev = ccb->device;
trace_pm80xx_request_complete(pm8001_ha->id,
pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
- ccb_idx, 0 /* ctlr_opcode not known */,
+ ccb->ccb_tag, 0 /* ctlr_opcode not known */,
qc ? qc->tf.command : 0, // ata opcode
pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
}
task->lldd_task = NULL;
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- ccb->open_retry = 0;
- pm8001_tag_free(pm8001_ha, ccb_idx);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
/**
complete(&task->slow_task->completion);
}
-static void pm8001_tmf_timedout(struct timer_list *t)
-{
- struct sas_task_slow *slow = from_timer(slow, t, timer);
- struct sas_task *task = slow->task;
- unsigned long flags;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- complete(&task->slow_task->completion);
- }
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-}
-
#define PM8001_TASK_TIMEOUT 20
-/**
- * pm8001_exec_internal_tmf_task - execute some task management commands.
- * @dev: the wanted device.
- * @tmf: which task management wanted to be take.
- * @para_len: para_len.
- * @parameter: ssp task parameter.
- *
- * when errors or exception happened, we may want to do something, for example
- * abort the issued task which result in this exception, it is done by calling
- * this function, note it is also with the task execute interface.
- */
-static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
- void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
-{
- int res, retry;
- struct sas_task *task = NULL;
- struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
- struct pm8001_device *pm8001_dev = dev->lldd_dev;
- DECLARE_COMPLETION_ONSTACK(completion_setstate);
-
- for (retry = 0; retry < 3; retry++) {
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = dev;
- task->task_proto = dev->tproto;
- memcpy(&task->ssp_task, parameter, para_len);
- task->task_done = pm8001_task_done;
- task->slow_task->timer.function = pm8001_tmf_timedout;
- task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
- add_timer(&task->slow_task->timer);
-
- res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);
-
- if (res) {
- del_timer(&task->slow_task->timer);
- pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
- goto ex_err;
- }
- wait_for_completion(&task->slow_task->completion);
- if (pm8001_ha->chip_id != chip_8001) {
- pm8001_dev->setds_completion = &completion_setstate;
- PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
- pm8001_dev, DS_OPERATIONAL);
- wait_for_completion(&completion_setstate);
- }
- res = -TMF_RESP_FUNC_FAILED;
- /* Even TMF timed out, return direct. */
- if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
- struct pm8001_ccb_info *ccb = task->lldd_task;
-
- pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
- tmf->tmf);
-
- if (ccb)
- ccb->task = NULL;
- goto ex_err;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_SAM_STAT_GOOD) {
- res = TMF_RESP_FUNC_COMPLETE;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_UNDERRUN) {
- /* no error, but return the number of bytes of
- * underrun */
- res = task->task_status.residual;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_OVERRUN) {
- pm8001_dbg(pm8001_ha, FAIL, "Blocked task error.\n");
- res = -EMSGSIZE;
- break;
- } else {
- pm8001_dbg(pm8001_ha, EH,
- " Task to dev %016llx response:0x%x status 0x%x\n",
- SAS_ADDR(dev->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- sas_free_task(task);
- task = NULL;
- }
- }
-ex_err:
- BUG_ON(retry == 3 && task != NULL);
- sas_free_task(task);
- return res;
-}
-
-static int
-pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
- u32 task_tag)
-{
- int res, retry;
- u32 ccb_tag;
- struct pm8001_ccb_info *ccb;
- struct sas_task *task = NULL;
-
- for (retry = 0; retry < 3; retry++) {
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = dev;
- task->task_proto = dev->tproto;
- task->task_done = pm8001_task_done;
- task->slow_task->timer.function = pm8001_tmf_timedout;
- task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
- add_timer(&task->slow_task->timer);
-
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res)
- goto ex_err;
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
- ccb->n_elem = 0;
-
- res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
- pm8001_dev, flag, task_tag, ccb_tag);
-
- if (res) {
- del_timer(&task->slow_task->timer);
- pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
- goto ex_err;
- }
- wait_for_completion(&task->slow_task->completion);
- res = TMF_RESP_FUNC_FAILED;
- /* Even TMF timed out, return direct. */
- if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
- pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n");
- goto ex_err;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_SAM_STAT_GOOD) {
- res = TMF_RESP_FUNC_COMPLETE;
- break;
-
- } else {
- pm8001_dbg(pm8001_ha, EH,
- " Task to dev %016llx response: 0x%x status 0x%x\n",
- SAS_ADDR(dev->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- sas_free_task(task);
- task = NULL;
- }
- }
-ex_err:
- BUG_ON(retry == 3 && task != NULL);
- sas_free_task(task);
- return res;
-}
/**
* pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
pm8001_dev->device_id, pm8001_dev->dev_type);
if (atomic_read(&pm8001_dev->running_req)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- dev, 1, 0);
+ sas_execute_internal_abort_dev(dev, 0, NULL);
while (atomic_read(&pm8001_dev->running_req))
msleep(20);
spin_lock_irqsave(&pm8001_ha->lock, flags);
pm8001_dev_gone_notify(dev);
}
-static int pm8001_issue_ssp_tmf(struct domain_device *dev,
- u8 *lun, struct pm8001_tmf_task *tmf)
-{
- struct sas_ssp_task ssp_task;
- if (!(dev->tproto & SAS_PROTOCOL_SSP))
- return TMF_RESP_FUNC_ESUPP;
-
- memcpy((u8 *)&ssp_task.LUN, lun, 8);
- return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
- tmf);
-}
-
/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
struct pm8001_hba_info *pm8001_ha,
struct task_status_struct *ts;
struct pm8001_device *pm8001_dev;
unsigned long flags1;
- u32 tag;
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
+ if (ccb->ccb_tag == PM8001_INVALID_TAG)
+ continue;
+
pm8001_dev = ccb->device;
if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
continue;
continue;
} else if (pm8001_dev != device_to_close)
continue;
- tag = ccb->ccb_tag;
- if (!tag || (tag == 0xFFFFFFFF))
- continue;
task = ccb->task;
if (!task || !task->task_done)
continue;
atomic_dec(&pm8001_dev->running_req);
spin_lock_irqsave(&task->task_state_lock, flags1);
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags
& SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&task->task_state_lock,
flags1);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&task->task_state_lock,
flags1);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
task->task_done(task);
goto out;
}
msleep(2000);
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- dev, 1, 0);
+ rc = sas_execute_internal_abort_dev(dev, 0, NULL);
if (rc) {
pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
"with rc %d\n", pm8001_dev->device_id, rc);
goto out;
}
/* send internal ssp/sata/smp abort command to FW */
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- dev, 1, 0);
+ sas_execute_internal_abort_dev(dev, 0, NULL);
msleep(100);
/* deregister the target device */
wait_for_completion(&completion_setstate);
} else {
/* send internal ssp/sata/smp abort command to FW */
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- dev, 1, 0);
+ sas_execute_internal_abort_dev(dev, 0, NULL);
msleep(100);
/* deregister the target device */
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
int rc = TMF_RESP_FUNC_FAILED;
- struct pm8001_tmf_task tmf_task;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
DECLARE_COMPLETION_ONSTACK(completion_setstate);
if (dev_is_sata(dev)) {
struct sas_phy *phy = sas_get_local_phy(dev);
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- dev, 1, 0);
+ sas_execute_internal_abort_dev(dev, 0, NULL);
rc = sas_phy_reset(phy, 1);
sas_put_local_phy(phy);
pm8001_dev->setds_completion = &completion_setstate;
pm8001_dev, DS_OPERATIONAL);
wait_for_completion(&completion_setstate);
} else {
- tmf_task.tmf = TMF_LU_RESET;
- rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+ rc = sas_lu_reset(dev, lun);
}
/* If failed, fall-through I_T_Nexus reset */
pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
int pm8001_query_task(struct sas_task *task)
{
u32 tag = 0xdeadbeef;
- struct scsi_lun lun;
- struct pm8001_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
if (unlikely(!task || !task->lldd_task || !task->dev))
return rc;
struct pm8001_hba_info *pm8001_ha =
pm8001_find_ha_by_dev(dev);
- int_to_scsilun(cmnd->device->lun, &lun);
rc = pm8001_find_tag(task, &tag);
if (rc == 0) {
rc = TMF_RESP_FUNC_FAILED;
return rc;
}
pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);
- tmf_task.tmf = TMF_QUERY_TASK;
- tmf_task.tag_of_task_to_be_managed = tag;
- rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+ rc = sas_query_task(task, tag);
switch (rc) {
/* The task is still in Lun, release it then */
case TMF_RESP_FUNC_SUCC:
u32 tag;
struct domain_device *dev;
struct pm8001_hba_info *pm8001_ha;
- struct scsi_lun lun;
struct pm8001_device *pm8001_dev;
- struct pm8001_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED, ret;
u32 phy_id, port_id;
struct sas_task_slow slow_task;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd *cmnd = task->uldd_task;
- int_to_scsilun(cmnd->device->lun, &lun);
- tmf_task.tmf = TMF_ABORT_TASK;
- tmf_task.tag_of_task_to_be_managed = tag;
- rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
- pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- pm8001_dev->sas_device, 0, tag);
+ rc = sas_abort_task(task, tag);
+ sas_execute_internal_abort_single(dev, tag, 0, NULL);
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (pm8001_ha->chip_id == chip_8006) {
* is removed from the ccb. on success the caller is
* going to free the task.
*/
- ret = pm8001_exec_internal_task_abort(pm8001_ha,
- pm8001_dev, pm8001_dev->sas_device, 1, tag);
+ ret = sas_execute_internal_abort_dev(dev, 0, NULL);
if (ret)
goto out;
ret = wait_for_completion_timeout(
pm8001_dev, DS_OPERATIONAL);
wait_for_completion(&completion);
} else {
- rc = pm8001_exec_internal_task_abort(pm8001_ha,
- pm8001_dev, pm8001_dev->sas_device, 0, tag);
+ ret = sas_execute_internal_abort_single(dev, tag, 0, NULL);
}
rc = TMF_RESP_FUNC_COMPLETE;
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
/* SMP */
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- pm8001_dev->sas_device, 0, tag);
+ rc = sas_execute_internal_abort_single(dev, tag, 0, NULL);
}
out:
return rc;
}
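
All of the open-coded TMF and internal-abort loops removed above are replaced by generic libsas services. For reference, their prototypes as declared in include/scsi/libsas.h around this series (listed from memory, so verify against the tree):

int sas_abort_task_set(struct domain_device *dev, u8 *lun);
int sas_clear_task_set(struct domain_device *dev, u8 *lun);
int sas_lu_reset(struct domain_device *dev, u8 *lun);
int sas_query_task(struct sas_task *task, u16 tag);
int sas_abort_task(struct sas_task *task, u16 tag);
int sas_execute_internal_abort_single(struct domain_device *device,
				      u16 tag, unsigned int qid, void *data);
int sas_execute_internal_abort_dev(struct domain_device *device,
				   unsigned int qid, void *data);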
-int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
-{
- struct pm8001_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_ABORT_TASK_SET;
- return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
-}
-
-int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
-{
- struct pm8001_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_CLEAR_ACA;
- return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
-}
-
int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
- struct pm8001_tmf_task tmf_task;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
pm8001_dev->device_id);
- tmf_task.tmf = TMF_CLEAR_TASK_SET;
- return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+ return sas_clear_task_set(dev, lun);
}
void pm8001_port_formed(struct asd_sas_phy *sas_phy)
}
sas_port->lldd_port = port;
}
+
+void pm8001_setds_completion(struct domain_device *dev)
+{
+ struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ DECLARE_COMPLETION_ONSTACK(completion_setstate);
+
+ if (pm8001_ha->chip_id != chip_8001) {
+ pm8001_dev->setds_completion = &completion_setstate;
+ PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, DS_OPERATIONAL);
+ wait_for_completion(&completion_setstate);
+ }
+}
+
+void pm8001_tmf_aborted(struct sas_task *task)
+{
+ struct pm8001_ccb_info *ccb = task->lldd_task;
+
+ if (ccb)
+ ccb->task = NULL;
+}
#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */
#define pm8001_info(HBA, fmt, ...) \
- pr_info("%s:: %s %d:" fmt, \
+ pr_info("%s:: %s %d: " fmt, \
(HBA)->name, __func__, __LINE__, ##__VA_ARGS__)
#define pm8001_dbg(HBA, level, fmt, ...) \
struct pm8001_hba_info;
struct pm8001_ccb_info;
struct pm8001_device;
-/* define task management IU */
-struct pm8001_tmf_task {
- u8 tmf;
- u32 tag_of_task_to_be_managed;
-};
+
struct pm8001_ioctl_payload {
u32 signature;
u16 major_function;
int (*phy_ctl_req)(struct pm8001_hba_info *pm8001_ha,
u32 phy_id, u32 phy_op);
int (*task_abort)(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag,
- u32 cmd_tag);
+ struct pm8001_ccb_info *ccb);
int (*ssp_tm_req)(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf);
+ struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf);
int (*get_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
int (*set_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
int (*fw_flash_update_req)(struct pm8001_hba_info *pm8001_ha,
void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag);
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
- struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);
+ struct pm8001_ccb_info *ccb);
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
void pm8001_scan_start(struct Scsi_Host *shost);
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time);
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags);
int pm8001_abort_task(struct sas_task *task);
-int pm8001_abort_task_set(struct domain_device *dev, u8 *lun);
-int pm8001_clear_aca(struct domain_device *dev, u8 *lun);
int pm8001_clear_task_set(struct domain_device *dev, u8 *lun);
int pm8001_dev_found(struct domain_device *dev);
void pm8001_dev_gone(struct domain_device *dev);
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
- struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, size_t nb,
+ u32 q_index, u32 opCode, void *payload, size_t nb,
u32 responseQueue);
int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
u16 messageSize, void **messagePtr);
int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_ccb_info *ccb,
- struct pm8001_tmf_task *tmf);
+ struct sas_tmf_task *tmf);
int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_device *pm8001_dev,
- u8 flag, u32 task_tag, u32 cmd_tag);
+ struct pm8001_ccb_info *ccb);
int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id);
void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd);
void pm8001_work_fn(struct work_struct *work);
/* ctl shared API */
extern const struct attribute_group *pm8001_host_groups[];
-static inline void
-pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
- struct sas_task *task, struct pm8001_ccb_info *ccb,
- u32 ccb_idx)
+#define PM8001_INVALID_TAG ((u32)-1)
+
+/*
+ * Allocate a new tag and return the corresponding ccb after initializing it.
+ */
+static inline struct pm8001_ccb_info *
+pm8001_ccb_alloc(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *dev, struct sas_task *task)
{
- pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
+ struct pm8001_ccb_info *ccb;
+ u32 tag;
+
+ if (pm8001_tag_alloc(pm8001_ha, &tag)) {
+ pm8001_dbg(pm8001_ha, FAIL, "Failed to allocate a tag\n");
+ return NULL;
+ }
+
+ ccb = &pm8001_ha->ccb_info[tag];
+ ccb->task = task;
+ ccb->n_elem = 0;
+ ccb->ccb_tag = tag;
+ ccb->device = dev;
+ ccb->fw_control_context = NULL;
+ ccb->open_retry = 0;
+
+ return ccb;
+}
+
+/*
+ * Free the tag of an initialized ccb.
+ */
+static inline void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ u32 tag = ccb->ccb_tag;
+
+ /*
+ * Cleanup the ccb to make sure that a manual scan of the adapter
+ * ccb_info array can detect ccbs that are in use.
+ * Cf. pm8001_open_reject_retry()
+ */
+ ccb->task = NULL;
+ ccb->ccb_tag = PM8001_INVALID_TAG;
+ ccb->device = NULL;
+ ccb->fw_control_context = NULL;
+
+ pm8001_tag_free(pm8001_ha, tag);
+}
+
+static inline void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ struct sas_task *task = ccb->task;
+
+ pm8001_ccb_task_free(pm8001_ha, ccb);
smp_mb(); /*in order to force CPU ordering*/
task->task_done(task);
}
+void pm8001_setds_completion(struct domain_device *dev);
+void pm8001_tmf_aborted(struct sas_task *task);
#endif
}
static void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
- const void *destination,
+ __le32 *destination,
u32 dw_count, u32 bus_base_number)
{
u32 index, value, offset;
- u32 *destination1;
- destination1 = (u32 *)destination;
- for (index = 0; index < dw_count; index += 4, destination1++) {
+ for (index = 0; index < dw_count; index += 4, destination++) {
offset = (soffset + index);
if (offset < (64 * 1024)) {
value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
- *destination1 = cpu_to_le32(value);
+ *destination = cpu_to_le32(value);
}
}
return;
pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
{
struct set_ctrl_cfg_req payload;
- struct inbound_queue_table *circularQ;
int rc;
u32 tag;
u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
- return -1;
+ return rc;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
if (IS_SPCV_12G(pm8001_ha->pdev))
else
page_code = THERMAL_PAGE_CODE_8H;
- payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
- (THERMAL_ENABLE << 8) | page_code;
- payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
+ payload.cfg_pg[0] =
+ cpu_to_le32((THERMAL_LOG_ENABLE << 9) |
+ (THERMAL_ENABLE << 8) | page_code);
+ payload.cfg_pg[1] =
+ cpu_to_le32((LTEMPHIL << 24) | (RTEMPHIL << 8));
pm8001_dbg(pm8001_ha, DEV,
"Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
payload.cfg_pg[0], payload.cfg_pg[1]);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
{
struct set_ctrl_cfg_req payload;
- struct inbound_queue_table *circularQ;
SASProtocolTimerConfig_t SASConfigPage;
int rc;
u32 tag;
memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
-
if (rc)
- return -1;
+ return rc;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
- SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE;
- SASConfigPage.MST_MSI = 3 << 15;
- SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO;
- SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) |
- (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
- SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME;
-
- if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
- SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
-
-
- SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) |
- SAS_OPNRJT_RTRY_INTVL;
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16)
- | SAS_COPNRJT_RTRY_TMO;
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16)
- | SAS_COPNRJT_RTRY_THR;
- SASConfigPage.MAX_AIP = SAS_MAX_AIP;
+ SASConfigPage.pageCode = cpu_to_le32(SAS_PROTOCOL_TIMER_CONFIG_PAGE);
+ SASConfigPage.MST_MSI = cpu_to_le32(3 << 15);
+ SASConfigPage.STP_SSP_MCT_TMO =
+ cpu_to_le32((STP_MCT_TMO << 16) | SSP_MCT_TMO);
+ SASConfigPage.STP_FRM_TMO =
+ cpu_to_le32((SAS_MAX_OPEN_TIME << 24) |
+ (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER);
+ SASConfigPage.STP_IDLE_TMO = cpu_to_le32(STP_IDLE_TIME);
+
+ SASConfigPage.OPNRJT_RTRY_INTVL =
+ cpu_to_le32((SAS_MFD << 16) | SAS_OPNRJT_RTRY_INTVL);
+ SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO =
+ cpu_to_le32((SAS_DOPNRJT_RTRY_TMO << 16) | SAS_COPNRJT_RTRY_TMO);
+ SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR =
+ cpu_to_le32((SAS_DOPNRJT_RTRY_THR << 16) | SAS_COPNRJT_RTRY_THR);
+ SASConfigPage.MAX_AIP = cpu_to_le32(SAS_MAX_AIP);
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.pageCode 0x%08x\n",
- SASConfigPage.pageCode);
+ le32_to_cpu(SASConfigPage.pageCode));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MST_MSI 0x%08x\n",
- SASConfigPage.MST_MSI);
+ le32_to_cpu(SASConfigPage.MST_MSI));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_SSP_MCT_TMO 0x%08x\n",
- SASConfigPage.STP_SSP_MCT_TMO);
+ le32_to_cpu(SASConfigPage.STP_SSP_MCT_TMO));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_FRM_TMO 0x%08x\n",
- SASConfigPage.STP_FRM_TMO);
+ le32_to_cpu(SASConfigPage.STP_FRM_TMO));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_IDLE_TMO 0x%08x\n",
- SASConfigPage.STP_IDLE_TMO);
+ le32_to_cpu(SASConfigPage.STP_IDLE_TMO));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.OPNRJT_RTRY_INTVL 0x%08x\n",
- SASConfigPage.OPNRJT_RTRY_INTVL);
+ le32_to_cpu(SASConfigPage.OPNRJT_RTRY_INTVL));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO 0x%08x\n",
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO);
+ le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR 0x%08x\n",
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR);
+ le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MAX_AIP 0x%08x\n",
- SASConfigPage.MAX_AIP);
+ le32_to_cpu(SASConfigPage.MAX_AIP));
memcpy(&payload.cfg_pg, &SASConfigPage,
sizeof(SASProtocolTimerConfig_t));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
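
The pattern in this hunk recurs wherever a structure is DMA'd to the controller: fields are stored in little-endian wire order with cpu_to_le32() and converted back with le32_to_cpu() for logging, so the same code is correct on big-endian hosts. A minimal sketch with an illustrative struct and field:

#include <linux/types.h>
#include <linux/printk.h>

struct example_cfg_page {
	__le32 page_code;	/* wire order, as the firmware expects */
};

static void example_fill(struct example_cfg_page *pg, u32 code)
{
	pg->page_code = cpu_to_le32(code);
	/* convert back before printing so logs read the same everywhere */
	pr_debug("pageCode 0x%08x\n", le32_to_cpu(pg->page_code));
}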
static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
{
struct kek_mgmt_req payload;
- struct inbound_queue_table *circularQ;
int rc;
u32 tag;
u32 opc = OPC_INB_KEK_MANAGEMENT;
memset(&payload, 0, sizeof(struct kek_mgmt_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
- return -1;
+ return rc;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
/* Currently only one key is used. New KEK index is 1.
* Current KEK index is 1. Store KEK to NVRAM is 1.
*/
- payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
- KEK_MGMT_SUBOP_KEYCARDUPDATE);
+ payload.new_curidx_ksop =
+ cpu_to_le32(((1 << 24) | (1 << 16) | (1 << 8) |
+ KEK_MGMT_SUBOP_KEYCARDUPDATE));
pm8001_dbg(pm8001_ha, DEV,
"Saving Encryption info to flash. payload 0x%x\n",
- payload.new_curidx_ksop);
+ le32_to_cpu(payload.new_curidx_ksop));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
{
int ret = 0;
u32 scratch_pad_rsvd0 = pm8001_cr32(pm8001_ha, 0,
- MSGU_HOST_SCRATCH_PAD_6);
+ MSGU_SCRATCH_PAD_RSVD_0);
u32 scratch_pad_rsvd1 = pm8001_cr32(pm8001_ha, 0,
- MSGU_HOST_SCRATCH_PAD_7);
+ MSGU_SCRATCH_PAD_RSVD_1);
u32 scratch_pad1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
u32 scratch_pad2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
u32 scratch_pad3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
PCI_VENDOR_ID_ATTO &&
pm8001_ha->pdev->subsystem_vendor != 0) {
ibutton0 = pm8001_cr32(pm8001_ha, 0,
- MSGU_HOST_SCRATCH_PAD_6);
+ MSGU_SCRATCH_PAD_RSVD_0);
ibutton1 = pm8001_cr32(pm8001_ha, 0,
- MSGU_HOST_SCRATCH_PAD_7);
+ MSGU_SCRATCH_PAD_RSVD_1);
if (!ibutton0 && !ibutton1) {
pm8001_dbg(pm8001_ha, FAIL,
"iButton Feature is not Available!!!\n");
static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_ha_dev)
{
- int res;
- u32 ccb_tag;
struct pm8001_ccb_info *ccb;
- struct sas_task *task = NULL;
+ struct sas_task *task;
struct task_abort_req task_abort;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_ABORT;
int ret;
- if (!pm8001_ha_dev) {
- pm8001_dbg(pm8001_ha, FAIL, "dev is null\n");
- return;
- }
+ pm8001_ha_dev->id |= NCQ_ABORT_ALL_FLAG;
+ pm8001_ha_dev->id &= ~NCQ_READ_LOG_FLAG;
task = sas_alloc_slow_task(GFP_ATOMIC);
-
if (!task) {
pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n");
return;
}
-
task->task_done = pm8001_task_done;
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res) {
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task);
+ if (!ccb) {
sas_free_task(task);
return;
}
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_ha_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
-
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
-
memset(&task_abort, 0, sizeof(task_abort));
task_abort.abort_all = cpu_to_le32(1);
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- task_abort.tag = cpu_to_le32(ccb_tag);
+ task_abort.tag = cpu_to_le32(ccb->ccb_tag);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
- sizeof(task_abort), 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort,
+ sizeof(task_abort), 0);
pm8001_dbg(pm8001_ha, FAIL, "Executing abort task end\n");
if (ret) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
}
{
struct sata_start_req sata_cmd;
int res;
- u32 ccb_tag;
struct pm8001_ccb_info *ccb;
struct sas_task *task = NULL;
struct host_to_dev_fis fis;
struct domain_device *dev;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
task = sas_alloc_slow_task(GFP_ATOMIC);
-
if (!task) {
pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n");
return;
}
task->task_done = pm8001_task_done;
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res) {
- sas_free_task(task);
- pm8001_dbg(pm8001_ha, FAIL, "cannot allocate tag !!!\n");
- return;
- }
-
- /* allocate domain device by ourselves as libsas
- * is not going to provide any
- */
+ /*
+ * Allocate domain device by ourselves as libsas is not going to
+ * provide any.
+ */
dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
if (!dev) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
pm8001_dbg(pm8001_ha, FAIL,
"Domain device cannot be allocated\n");
return;
task->dev = dev;
task->dev->lldd_dev = pm8001_ha_dev;
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_ha_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
- ccb->n_elem = 0;
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task);
+ if (!ccb) {
+ sas_free_task(task);
+ kfree(dev);
+ return;
+ }
+
pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
memset(&sata_cmd, 0, sizeof(sata_cmd));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
/* construct read log FIS */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.lbal = 0x10;
fis.sector_count = 0x1;
- sata_cmd.tag = cpu_to_le32(ccb_tag);
+ sata_cmd.tag = cpu_to_le32(ccb->ccb_tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
+ sata_cmd.ncqtag_atap_dir_m_dad = cpu_to_le32(((0x1 << 7) | (0x5 << 9)));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
- sizeof(sata_cmd), 0);
+ res = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
pm8001_dbg(pm8001_ha, FAIL, "Executing read log end\n");
if (res) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
kfree(dev);
}
}
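
For context, the FIS assembled in this function requests the NCQ command error log (READ LOG EXT, log page 10h), which the driver reads back to resolve failed queued commands. A sketch of the full frame under that assumption; field values follow the SATA spec, struct host_to_dev_fis is from include/scsi/sas.h, and the elided lines above set the same fields:

#include <linux/ata.h>
#include <linux/string.h>
#include <scsi/sas.h>

static void example_build_read_log_fis(struct host_to_dev_fis *fis)
{
	memset(fis, 0, sizeof(*fis));
	fis->fis_type = 0x27;			/* register: host to device */
	fis->flags = 0x80;			/* C bit set: command update */
	fis->command = ATA_CMD_READ_LOG_EXT;	/* 0x2F */
	fis->lbal = 0x10;			/* log page 10h: NCQ errors */
	fis->sector_count = 0x1;		/* one 512-byte log page */
}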
psspPayload->ssp_resp_iu.status);
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
if (t->slow_task)
complete(&t->slow_task->completion);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* in order to force CPU ordering */
- t->task_done(t);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* in order to force CPU ordering */
- t->task_done(t);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
param = le32_to_cpu(psataPayload->param);
tag = le32_to_cpu(psataPayload->tag);
- if (!tag) {
- pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
- return;
- }
-
ccb = &pm8001_ha->ccb_info[tag];
t = ccb->task;
pm8001_dev = ccb->device;
ts->stat = SAS_SAM_STAT_GOOD;
/* check if response is for SEND READ LOG */
if (pm8001_dev &&
- (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
- /* set new bit for abort_all */
- pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
- /* clear bit for read log */
- pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+ (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
pm80xx_send_abort_all(pm8001_ha, pm8001_dev);
/* Free the tag */
pm8001_tag_free(pm8001_ha, tag);
len = sizeof(struct pio_setup_fis);
pm8001_dbg(pm8001_ha, IO,
"PIO read len = %d\n", len);
- } else if (t->ata_task.use_ncq) {
+ } else if (t->ata_task.use_ncq &&
+ t->data_dir != DMA_NONE) {
len = sizeof(struct set_dev_bits_fis);
pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
len);
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
if (t->slow_task)
complete(&t->slow_task->completion);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
}
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%xstat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
t->task_done(t);
}
struct hw_event_ack_req payload;
u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
- struct inbound_queue_table *circularQ;
-
memset((u8 *)&payload, 0, sizeof(payload));
- circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
payload.tag = cpu_to_le32(1);
payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
((phyId & 0xFF) << 24) | (port_id & 0xFF));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ pm8001_mpi_build_cmd(pm8001_ha, Qnum, opc, &payload,
+ sizeof(payload), 0);
}
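
Each call site above and below drops its local struct inbound_queue_table pointer and passes an inbound queue index instead. The assumed shape of the reworked submit entry point, which now resolves the ring internally (a stub sketch, not the full ring-buffer code):

    int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, u32 q_index,
                             u32 opc, void *payload, size_t nb, u32 rsp_q)
    {
            struct inbound_queue_table *circularQ =
                    &pm8001_ha->inbnd_q_tbl[q_index];

            /* ... unchanged ring-buffer handling via circularQ ... */
            return 0;
    }
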
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_5: 0x%x\n",
pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5));
pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6));
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_RSVD_0));
pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7));
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_RSVD_1));
}
static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
pm8001_handle_event(pm8001_ha, NULL, IO_FATAL_ERROR);
print_scratchpad_registers(pm8001_ha);
return ret;
+ } else {
+ /* read scratchpad rsvd 0 register */
+ regval = pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_RSVD_0);
+ switch (regval) {
+ case NON_FATAL_SPBC_LBUS_ECC_ERR:
+ case NON_FATAL_BDMA_ERR:
+ case NON_FATAL_THERM_OVERTEMP_ERR:
+ /* Clear the register */
+ pm8001_cw32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_RSVD_0,
+ 0x00000000);
+ break;
+ default:
+ break;
+ }
}
}
circularQ = &pm8001_ha->outbnd_q_tbl[vec];
u32 req_len, resp_len;
struct smp_req smp_cmd;
u32 opc;
- struct inbound_queue_table *circularQ;
u32 i, length;
u8 *payload;
u8 *to;
}
opc = OPC_INB_SMP_REQUEST;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
length = sg_req->length;
kunmap_atomic(to);
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
&smp_cmd, pm8001_ha->smp_exp_mode, length);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
- sizeof(smp_cmd), 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &smp_cmd,
+ sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
return 0;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct ssp_ini_io_start_req ssp_cmd;
u32 tag = ccb->ccb_tag;
- int ret;
- u64 phys_addr, start_addr, end_addr;
+ u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
- struct inbound_queue_table *circularQ;
u32 q_index, cpu_id;
u32 opc = OPC_INB_SSPINIIOSTART;
+
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
+
/* data address domain added for spcv; set to 0 by host,
* used internally by controller
* 0 for SAS 1.1 and SAS 2.0 compatible TLR
ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
ssp_cmd.tag = cpu_to_le32(tag);
if (task->ssp_task.enable_first_burst)
- ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
+ ssp_cmd.ssp_iu.efb_prio_attr = 0x80;
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
cpu_id = smp_processor_id();
q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
- circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
/* Check if encryption is set */
if (pm8001_ha->chip->encrypt &&
ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
+
ssp_cmd.enc_addr_low =
cpu_to_le32(lower_32_bits(dma_addr));
ssp_cmd.enc_addr_high =
cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.enc_esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + ssp_cmd.enc_len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
- if (end_addr_high != ssp_cmd.enc_addr_high) {
+ end_addr = dma_addr + le32_to_cpu(ssp_cmd.enc_len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
+
+ if (end_addr_high != le32_to_cpu(ssp_cmd.enc_addr_high)) {
pm8001_dbg(pm8001_ha, FAIL,
"The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
- start_addr, ssp_cmd.enc_len,
+ dma_addr,
+ le32_to_cpu(ssp_cmd.enc_len),
end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.enc_addr_high =
cpu_to_le32(upper_32_bits(phys_addr));
- ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+ ssp_cmd.enc_esgl = cpu_to_le32(1U<<31);
}
} else if (task->num_scatter == 0) {
ssp_cmd.enc_addr_low = 0;
ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.enc_esgl = 0;
}
+
/* XTS mode. All other fields are 0 */
- ssp_cmd.key_cmode = 0x6 << 4;
+ ssp_cmd.key_cmode = cpu_to_le32(0x6 << 4);
+
/* set tweak values. Should be the start lba */
ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) |
(task->ssp_task.cmd->cmnd[3] << 16) |
ssp_cmd.esgl = cpu_to_le32(1<<31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
+
ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
ssp_cmd.addr_high =
cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + ssp_cmd.len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
- if (end_addr_high != ssp_cmd.addr_high) {
+ end_addr = dma_addr + le32_to_cpu(ssp_cmd.len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
+ if (end_addr_high != le32_to_cpu(ssp_cmd.addr_high)) {
pm8001_dbg(pm8001_ha, FAIL,
"The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
- start_addr, ssp_cmd.len,
+ dma_addr,
+ le32_to_cpu(ssp_cmd.len),
end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
ssp_cmd.esgl = 0;
}
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &ssp_cmd, sizeof(ssp_cmd), q_index);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, q_index, opc, &ssp_cmd,
+ sizeof(ssp_cmd), q_index);
}
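
The boundary test above (and its twin in the SATA path below) now does all arithmetic in CPU byte order, converting the little-endian descriptor fields only at the comparison, rather than feeding cpu_to_le32() values into additions. A stand-alone illustration of the check itself:

    #include <stdint.h>
    #include <stdio.h>

    /* Nonzero when [dma_addr, dma_addr + len) crosses a 4 GiB line. */
    static int crosses_4g(uint64_t dma_addr, uint32_t len)
    {
            uint64_t end_addr = dma_addr + len - 1;

            return (uint32_t)(end_addr >> 32) != (uint32_t)(dma_addr >> 32);
    }

    int main(void)
    {
            printf("%d\n", crosses_4g(0xFFFFF000ULL, 0x2000));  /* 1: crosses */
            printf("%d\n", crosses_4g(0x10000000ULL, 0x1000));  /* 0: fits   */
            return 0;
    }
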
static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
struct ata_queued_cmd *qc = task->uldd_task;
u32 tag = ccb->ccb_tag;
- int ret;
u32 q_index, cpu_id;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
- u64 phys_addr, start_addr, end_addr;
+ u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
u32 ATAP = 0x0;
u32 dir;
- struct inbound_queue_table *circularQ;
unsigned long flags;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
cpu_id = smp_processor_id();
q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
- circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
- if (task->data_dir == DMA_NONE) {
+ if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
ATAP = 0x04; /* no data*/
pm8001_dbg(pm8001_ha, IO, "no data\n");
} else if (likely(!task->ata_task.device_control_reg_update)) {
- if (task->ata_task.dma_xfer) {
+ if (task->ata_task.use_ncq &&
+ dev->sata_dev.class != ATA_DEV_ATAPI) {
+ ATAP = 0x07; /* FPDMA */
+ pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
+ } else if (task->ata_task.dma_xfer) {
ATAP = 0x06; /* DMA */
pm8001_dbg(pm8001_ha, IO, "DMA\n");
} else {
ATAP = 0x05; /* PIO*/
pm8001_dbg(pm8001_ha, IO, "PIO\n");
}
- if (task->ata_task.use_ncq &&
- dev->sata_dev.class != ATA_DEV_ATAPI) {
- ATAP = 0x07; /* FPDMA */
- pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
- }
}
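
Ordering matters in this hunk: an NCQ command also reports dma_xfer, so the old code first set ATAP to DMA and then overwrote it; checking FPDMA first removes the dead store. A hypothetical helper condensing the selection logic (the device-control-register special case is omitted here):

    static u32 sata_atap(bool no_data, bool use_ncq, bool is_atapi,
                         bool dma_xfer)
    {
            if (no_data && !use_ncq)
                    return 0x04;            /* non-data */
            if (use_ncq && !is_atapi)
                    return 0x07;            /* FPDMA */
            if (dma_xfer)
                    return 0x06;            /* DMA */
            return 0x05;                    /* PIO */
    }
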
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
pm8001_chip_make_sg(task->scatter,
ccb->n_elem, ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle;
- sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
- sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
+ sata_cmd.enc_addr_low =
+ cpu_to_le32(lower_32_bits(phys_addr));
+ sata_cmd.enc_addr_high =
+ cpu_to_le32(upper_32_bits(phys_addr));
sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
- sata_cmd.enc_addr_low = lower_32_bits(dma_addr);
- sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
+
+ sata_cmd.enc_addr_low =
+ cpu_to_le32(lower_32_bits(dma_addr));
+ sata_cmd.enc_addr_high =
+ cpu_to_le32(upper_32_bits(dma_addr));
sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
sata_cmd.enc_esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + sata_cmd.enc_len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
- if (end_addr_high != sata_cmd.enc_addr_high) {
+ end_addr = dma_addr + le32_to_cpu(sata_cmd.enc_len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
+ if (end_addr_high != le32_to_cpu(sata_cmd.enc_addr_high)) {
pm8001_dbg(pm8001_ha, FAIL,
"The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
- start_addr, sata_cmd.enc_len,
+ dma_addr,
+ le32_to_cpu(sata_cmd.enc_len),
end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle;
sata_cmd.enc_addr_low =
- lower_32_bits(phys_addr);
+ cpu_to_le32(lower_32_bits(phys_addr));
sata_cmd.enc_addr_high =
- upper_32_bits(phys_addr);
+ cpu_to_le32(upper_32_bits(phys_addr));
sata_cmd.enc_esgl =
cpu_to_le32(1 << 31);
}
sata_cmd.enc_esgl = 0;
}
/* XTS mode. All other fields are 0 */
- sata_cmd.key_index_mode = 0x6 << 4;
+ sata_cmd.key_index_mode = cpu_to_le32(0x6 << 4);
+
/* set tweak values. Should be the start lba */
sata_cmd.twk_val0 =
cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) |
phys_addr = ccb->ccb_dma_handle;
sata_cmd.addr_low = lower_32_bits(phys_addr);
sata_cmd.addr_high = upper_32_bits(phys_addr);
- sata_cmd.esgl = cpu_to_le32(1 << 31);
+ sata_cmd.esgl = cpu_to_le32(1U << 31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
+
sata_cmd.addr_low = lower_32_bits(dma_addr);
sata_cmd.addr_high = upper_32_bits(dma_addr);
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
sata_cmd.esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + sata_cmd.len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ end_addr = dma_addr + le32_to_cpu(sata_cmd.len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
if (end_addr_high != sata_cmd.addr_high) {
pm8001_dbg(pm8001_ha, FAIL,
"The sg list address start_addr=0x%016llx data_len=0x%xend_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
- start_addr, sata_cmd.len,
+ dma_addr,
+ le32_to_cpu(sata_cmd.len),
end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle;
- sata_cmd.addr_low =
- lower_32_bits(phys_addr);
- sata_cmd.addr_high =
- upper_32_bits(phys_addr);
- sata_cmd.esgl = cpu_to_le32(1 << 31);
+ sata_cmd.addr_low = lower_32_bits(phys_addr);
+ sata_cmd.addr_high = upper_32_bits(phys_addr);
+ sata_cmd.esgl = cpu_to_le32(1U << 31);
}
} else if (task->num_scatter == 0) {
sata_cmd.addr_low = 0;
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
sata_cmd.esgl = 0;
}
+
/* scsi cdb */
sata_cmd.atapi_scsi_cdb[0] =
cpu_to_le32(((task->ata_task.atapi_packet[0]) |
- (task->ata_task.atapi_packet[1] << 8) |
- (task->ata_task.atapi_packet[2] << 16) |
- (task->ata_task.atapi_packet[3] << 24)));
+ (task->ata_task.atapi_packet[1] << 8) |
+ (task->ata_task.atapi_packet[2] << 16) |
+ (task->ata_task.atapi_packet[3] << 24)));
sata_cmd.atapi_scsi_cdb[1] =
cpu_to_le32(((task->ata_task.atapi_packet[4]) |
- (task->ata_task.atapi_packet[5] << 8) |
- (task->ata_task.atapi_packet[6] << 16) |
- (task->ata_task.atapi_packet[7] << 24)));
+ (task->ata_task.atapi_packet[5] << 8) |
+ (task->ata_task.atapi_packet[6] << 16) |
+ (task->ata_task.atapi_packet[7] << 24)));
sata_cmd.atapi_scsi_cdb[2] =
cpu_to_le32(((task->ata_task.atapi_packet[8]) |
- (task->ata_task.atapi_packet[9] << 8) |
- (task->ata_task.atapi_packet[10] << 16) |
- (task->ata_task.atapi_packet[11] << 24)));
+ (task->ata_task.atapi_packet[9] << 8) |
+ (task->ata_task.atapi_packet[10] << 16) |
+ (task->ata_task.atapi_packet[11] << 24)));
sata_cmd.atapi_scsi_cdb[3] =
cpu_to_le32(((task->ata_task.atapi_packet[12]) |
- (task->ata_task.atapi_packet[13] << 8) |
- (task->ata_task.atapi_packet[14] << 16) |
- (task->ata_task.atapi_packet[15] << 24)));
+ (task->ata_task.atapi_packet[13] << 8) |
+ (task->ata_task.atapi_packet[14] << 16) |
+ (task->ata_task.atapi_packet[15] << 24)));
}
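
The re-indented block above packs the 16-byte ATAPI packet into four little-endian 32-bit words, lowest byte first. An equivalent stand-alone loop, for reference:

    #include <stdint.h>
    #include <stdio.h>

    static void pack_atapi_cdb(uint32_t out[4], const uint8_t pkt[16])
    {
            int i;

            for (i = 0; i < 4; i++)
                    out[i] = (uint32_t)pkt[4 * i] |
                             ((uint32_t)pkt[4 * i + 1] << 8) |
                             ((uint32_t)pkt[4 * i + 2] << 16) |
                             ((uint32_t)pkt[4 * i + 3] << 24);
            /* on big-endian hosts a cpu_to_le32() per word would follow */
    }

    int main(void)
    {
            const uint8_t pkt[16] = { 0x12, 0x34, 0x56, 0x78 };
            uint32_t w[4];

            pack_atapi_cdb(w, pkt);
            printf("0x%08x\n", w[0]);       /* prints 0x78563412 */
            return 0;
    }
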
/* Check for read log for failed drive and return */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_SAM_STAT_GOOD;
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags &
SAS_TASK_STATE_ABORTED))) {
"task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n",
task, ts->resp,
ts->stat);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
return 0;
} else {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- pm8001_ccb_task_free_done(pm8001_ha, task,
- ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
atomic_dec(&pm8001_ha_dev->running_req);
return 0;
}
ccb->ccb_tag, opc,
qc ? qc->tf.command : 0, // ata opcode
ccb->device ? atomic_read(&ccb->device->running_req) : 0);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &sata_cmd, sizeof(sata_cmd), q_index);
- return ret;
+ return pm8001_mpi_build_cmd(pm8001_ha, q_index, opc, &sata_cmd,
+ sizeof(sata_cmd), q_index);
}
/**
pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
{
struct phy_start_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 tag = 0x01;
u32 opcode = OPC_INB_PHYSTART;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
memcpy(payload.sas_identify.sas_addr,
&pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+ sizeof(payload), 0);
}
/**
u8 phy_id)
{
struct phy_stop_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 tag = 0x01;
u32 opcode = OPC_INB_PHYSTOP;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+ sizeof(payload), 0);
}
/*
struct reg_dev_req payload;
u32 opc;
u32 stp_sspsmp_sata = 0x4;
- struct inbound_queue_table *circularQ;
u32 linkrate, phy_id;
- int rc, tag = 0xdeadbeef;
+ int rc;
struct pm8001_ccb_info *ccb;
u8 retryFlag = 0x1;
u16 firstBurstSize = 0;
struct domain_device *dev = pm8001_dev->sas_device;
struct domain_device *parent_dev = dev->parent;
struct pm8001_port *port = dev->port->lldd_port;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&payload, 0, sizeof(payload));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- return rc;
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->device = pm8001_dev;
- ccb->ccb_tag = tag;
- payload.tag = cpu_to_le32(tag);
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL);
+ if (!ccb)
+ return -SAS_QUEUE_FULL;
+
+ payload.tag = cpu_to_le32(ccb->ccb_tag);
if (flag == 1) {
stp_sspsmp_sata = 0x02; /*direct attached sata */
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
return rc;
}
u32 tag;
int rc;
struct local_phy_ctl_req payload;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+
memset(&payload, 0, sizeof(payload));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
return rc;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
payload.tag = cpu_to_le32(tag);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
- return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
+
+ return rc;
}
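
The new failure branch returns the tag that was allocated a few lines earlier; the same leak fix is applied to both set-phy-profile paths below. The general shape, as a sketch (the pm8001_mpi_build_cmd signature follows the queue-index rework above, and the function name is illustrative):

    static int example_send_inbound(struct pm8001_hba_info *pm8001_ha,
                                    u32 opc, void *payload, size_t nb)
    {
            u32 tag;
            int rc = pm8001_tag_alloc(pm8001_ha, &tag);

            if (rc)
                    return rc;

            /* ... fill payload, including the tag ... */

            rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, payload, nb, 0);
            if (rc)
                    pm8001_tag_free(pm8001_ha, tag);  /* do not leak the tag */
            return rc;
    }
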
static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
u32 tag, i, j = 0;
int rc;
struct set_phy_profile_req payload;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SET_PHY_PROFILE;
memset(&payload, 0, sizeof(payload));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
+ if (rc) {
pm8001_dbg(pm8001_ha, FAIL, "Invalid tag\n");
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ return;
+ }
+
payload.tag = cpu_to_le32(tag);
- payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid & 0xFF));
+ payload.ppc_phyid =
+ cpu_to_le32(((operation & 0xF) << 8) | (phyid & 0xFF));
pm8001_dbg(pm8001_ha, INIT,
" phy profile command for phy %x ,length is %d\n",
- payload.ppc_phyid, length);
+ le32_to_cpu(payload.ppc_phyid), length);
for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
- payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
+ payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
j++;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
}
u32 tag, opc;
int rc, i;
struct set_phy_profile_req payload;
- struct inbound_queue_table *circularQ;
memset(&payload, 0, sizeof(payload));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
+ if (rc) {
pm8001_dbg(pm8001_ha, INIT, "Invalid tag\n");
+ return;
+ }
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
opc = OPC_INB_SET_PHY_PROFILE;
payload.tag = cpu_to_le32(tag);
- payload.ppc_phyid = (((SAS_PHY_ANALOG_SETTINGS_PAGE & 0xF) << 8)
- | (phy & 0xFF));
+ payload.ppc_phyid =
+ cpu_to_le32(((SAS_PHY_ANALOG_SETTINGS_PAGE & 0xF) << 8)
+ | (phy & 0xFF));
for (i = 0; i < length; i++)
payload.reserved[i] = cpu_to_le32(*(buf + i));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
u32 reserved[27];
} __attribute__((packed, aligned(4)));
-/* These flags used for SSP SMP & SATA Abort */
-#define ABORT_MASK 0x3
-#define ABORT_SINGLE 0x0
-#define ABORT_ALL 0x1
-
/**
* brief the data structure of SSP SATA SMP Abort Response
* use to describe SSP SMP & SATA Abort Response ( 64 bytes)
struct set_phy_profile_req {
__le32 tag;
__le32 ppc_phyid;
- u32 reserved[29];
+ __le32 reserved[29];
} __attribute__((packed, aligned(4)));
/**
#define MSGU_HOST_SCRATCH_PAD_3 0x60
#define MSGU_HOST_SCRATCH_PAD_4 0x64
#define MSGU_HOST_SCRATCH_PAD_5 0x68
-#define MSGU_HOST_SCRATCH_PAD_6 0x6C
-#define MSGU_HOST_SCRATCH_PAD_7 0x70
+#define MSGU_SCRATCH_PAD_RSVD_0 0x6C
+#define MSGU_SCRATCH_PAD_RSVD_1 0x70
#define MSGU_SCRATCHPAD1_RAAE_STATE_ERR(x) ((x & 0x3) == 0x2)
#define MSGU_SCRATCHPAD1_ILA_STATE_ERR(x) (((x >> 2) & 0x3) == 0x2)
#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */
#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */
+/* State definitions for Scratchpad Rsvd 0, offset 0x6C, non-fatal */
+#define NON_FATAL_SPBC_LBUS_ECC_ERR 0x70000001
+#define NON_FATAL_BDMA_ERR 0xE0000001
+#define NON_FATAL_THERM_OVERTEMP_ERR 0x80000001
+
/* main configuration offset - byte offset */
#define MAIN_SIGNATURE_OFFSET 0x00 /* DWORD 0x00 */
#define MAIN_INTERFACE_REVISION 0x04 /* DWORD 0x01 */
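
For reference, a stand-alone decoder for the three non-fatal codes defined above; the strings are derived from the macro names and are purely illustrative:

    #include <stdint.h>
    #include <stdio.h>

    static const char *pm80xx_nonfatal_str(uint32_t regval)
    {
            switch (regval) {
            case 0x70000001: return "SPBC local-bus ECC error";
            case 0xE0000001: return "BDMA error";
            case 0x80000001: return "thermal overtemperature";
            default:         return "none/unknown";
            }
    }

    int main(void)
    {
            printf("%s\n", pm80xx_nonfatal_str(0xE0000001)); /* BDMA error */
            return 0;
    }
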
#include "ppa.h"
+static struct scsi_pointer *ppa_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static inline ppa_struct *ppa_dev(struct Scsi_Host *host)
{
return *(ppa_struct **)&host->hostdata;
{
dev->base = dev->dev->port->base;
if (dev->cur_cmd)
- dev->cur_cmd->SCp.phase = 1;
+ ppa_scsi_pointer(dev->cur_cmd)->phase = 1;
else
wake_up(dev->waiting);
}
* The driver appears to remain stable if we speed up the parallel port
* i/o in this function, but not elsewhere.
*/
-static int ppa_completion(struct scsi_cmnd *cmd)
+static int ppa_completion(struct scsi_cmnd *const cmd)
{
/* Return codes:
* -1 Error
* 0 Told to schedule
* 1 Finished data transfer
*/
+ struct scsi_pointer *scsi_pointer = ppa_scsi_pointer(cmd);
ppa_struct *dev = ppa_dev(cmd->device->host);
unsigned short ppb = dev->base;
unsigned long start_jiffies = jiffies;
if (time_after(jiffies, start_jiffies + 1))
return 0;
- if ((cmd->SCp.this_residual <= 0)) {
+ if (scsi_pointer->this_residual <= 0) {
ppa_fail(dev, DID_ERROR);
return -1; /* ERROR_RETURN */
}
}
/* determine if we should use burst I/O */
- fast = (bulk && (cmd->SCp.this_residual >= PPA_BURST_SIZE))
- ? PPA_BURST_SIZE : 1;
+ fast = bulk && scsi_pointer->this_residual >= PPA_BURST_SIZE ?
+ PPA_BURST_SIZE : 1;
if (r == (unsigned char) 0xc0)
- status = ppa_out(dev, cmd->SCp.ptr, fast);
+ status = ppa_out(dev, scsi_pointer->ptr, fast);
else
- status = ppa_in(dev, cmd->SCp.ptr, fast);
+ status = ppa_in(dev, scsi_pointer->ptr, fast);
- cmd->SCp.ptr += fast;
- cmd->SCp.this_residual -= fast;
+ scsi_pointer->ptr += fast;
+ scsi_pointer->this_residual -= fast;
if (!status) {
ppa_fail(dev, DID_BUS_BUSY);
return -1; /* ERROR_RETURN */
}
- if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
+ if (scsi_pointer->buffer && !scsi_pointer->this_residual) {
/* if scatter/gather, advance to the next segment */
- if (cmd->SCp.buffers_residual--) {
- cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
- cmd->SCp.this_residual =
- cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ if (scsi_pointer->buffers_residual--) {
+ scsi_pointer->buffer =
+ sg_next(scsi_pointer->buffer);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
+ scsi_pointer->ptr =
+ sg_virt(scsi_pointer->buffer);
}
}
/* Now check to see if the drive is ready to communicate */
}
#endif
- if (cmd->SCp.phase > 1)
+ if (ppa_scsi_pointer(cmd)->phase > 1)
ppa_disconnect(dev);
ppa_pb_dismiss(dev);
static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
{
+ struct scsi_pointer *scsi_pointer = ppa_scsi_pointer(cmd);
unsigned short ppb = dev->base;
unsigned char l = 0, h = 0;
int retv;
if (dev->failed)
return 0;
- switch (cmd->SCp.phase) {
+ switch (scsi_pointer->phase) {
case 0: /* Phase 0 - Waiting for parport */
if (time_after(jiffies, dev->jstart + HZ)) {
/*
return 1; /* Try again in a jiffy */
}
}
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
}
fallthrough;
ppa_fail(dev, DID_NO_CONNECT);
return 0;
}
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 3: /* Phase 3 - Ready to accept a command */
if (!ppa_send_command(cmd))
return 0;
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ scsi_pointer->buffer = scsi_sglist(cmd);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
} else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.this_residual = 0;
- cmd->SCp.ptr = NULL;
+ scsi_pointer->buffer = NULL;
+ scsi_pointer->this_residual = 0;
+ scsi_pointer->ptr = NULL;
}
- cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
- cmd->SCp.phase++;
+ scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1;
+ scsi_pointer->phase++;
fallthrough;
case 5: /* Phase 5 - Data transfer stage */
return 0;
if (retv == 0)
return 1;
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 6: /* Phase 6 - Read status/message */
dev->jstart = jiffies;
dev->cur_cmd = cmd;
cmd->result = DID_ERROR << 16; /* default return code */
- cmd->SCp.phase = 0; /* bus free */
+ ppa_scsi_pointer(cmd)->phase = 0; /* bus free */
schedule_delayed_work(&dev->ppa_tq, 0);
* have tied the SCSI_MESSAGE line high in the interface
*/
- switch (cmd->SCp.phase) {
+ switch (ppa_scsi_pointer(cmd)->phase) {
case 0: /* Do not have access to parport */
case 1: /* Have not connected to interface */
dev->cur_cmd = NULL; /* Forget the problem */
{
ppa_struct *dev = ppa_dev(cmd->device->host);
- if (cmd->SCp.phase)
+ if (ppa_scsi_pointer(cmd)->phase)
ppa_disconnect(dev);
dev->cur_cmd = NULL; /* Forget the problem */
.sg_tablesize = SG_ALL,
.can_queue = 1,
.slave_alloc = ppa_adjust_queue,
+ .cmd_size = sizeof(struct scsi_pointer),
};
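
Setting .cmd_size makes the SCSI midlayer reserve driver-private bytes directly behind each struct scsi_cmnd, which is what scsi_cmd_priv() (and the ppa_scsi_pointer() wrapper above) hands back. A user-space model of the layout, assuming nothing beyond one contiguous allocation:

    #include <stdlib.h>

    struct cmnd_model { int midlayer_fields[8]; };

    /* mirrors scsi_cmd_priv(): private data sits right after the command */
    static void *cmd_priv_model(struct cmnd_model *cmd)
    {
            return cmd + 1;
    }

    static struct cmnd_model *alloc_cmd_model(size_t cmd_size)
    {
            return calloc(1, sizeof(struct cmnd_model) + cmd_size);
    }

    int main(void)
    {
            struct cmnd_model *cmd = alloc_cmd_model(64);
            int ok = cmd && cmd_priv_model(cmd) == (void *)(cmd + 1);

            free(cmd);
            return ok ? 0 : 1;
    }
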
/***************************************************************************
#define FC_GOOD 0
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
-#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
struct qedf_ioreq {
unsigned int alloc;
};
+struct qedf_cmd_priv {
+ struct qedf_ioreq *io_req;
+};
+
+static inline struct qedf_cmd_priv *qedf_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
extern struct workqueue_struct *qedf_io_wq;
struct qedf_rport {
/* Initialize rest of io_req fields */
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
- sc_cmd->SCp.ptr = (char *)io_req;
+ qedf_priv(sc_cmd)->io_req = io_req;
io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
/* Record which cpu this request is associated with */
io_req->fcp_resid = fcp_rsp->fcp_resid;
io_req->scsi_comp_flags = rsp_flags;
- CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
- fcp_rsp->scsi_status_code;
+ io_req->cdb_status = fcp_rsp->scsi_status_code;
if (rsp_flags &
FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
return;
}
- if (!sc_cmd->SCp.ptr) {
- QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
- "another context.\n");
+ if (!qedf_priv(sc_cmd)->io_req) {
+ QEDF_WARN(&(qedf->dbg_ctx),
+ "io_req is NULL, returned in another context.\n");
return;
}
clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
io_req->sc_cmd = NULL;
- sc_cmd->SCp.ptr = NULL;
+ qedf_priv(sc_cmd)->io_req = NULL;
scsi_done(sc_cmd);
kref_put(&io_req->refcount, qedf_release_cmd);
}
goto bad_scsi_ptr;
}
- if (!sc_cmd->SCp.ptr) {
- QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
- "another context.\n");
+ if (!qedf_priv(sc_cmd)->io_req) {
+ QEDF_WARN(&(qedf->dbg_ctx),
+ "io_req is NULL, returned in another context.\n");
return;
}
qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
io_req->sc_cmd = NULL;
- sc_cmd->SCp.ptr = NULL;
+ qedf_priv(sc_cmd)->io_req = NULL;
scsi_done(sc_cmd);
kref_put(&io_req->refcount, qedf_release_cmd);
return;
(tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
"LUN RESET");
- if (sc_cmd->SCp.ptr) {
- io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
+ if (qedf_priv(sc_cmd)->io_req) {
+ io_req = qedf_priv(sc_cmd)->io_req;
ref_cnt = kref_read(&io_req->refcount);
QEDF_ERR(NULL,
"orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
}
- io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
+ io_req = qedf_priv(sc_cmd)->io_req;
if (!io_req) {
QEDF_ERR(&qedf->dbg_ctx,
"sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
.sg_tablesize = QEDF_MAX_BDS_PER_CMD,
.can_queue = FCOE_PARAMS_NUM_TASKS,
.change_queue_depth = scsi_change_queue_depth,
+ .cmd_size = sizeof(struct qedf_cmd_priv),
};
static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
goto error;
}
- if (!sc_cmd->SCp.ptr) {
+ if (!iscsi_cmd(sc_cmd)->task) {
QEDI_WARN(&qedi->dbg_ctx,
- "SCp.ptr is NULL, returned in another context.\n");
+ "NULL task pointer, returned in another context.\n");
goto error;
}
.dma_boundary = QEDI_HW_DMA_BOUNDARY,
.cmd_per_lun = 128,
.shost_groups = qedi_shost_groups,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static void qedi_conn_free_login_resources(struct qedi_ctx *qedi,
iscsi_host_remove(qedi->shost);
if (qedi->tmf_thread) {
- flush_workqueue(qedi->tmf_thread);
destroy_workqueue(qedi->tmf_thread);
qedi->tmf_thread = NULL;
}
if (qedi->offload_thread) {
- flush_workqueue(qedi->offload_thread);
destroy_workqueue(qedi->offload_thread);
qedi->offload_thread = NULL;
}
#endif
-/*
- * We use the scsi_pointer structure that's included with each scsi_command
- * to overlay our struct srb over it. qla1280_init() checks that a srb is not
- * bigger than a scsi_pointer.
- */
-
-#define CMD_SP(Cmnd) &Cmnd->SCp
#define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
#define CMD_CDBP(Cmnd) Cmnd->cmnd
#define CMD_SNSP(Cmnd) Cmnd->sense_buffer
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
- struct srb *sp = (struct srb *)CMD_SP(cmd);
+ struct srb *sp = scsi_cmd_priv(cmd);
int status;
sp->cmd = cmd;
ENTER("qla1280_error_action");
ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
- sp = (struct srb *)CMD_SP(cmd);
+ sp = scsi_cmd_priv(cmd);
bus = SCSI_BUS_32(cmd);
target = SCSI_TCN_32(cmd);
lun = SCSI_LUN_32(cmd);
int i;
ha = (struct scsi_qla_host *)host->hostdata;
- sp = (struct srb *)CMD_SP(cmd);
+ sp = scsi_cmd_priv(cmd);
printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
} */
printk(" tag=%d, transfersize=0x%x \n",
scsi_cmd_to_rq(cmd)->tag, cmd->transfersize);
- printk(" SP=0x%p\n", CMD_SP(cmd));
printk(" underflow size = 0x%x, direction=0x%x\n",
cmd->underflow, cmd->sc_data_direction);
}
.can_queue = MAX_OUTSTANDING_COMMANDS,
.this_id = -1,
.sg_tablesize = SG_ALL,
+ .cmd_size = sizeof(struct srb),
};
static int __init
qla1280_init(void)
{
- if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
- printk(KERN_WARNING
- "qla1280: struct srb too big, aborting\n");
- return -EINVAL;
- }
-
#ifdef MODULE
/*
* If we are called as a module, the qla1280 pointer may not be null
#define RESPONSE_ENTRY_CNT 63 /* Number of response entries. */
/*
- * SCSI Request Block structure (sp) that is placed
- * on cmd->SCp location of every I/O
+ * SCSI Request Block structure (sp) that occurs after each struct scsi_cmnd.
*/
struct srb {
struct list_head list; /* (8/16) LU queue */
if (!capable(CAP_SYS_ADMIN))
return -EINVAL;
- if (IS_NOCACHE_VPD_TYPE(ha))
+ if (!IS_NOCACHE_VPD_TYPE(ha))
goto skip;
faddr = ha->flt_region_vpd << 2;
ql_log(ql_log_info, vha, 0x706f,
"Issuing MPI reset.\n");
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (IS_QLA83XX(ha)) {
uint32_t idc_control;
qla83xx_idc_lock(vha, 0);
continue;
if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
continue;
- if (iter->type == 0x27 &&
- (!IS_QLA27XX(ha) || !IS_QLA28XX(ha)))
- continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
iter->attr);
"%s: sp hdl %x, result=%x bsg ptr %p\n",
__func__, sp->handle, res, bsg_job);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
bsg_reply->result = res;
bsg_job_done(bsg_job, bsg_reply->result,
done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return 0;
}
* code.
*/
void (*put_fn)(struct kref *kref);
+
+ /*
+ * Report completion for asynchronous commands.
+ */
+ void (*async_done)(struct srb *sp, int res);
} srb_t;
#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
/*
* Fibre channel port/lun states.
*/
-#define FCS_UNCONFIGURED 1
-#define FCS_DEVICE_DEAD 2
-#define FCS_DEVICE_LOST 3
-#define FCS_ONLINE 4
+enum {
+ FCS_UNKNOWN,
+ FCS_UNCONFIGURED,
+ FCS_DEVICE_DEAD,
+ FCS_DEVICE_LOST,
+ FCS_ONLINE,
+};
extern const char *const port_state_str[5];
-static const char * const port_dstate_str[] = {
- "DELETED",
- "GNN_ID",
- "GNL",
- "LOGIN_PEND",
- "LOGIN_FAILED",
- "GPDB",
- "UPD_FCPORT",
- "LOGIN_COMPLETE",
- "ADISC",
- "DELETE_PEND",
- "LOGIN_AUTH_PEND",
+static const char *const port_dstate_str[] = {
+ [DSC_DELETED] = "DELETED",
+ [DSC_GNN_ID] = "GNN_ID",
+ [DSC_GNL] = "GNL",
+ [DSC_LOGIN_PEND] = "LOGIN_PEND",
+ [DSC_LOGIN_FAILED] = "LOGIN_FAILED",
+ [DSC_GPDB] = "GPDB",
+ [DSC_UPD_FCPORT] = "UPD_FCPORT",
+ [DSC_LOGIN_COMPLETE] = "LOGIN_COMPLETE",
+ [DSC_ADISC] = "ADISC",
+ [DSC_DELETE_PEND] = "DELETE_PEND",
+ [DSC_LOGIN_AUTH_PEND] = "LOGIN_AUTH_PEND",
};
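
The rewritten table uses designated initializers, so each string is pinned to its DSC_* enumerator rather than to a position; inserting or reordering states can no longer shear the two lists apart. A self-contained illustration (the three-state enum here is hypothetical, not the driver's full list):

    #include <stdio.h>

    enum { ST_DELETED, ST_GNL, ST_LOGIN_PEND };

    static const char *const st_str[] = {
            [ST_DELETED]    = "DELETED",
            [ST_GNL]        = "GNL",
            [ST_LOGIN_PEND] = "LOGIN_PEND",
    };

    int main(void)
    {
            printf("%s\n", st_str[ST_GNL]);  /* prints "GNL" */
            return 0;
    }
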
/*
#define FDMI_PORT_SPEED_8GB 0x10
#define FDMI_PORT_SPEED_16GB 0x20
#define FDMI_PORT_SPEED_32GB 0x40
-#define FDMI_PORT_SPEED_64GB 0x80
+#define FDMI_PORT_SPEED_20GB 0x80
+#define FDMI_PORT_SPEED_40GB 0x100
+#define FDMI_PORT_SPEED_128GB 0x200
+#define FDMI_PORT_SPEED_64GB 0x400
+#define FDMI_PORT_SPEED_256GB 0x800
#define FDMI_PORT_SPEED_UNKNOWN 0x8000
#define FC_CLASS_2 0x04
#define QLA_ABTS_WAIT_ENABLED(_sp) \
(QLA_NVME_IOS(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw))
-#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
-#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
+#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
IS_QLA28XX(ha))
struct workqueue_struct *wq;
struct work_struct heartbeat_work;
struct qlfc_fw fw_buf;
+ unsigned long last_heartbeat_run_jiffies;
/* FCP_CMND priority support */
struct qla_fcp_prio_cfg *fcp_prio_cfg;
/* list of commands waiting on workqueue */
struct list_head qla_cmd_list;
- struct list_head qla_sess_op_cmd_list;
struct list_head unknown_atio_list;
spinlock_t cmd_list_lock;
struct delayed_work unknown_atio_work;
#define QLA_DSDS_PER_IOCB 37
-#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
-
#define QLA_SG_ALL 1024
enum nexus_wait_type {
#include "qla_gbl.h"
#include "qla_dbg.h"
#include "qla_inline.h"
+
+#define IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \
+ _fcport->disc_state == DSC_DELETED)
+
#endif
bsg_job->request_payload.sg_cnt, &appplogiok,
sizeof(struct auth_complete_cmd));
+ /* silence unaligned access warning */
+ portid.b.domain = appplogiok.u.d_id.b.domain;
+ portid.b.area = appplogiok.u.d_id.b.area;
+ portid.b.al_pa = appplogiok.u.d_id.b.al_pa;
+
switch (appplogiok.type) {
case PL_TYPE_WWPN:
fcport = qla2x00_find_fcport_by_wwpn(vha,
__func__, appplogiok.u.wwpn);
break;
case PL_TYPE_DID:
- fcport = qla2x00_find_fcport_by_pid(vha, &appplogiok.u.d_id);
+ fcport = qla2x00_find_fcport_by_pid(vha, &portid);
if (!fcport)
ql_dbg(ql_dbg_edif, vha, 0x911d,
"%s d_id lookup failed: %x\n", __func__,
bsg_job->request_payload.sg_cnt, &appplogifail,
sizeof(struct auth_complete_cmd));
+ /* silence unaligned access warning */
+ portid.b.domain = appplogifail.u.d_id.b.domain;
+ portid.b.area = appplogifail.u.d_id.b.area;
+ portid.b.al_pa = appplogifail.u.d_id.b.al_pa;
+
/*
* TODO: edif: app has failed this plogi. Inform driver to
* take any action (if any).
SET_DID_STATUS(bsg_reply->result, DID_OK);
break;
case PL_TYPE_DID:
- fcport = qla2x00_find_fcport_by_pid(vha, &appplogifail.u.d_id);
+ fcport = qla2x00_find_fcport_by_pid(vha, &portid);
if (!fcport)
ql_dbg(ql_dbg_edif, vha, 0x911d,
"%s d_id lookup failed: %x\n", __func__,
int result = 0;
struct qla_sa_update_frame sa_frame;
struct srb_iocb *iocb_cmd;
+ port_id_t portid;
ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d,
"%s entered, vha: 0x%p\n", __func__, vha);
goto done;
}
- fcport = qla2x00_find_fcport_by_pid(vha, &sa_frame.port_id);
+ /* silence unaligned access warning */
+ portid.b.domain = sa_frame.port_id.b.domain;
+ portid.b.area = sa_frame.port_id.b.area;
+ portid.b.al_pa = sa_frame.port_id.b.al_pa;
+
+ fcport = qla2x00_find_fcport_by_pid(vha, &portid);
if (fcport) {
found = 1;
if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY)
static void qla_noop_sp_done(srb_t *sp, int res)
{
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
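
Every sp->free(sp) in this series becomes a kref_put() against the reference taken at allocation, which the "/* ref: INIT */" comments track. A sketch of the resulting lifetime rule; the function name is illustrative, while qla2x00_get_sp(), qla2x00_start_sp() and qla2x00_sp_release() are the driver's own:

    static int example_async_cmd(scsi_qla_host_t *vha, fc_port_t *fcport)
    {
            srb_t *sp;

            /* ref: INIT - one reference owned by this path */
            sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
            if (!sp)
                    return QLA_FUNCTION_FAILED;

            if (qla2x00_start_sp(sp) != QLA_SUCCESS) {
                    /* ref: INIT - drop it on every terminal path */
                    kref_put(&sp->cmd_kref, qla2x00_sp_release);
                    return QLA_FUNCTION_FAILED;
            }
            return QLA_SUCCESS;     /* done() callback drops the ref later */
    }
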
/*
extern int ql2xsecenable;
extern int ql2xenforce_iocb_limit;
extern int ql2xabts_wait_nvme;
+extern u32 ql2xnvme_queues;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
extern int qla24xx_dif_start_scsi(srb_t *);
extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
extern int qla2xxx_dif_start_scsi_mq(srb_t *);
-extern void qla2x00_init_timer(srb_t *sp, unsigned long tmo);
+extern void qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
+ void (*done)(struct srb *, int));
extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha,
struct qla_work_evt *e);
+void qla2x00_sp_release(struct kref *kref);
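
The new prototype folds timer setup and the done callback into one call; the hunks below then delete the per-caller qla2x00_init_timer() / sp->u.iocb_cmd.timeout / sp->done boilerplate. An assumed shape of the helper, on the assumption that the old timer routine remains available internally:

    void qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
                               void (*done)(struct srb *sp, int res))
    {
            qla2x00_init_timer(sp, tmo);    /* arm the IOCB timer as before */
            sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
            sp->done = done;
    }
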
/*
* Global Function Prototypes in qla_mbx.c source file.
extern int qla81xx_set_led_config(scsi_qla_host_t *, uint16_t *);
extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
-extern char *qdev_state(uint32_t);
+extern const char *qdev_state(uint32_t);
extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
extern int qla82xx_read_temperature(scsi_qla_host_t *);
extern int qla8044_read_temperature(scsi_qla_host_t *);
if (!e)
goto err2;
- del_timer(&sp->u.iocb_cmd.timer);
e->u.iosb.sp = sp;
qla2x00_post_work(vha, e);
return;
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
- sp->free(sp);
-
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
if (!vha->flags.online)
goto done;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "rft_id";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_sns_sp_done);
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_sns_sp_done;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - hdl=%x portid %06x.\n",
}
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
return (QLA_SUCCESS);
}
- return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
- FC4_TYPE_FCP_SCSI);
+ return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type);
}
static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
srb_t *sp;
struct ct_sns_pkt *ct_sns;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "rff_id";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_sns_sp_done);
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
/* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
ct_req->req.rff_id.fc4_feature = fc4feature;
- ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
+ ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */
sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_sns_sp_done;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - hdl=%x portid %06x feature %x type %x.\n",
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
srb_t *sp;
struct ct_sns_pkt *ct_sns;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "rnid";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_sns_sp_done);
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_sns_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - hdl=%x portid %06x\n",
sp->name, sp->handle, d_id->b24);
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
srb_t *sp;
struct ct_sns_pkt *ct_sns;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "rsnn_nn";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_sns_sp_done);
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_sns_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - hdl=%x.\n",
sp->name, sp->handle);
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
size += alen;
ql_dbg(ql_dbg_disc, vha, 0x20aa,
"CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len));
- /* Node Sybolic Name */
+ /* Node Symbolic Name */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
qla24xx_handle_gpsc_event(vha, &ea);
done:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->name = "gpsc";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla24xx_async_gpsc_sp_done);
/* CT_IU preamble */
ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla24xx_async_gpsc_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0x205e,
"Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
sp->name, fcport->port_name, sp->handle,
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
break;
}
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
if (res) {
if (res == QLA_FUNCTION_TIMEOUT) {
qla24xx_post_gpnid_work(sp->vha, &ea.id);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
} else if (sp->gen1) {
/* There was another RSCN for this Nport ID */
qla24xx_post_gpnid_work(sp->vha, &ea.id);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
if (!vha->flags.online)
goto done;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
sp->name = "gpnid";
sp->u.iocb_cmd.u.ctarg.id = *id;
sp->gen1 = 0;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gpnid_sp_done);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
list_for_each_entry(tsp, &vha->gpnid_list, elem) {
if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
tsp->gen1++;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
goto done;
}
}
sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_gpnid_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0x2067,
"Async-%s hdl=%x ID %3phC.\n", sp->name,
sp->handle, &ct_req->req.port_id.port_id);
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
-
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
ea.rc = res;
qla24xx_handle_gffid_event(vha, &ea);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
/* Get FC4 Feature with Nport ID. */
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
return rval;
sp->name = "gffid";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla24xx_async_gffid_sp_done);
/* CT_IU preamble */
ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla24xx_async_gffid_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0x2132,
"Async-%s hdl=%x %8phC.\n", sp->name,
sp->handle, fcport->port_name);
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
"Async done-%s res %x FC4Type %x\n",
sp->name, res, sp->gen2);
- del_timer(&sp->u.iocb_cmd.timer);
sp->rc = res;
if (res) {
unsigned long flags;
sp->name = "gnnft";
sp->gen1 = vha->hw->base_qpair->chip_reset;
sp->gen2 = fc4_type;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gpnft_gnnft_sp_done);
memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla2x00_async_gpnft_gnnft_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s hdl=%x FC4Type %x.\n", sp->name,
sp->handle, ct_req->req.gpn_ft.port_type);
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
-
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s: Performing FCP Scan\n", __func__);
- if (sp)
- sp->free(sp); /* should not happen */
+ if (sp) {
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ }
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp) {
spin_lock_irqsave(&vha->work_lock, flags);
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
+ /* ref: INIT */
qla2x00_rel_sp(sp);
return rval;
}
sp->name = "gpnft";
sp->gen1 = vha->hw->base_qpair->chip_reset;
sp->gen2 = fc4_type;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gpnft_gnnft_sp_done);
rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla2x00_async_gpnft_gnnft_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s hdl=%x FC4Type %x.\n", sp->name,
sp->handle, ct_req->req.gpn_ft.port_type);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
qla24xx_handle_gnnid_event(vha, &ea);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
return rval;
qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
sp->name = "gnnid";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gnnid_sp_done);
/* CT_IU preamble */
ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla2x00_async_gnnid_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
sp->name, fcport->port_name,
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
qla24xx_handle_gfpnid_event(vha, &ea);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
sp->name = "gfpnid";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gfpnid_sp_done);
/* CT_IU preamble */
ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla2x00_async_gfpnid_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
sp->name, fcport->port_name,
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
WARN_ON(irqs_disabled());
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
+
+ /* ref: TMR */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
void qla2x00_sp_free(srb_t *sp)
}
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- if (sp->cmd_sp)
+ if (sp->cmd_sp) {
+ /*
+ * This done function should take care of
+ * original command ref: INIT
+ */
sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
+ }
abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
sp->done(sp, QLA_OS_TIMER_EXPIRED);
if (orig_sp)
qla_wait_nvme_release_cmd_kref(orig_sp);
- del_timer(&sp->u.iocb_cmd.timer);
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&abt->u.abt.comp);
else
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ /* ref: INIT for ABTS command */
sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
GFP_ATOMIC);
if (!sp)
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
- abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
/* FW can send 2 x ABTS's timeout/20s */
- qla2x00_init_timer(sp, 42);
+ qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done);
+ sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout;
abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
- sp->done = qla24xx_abort_sp_done;
-
ql_dbg(ql_dbg_async, vha, 0x507c,
"Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
cmd_sp->type);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return rval;
}
wait_for_completion(&abt_iocb->u.abt.comp);
rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
QLA_SUCCESS : QLA_ERR_FROM_FW;
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
return rval;
ea.iop[0] = lio->u.logio.iop[0];
ea.iop[1] = lio->u.logio.iop[1];
ea.sp = sp;
+ if (res)
+ ea.data[0] = MBS_COMMAND_ERROR;
qla24xx_handle_plogi_done_event(vha, &ea);
}
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
return rval;
}
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->name = "login";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_login_sp_done);
lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- sp->done = qla2x00_async_login_sp_done;
if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
} else {
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
fcport->flags &= ~FCF_ASYNC_ACTIVE;
sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
sp->fcport->login_gen++;
qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
- struct srb_iocb *lio;
int rval = QLA_FUNCTION_FAILED;
fcport->flags |= FCF_ASYNC_SENT;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_LOGOUT_CMD;
sp->name = "logout";
-
- lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- sp->done = qla2x00_async_logout_sp_done;
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_logout_sp_done);
ql_dbg(ql_dbg_disc, vha, 0x2070,
"Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
return rval;
if (!test_bit(UNLOADING, &vha->dpc_flags))
qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
lio->u.logio.data);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
- struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_PRLO_CMD;
sp->name = "prlo";
-
- lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- sp->done = qla2x00_async_prlo_sp_done;
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_prlo_sp_done);
ql_dbg(ql_dbg_disc, vha, 0x2070,
"Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
ea.iop[1] = lio->u.logio.iop[1];
ea.fcport = sp->fcport;
ea.sp = sp;
+ if (res)
+ ea.data[0] = MBS_COMMAND_ERROR;
qla24xx_handle_adisc_event(vha, &ea);
-
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
struct srb_iocb *lio;
int rval = QLA_FUNCTION_FAILED;
+ if (IS_SESSION_DELETED(fcport)) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: %8phC is being delete - not sending command.\n",
+ __func__, fcport->port_name);
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
+ return rval;
+ }
+
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
fcport->flags |= FCF_ASYNC_SENT;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_ADISC_CMD;
sp->name = "adisc";
-
- lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_adisc_sp_done);
- sp->done = qla2x00_async_adisc_sp_done;
- if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+ if (data[1] & QLA_LOGIO_LOGIN_RETRIED) {
+ lio = &sp->u.iocb_cmd;
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
+ }
ql_dbg(ql_dbg_disc, vha, 0x206f,
"Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
qla2x00_post_async_adisc_work(vha, fcport, data);
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
break;
+ case ISP_CFG_NL:
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
default:
break;
}
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
- struct srb_iocb *mbx;
int rval = QLA_FUNCTION_FAILED;
unsigned long flags;
u16 *mb;
vha->gnl.sent = 1;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->name = "gnlist";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- mbx = &sp->u.iocb_cmd;
- mbx->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla24xx_async_gnl_sp_done);
mb = sp->u.iocb_cmd.u.mbx.out_mb;
mb[0] = MBC_PORT_NODE_NAME_LIST;
mb[8] = vha->gnl.size;
mb[9] = vha->vp_idx;
- sp->done = qla24xx_async_gnl_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0x20da,
"Async-%s - OUT WWPN %8phC hndl %x\n",
sp->name, fcport->port_name, sp->handle);
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
return rval;
dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
sp->u.iocb_cmd.u.mbx.in_dma);
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
ea.sp = sp;
if (res == QLA_OS_TIMER_EXPIRED)
ea.data[0] = QLA_OS_TIMER_EXPIRED;
+ else if (res)
+ ea.data[0] = MBS_COMMAND_ERROR;
qla24xx_handle_prli_done_event(vha, &ea);
}
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
sp->type = SRB_PRLI_CMD;
sp->name = "prli";
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_prli_sp_done);
lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- sp->done = qla2x00_async_prli_sp_done;
lio->u.logio.flags = 0;
if (NVME_TARGET(vha->hw, fcport))
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
struct port_database_24xx *pd;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
- fcport->loop_id == FC_NO_LOOP_ID) {
+ if (IS_SESSION_DELETED(fcport)) {
ql_log(ql_log_warn, vha, 0xffff,
- "%s: %8phC - not sending command.\n",
- __func__, fcport->port_name);
+ "%s: %8phC is being delete - not sending command.\n",
+ __func__, fcport->port_name);
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
}
+ if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: %8phC online %d flags %x - not sending command.\n",
+ __func__, fcport->port_name, vha->flags.online, fcport->flags);
+ goto done;
+ }
+
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->name = "gpdb";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- mbx = &sp->u.iocb_cmd;
- mbx->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla24xx_async_gpdb_sp_done);
pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
mb[9] = vha->vp_idx;
mb[10] = opt;
- mbx->u.mbx.in = pd;
+ mbx = &sp->u.iocb_cmd;
+ mbx->u.mbx.in = (void *)pd;
mbx->u.mbx.in_dma = pd_dma;
- sp->done = qla24xx_async_gpdb_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0x20dc,
"Async-%s %8phC hndl %x opt %x\n",
sp->name, fcport->port_name, sp->handle, opt);
if (pd)
dma_pool_free(ha->s_dma_pool, pd, pd_dma);
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
fcport->flags &= ~FCF_ASYNC_ACTIVE;
u8 login = 0;
int rc;
+ ql_dbg(ql_dbg_disc, vha, 0x307b,
+ "%s %8phC DS %d LS %d lid %d retries=%d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, fcport->loop_id, fcport->login_retry);
+
if (qla_tgt_mode_enabled(vha))
return;
fcport->login_gen, fcport->loop_id, fcport->scan_state,
fcport->fc4_type);
- if (fcport->scan_state != QLA_FCPORT_FOUND)
+ if (fcport->scan_state != QLA_FCPORT_FOUND ||
+ fcport->disc_state == DSC_DELETE_PEND)
return 0;
if ((fcport->loop_id != FC_NO_LOOP_ID) &&
if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
return 0;
- if (fcport->flags & FCF_ASYNC_SENT) {
+ if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) {
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return 0;
}
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- tm_iocb = &sp->u.iocb_cmd;
sp->type = SRB_TM_CMD;
sp->name = "tmf";
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
+ qla2x00_tmf_sp_done);
+ sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
- tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
+ tm_iocb = &sp->u.iocb_cmd;
init_completion(&tm_iocb->u.tmf.comp);
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
-
tm_iocb->u.tmf.flags = flags;
tm_iocb->u.tmf.lun = lun;
- tm_iocb->u.tmf.data = tag;
- sp->done = qla2x00_tmf_sp_done;
ql_dbg(ql_dbg_taskm, vha, 0x802f,
"Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
}
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
break;
default:
- if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
- (ea->iop[1] == 0x50000)) { /* reson 5=busy expl:0x0 */
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
- break;
- }
-
sp = ea->sp;
ql_dbg(ql_dbg_disc, vha, 0x2118,
"%s %d %8phC priority %s, fc4type %x prev try %s\n",
ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
__func__, __LINE__, ea->fcport->port_name, ea->data[1]);
- ea->fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
- if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- else
- qla2x00_mark_device_lost(vha, ea->fcport, 1);
+ qlt_schedule_sess_for_deletion(ea->fcport);
break;
case MBS_LOOP_ID_USED:
/* data[1] = IO PARAM 1 = nport ID */
struct rsp_que *rsp = ha->rsp_q_map[0];
struct qla2xxx_fw_dump *fw_dump;
+ if (ha->fw_dump) {
+ ql_dbg(ql_dbg_init, vha, 0x00bd,
+ "Firmware dump already allocated.\n");
+ return;
+ }
+
+ ha->fw_dumped = 0;
+ ha->fw_dump_cap_flags = 0;
dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
req_q_size = rsp_q_size = 0;
mem_size = (ha->fw_memory_size - 0x11000 + 1) *
sizeof(uint16_t);
} else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ if (IS_QLA83XX(ha))
fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
else if (IS_QLA81XX(ha))
fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
if (ha->mqenable) {
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
- !IS_QLA28XX(ha))
+ if (!IS_QLA83XX(ha))
mq_size = sizeof(struct qla2xxx_mq_chain);
/*
* Allocate maximum buffer size for all queues - Q0.
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version);
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) {
+ if (IS_QLA83XX(ha)) {
ha->flags.fac_supported = 0;
rval = QLA_SUCCESS;
}
memcpy(fcport->node_name, new_fcport->node_name,
WWN_SIZE);
fcport->scan_state = QLA_FCPORT_FOUND;
+ if (fcport->login_retry == 0) {
+ fcport->login_retry = vha->hw->login_retry_count;
+ ql_dbg(ql_dbg_disc, vha, 0x2135,
+ "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
+ fcport->port_name, fcport->loop_id,
+ fcport->login_retry);
+ }
found++;
break;
}
if (atomic_read(&fcport->state) == FCS_ONLINE)
return;
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
rport_ids.node_name = wwn_to_u64(fcport->node_name);
rport_ids.port_name = wwn_to_u64(fcport->port_name);
rport_ids.port_id = fcport->d_id.b.domain << 16 |
qla2x00_reg_remote_port(vha, fcport);
break;
case MODE_TARGET:
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
if (!vha->vha_tgt.qla_tgt->tgt_stop &&
!vha->vha_tgt.qla_tgt->tgt_stopped)
qlt_fc_port_added(vha, fcport);
if (NVME_TARGET(vha->hw, fcport))
qla_nvme_register_remote(vha, fcport);
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
-
if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
if (fcport->id_changed) {
fcport->id_changed = 0;
return rval;
}
-static const char *
-qla83xx_dev_state_to_string(uint32_t dev_state)
-{
- switch (dev_state) {
- case QLA8XXX_DEV_COLD:
- return "COLD/RE-INIT";
- case QLA8XXX_DEV_INITIALIZING:
- return "INITIALIZING";
- case QLA8XXX_DEV_READY:
- return "READY";
- case QLA8XXX_DEV_NEED_RESET:
- return "NEED RESET";
- case QLA8XXX_DEV_NEED_QUIESCENT:
- return "NEED QUIESCENT";
- case QLA8XXX_DEV_FAILED:
- return "FAILED";
- case QLA8XXX_DEV_QUIESCENT:
- return "QUIESCENT";
- default:
- return "Unknown";
- }
-}
-
/* Assumes idc-lock always held on entry */
void
qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
} else {
- const char *state = qla83xx_dev_state_to_string(dev_state);
-
- ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
+ ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n",
+ qdev_state(dev_state));
/* SV: XXX: Is timeout required here? */
/* Wait for IDC state change READY -> NEED_RESET */
qpair->rsp->req = qpair->req;
qpair->rsp->qpair = qpair;
/* init qpair to this cpu. Will adjust at run time. */
- qla_cpu_update(qpair, smp_processor_id());
+ qla_cpu_update(qpair, raw_smp_processor_id());
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4)
sp->vha = vha;
sp->qpair = qpair;
sp->cmd_type = TYPE_SRB;
+ /* ref : INIT - normal flow */
+ kref_init(&sp->cmd_kref);
INIT_LIST_HEAD(&sp->elem);
}
}
}
-void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
+static void
+qla2x00_async_done(struct srb *sp, int res)
+{
+ if (del_timer(&sp->u.iocb_cmd.timer)) {
+ /*
+ * Successfully cancelled the timeout handler, so
+ * drop ref: TMR
+ */
+ if (kref_put(&sp->cmd_kref, qla2x00_sp_release))
+ return;
+ }
+ sp->async_done(sp, res);
+}
+
+void
+qla2x00_sp_release(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+
+ sp->free(sp);
+}
+
+void
+qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
+ void (*done)(struct srb *sp, int res))
{
timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
- sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
+ sp->done = qla2x00_async_done;
+ sp->async_done = done;
sp->free = qla2x00_sp_free;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
sp->start_timer = 1;
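
The refcounting scheme introduced above is easiest to follow in isolation. The sketch below is a minimal, hypothetical reduction (the demo_* names are invented; none of this is driver code): the INIT reference is taken at allocation, the TMR reference just before the timer is armed, and each path drops exactly the reference it owns, so the object is freed exactly once whether the timer fired or was cancelled.

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/timer.h>

struct demo_sp {
	struct kref cmd_kref;
	struct timer_list timer;
};

static void demo_sp_release(struct kref *kref)
{
	struct demo_sp *sp = container_of(kref, struct demo_sp, cmd_kref);

	kfree(sp);
}

static void demo_timeout(struct timer_list *t)
{
	struct demo_sp *sp = from_timer(sp, t, timer);

	/* ... handle the timed-out command ... */
	kref_put(&sp->cmd_kref, demo_sp_release);	/* drop ref: TMR */
}

static struct demo_sp *demo_start(void)
{
	struct demo_sp *sp = kzalloc(sizeof(*sp), GFP_KERNEL);

	if (!sp)
		return NULL;
	kref_init(&sp->cmd_kref);		/* ref: INIT */
	timer_setup(&sp->timer, demo_timeout, 0);
	kref_get(&sp->cmd_kref);		/* ref: TMR, armed below */
	mod_timer(&sp->timer, jiffies + 10 * HZ);
	return sp;
}

static void demo_done(struct demo_sp *sp)
{
	/* A nonzero del_timer() means the handler never ran: drop ref: TMR. */
	if (del_timer(&sp->timer))
		kref_put(&sp->cmd_kref, demo_sp_release);
	kref_put(&sp->cmd_kref, demo_sp_release);	/* drop ref: INIT */
}

This mirrors qla2x00_async_done() above, where a successful del_timer() drops the TMR reference before the completion callback runs.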
return -ENOMEM;
}
- /* Alloc SRB structure */
+ /*
+ * Alloc SRB structure
+ * ref: INIT
+ */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
kfree(fcport);
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
- elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
- init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
- sp->done = qla2x00_els_dcmd_sp_done;
+ qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT,
+ qla2x00_els_dcmd_sp_done);
sp->free = qla2x00_els_dcmd_sp_free;
+ sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout;
+ init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
GFP_KERNEL);
if (!elsio->u.els_logo.els_logo_pyld) {
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return QLA_FUNCTION_FAILED;
}
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return QLA_FUNCTION_FAILED;
}
wait_for_completion(&elsio->u.els_logo.comp);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return rval;
}
sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
- del_timer(&sp->u.iocb_cmd.timer);
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
qla2xxx_wake_dpc(vha);
+ break;
}
fallthrough;
default:
fw_status[0], fw_status[1], fw_status[2]);
fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_set_fcport_disc_state(fcport,
- DSC_LOGIN_FAILED);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ qlt_schedule_sess_for_deletion(fcport);
break;
}
break;
fw_status[0], fw_status[1], fw_status[2]);
sp->fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ qlt_schedule_sess_for_deletion(fcport);
break;
}
struct srb_iocb *elsio = &sp->u.iocb_cmd;
qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
e->u.iosb.sp = sp;
int rval = QLA_SUCCESS;
void *ptr, *resp_ptr;
- /* Alloc SRB structure */
+ /*
+ * Alloc SRB structure
+ * ref: INIT
+ */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
ql_log(ql_log_info, vha, 0x70e6,
ql_dbg(ql_dbg_io, vha, 0x3073,
"%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
- sp->type = SRB_ELS_DCMD;
- sp->name = "ELS_DCMD";
- sp->fcport = fcport;
-
- elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
+ sp->type = SRB_ELS_DCMD;
+ sp->name = "ELS_DCMD";
+ sp->fcport = fcport;
+ qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2,
+ qla2x00_els_dcmd2_sp_done);
+ sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
- sp->done = qla2x00_els_dcmd2_sp_done;
elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
ptr = elsio->u.els_plogi.els_plogi_pyld =
out:
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
break;
}
- if (sp->start_timer)
+ if (sp->start_timer) {
+ /*
+ * ref: TMR (timer reference)
+ * This must sit just before the start_iocbs call so
+ * that the caller never has to do a kref_put, even
+ * on failure.
+ */
+ kref_get(&sp->cmd_kref);
add_timer(&sp->u.iocb_cmd.timer);
+ }
wmb();
qla2x00_start_iocbs(vha, qp->req);
}
const char *const port_state_str[] = {
- "Unknown",
- "UNCONFIGURED",
- "DEAD",
- "LOST",
- "ONLINE"
+ [FCS_UNKNOWN] = "Unknown",
+ [FCS_UNCONFIGURED] = "UNCONFIGURED",
+ [FCS_DEVICE_DEAD] = "DEAD",
+ [FCS_DEVICE_LOST] = "LOST",
+ [FCS_ONLINE] = "ONLINE"
};
static void
iocb->u.tmf.data = QLA_FUNCTION_FAILED;
} else if ((le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
+ host_to_fcp_swap(sts->data, sizeof(sts->data));
if (le32_to_cpu(sts->rsp_data_len) < 4) {
ql_log(ql_log_warn, fcport->vha, 0x503b,
"Async-%s error - hdl=%x not enough response(%d).\n",
#include <linux/delay.h>
#include <linux/gfp.h>
+#ifdef CONFIG_PPC
+#define IS_PPCARCH true
+#else
+#define IS_PPCARCH false
+#endif
+
static struct mb_cmd_name {
uint16_t cmd;
const char *str;
mbx_cmd_t *mcp = &mc;
u8 semaphore = 0;
#define EXE_FW_FORCE_SEMAPHORE BIT_7
- u8 retry = 3;
+ u8 retry = 5;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
"Entered %s.\n", __func__);
vha->min_supported_speed =
nv->min_supported_speed;
}
+
+ if (IS_PPCARCH)
+ mcp->mb[11] |= BIT_4;
}
if (ha->flags.exlogins_enabled)
goto again;
}
+ if (retry) {
+ retry--;
+ ql_dbg(ql_dbg_async, vha, 0x509d,
+ "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry);
+ goto again;
+ }
ql_dbg(ql_dbg_mbx, vha, 0x1026,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
vha->hw_err_cnt++;
ha->orig_fw_iocb_count = mcp->mb[10];
if (ha->flags.npiv_supported)
ha->max_npiv_vports = mcp->mb[11];
- if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha))
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
ha->fw_max_fcf_count = mcp->mb[12];
}
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
- mcp->in_mb |= MBX_3;
+ mcp->in_mb |= MBX_4|MBX_3;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (!vha->hw->flags.fw_started)
goto done;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
- sp->type = SRB_MB_IOCB;
- sp->name = mb_to_str(mcp->mb[0]);
-
c = &sp->u.iocb_cmd;
- c->timeout = qla2x00_async_iocb_timeout;
init_completion(&c->u.mbx.comp);
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ sp->type = SRB_MB_IOCB;
+ sp->name = mb_to_str(mcp->mb[0]);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_mb_sp_done);
memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
- sp->done = qla2x00_async_mb_sp_done;
-
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1018,
}
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
return QLA_PARAMETER_ERROR;
+ /* ref: INIT */
sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
if (!sp)
return rval;
sp->type = SRB_CTRL_VP;
sp->name = "ctrl_vp";
sp->comp = &comp;
- sp->done = qla_ctrlvp_sp_done;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla_ctrlvp_sp_done);
sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
break;
}
done:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return rval;
}
struct register_host_info *preg_hsi;
struct new_utsname *p_sysid = NULL;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_FXIOCB_DCMD;
sp->name = "fxdisc";
+ qla2x00_init_async_sp(sp, FXDISC_TIMEOUT,
+ qla2x00_fxdisc_sp_done);
+ sp->u.iocb_cmd.timeout = qla2x00_fxdisc_iocb_timeout;
fdisc = &sp->u.iocb_cmd;
- fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
- qla2x00_init_timer(sp, FXDISC_TIMEOUT);
-
switch (fx_type) {
case FXDISC_GET_CONFIG_INFO:
fdisc->u.fxiocb.flags =
}
fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
- sp->done = qla2x00_fxdisc_sp_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
(fcport->nvme_flag & NVME_FLAG_REGISTERED))
return 0;
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ return 0;
+
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
memset(&req, 0, sizeof(struct nvme_fc_port_info));
qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
+static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
+{
+ if (sp->flags & SRB_DMA_VALID) {
+ struct srb_iocb *nvme = &sp->u.iocb_cmd;
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+
+ dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+ fd->rqstlen, DMA_TO_DEVICE);
+ sp->flags &= ~SRB_DMA_VALID;
+ }
+}
+
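
qla_nvme_ls_unmap() is the usual map/flag/unmap pairing: a validity flag set at map time lets every completion and error path unmap exactly once. The same pattern in generic form (demo_* names invented, not driver code):

#include <linux/dma-mapping.h>

struct demo_req {
	dma_addr_t dma;
	bool dma_valid;
};

static int demo_map(struct device *dev, struct demo_req *r,
		    void *buf, size_t len)
{
	r->dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, r->dma))
		return -ENOMEM;
	r->dma_valid = true;		/* analogous to SRB_DMA_VALID */
	return 0;
}

static void demo_unmap(struct device *dev, struct demo_req *r, size_t len)
{
	if (r->dma_valid) {		/* safe to call on every path */
		dma_unmap_single(dev, r->dma, len, DMA_TO_DEVICE);
		r->dma_valid = false;
	}
}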
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
struct srb *sp = container_of(kref, struct srb, cmd_kref);
spin_unlock_irqrestore(&priv->cmd_lock, flags);
fd = priv->fd;
+
+ qla_nvme_ls_unmap(sp, fd);
fd->done(fd, priv->comp_status);
out:
qla2x00_rel_sp(sp);
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
fd->rqstlen, DMA_TO_DEVICE);
+ sp->flags |= SRB_DMA_VALID;
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
wake_up(&sp->nvme_ls_waitq);
sp->priv = NULL;
priv->sp = NULL;
+ qla_nvme_ls_unmap(sp, fd);
qla2x00_rel_sp(sp);
return rval;
}
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
.map_queues = qla_nvme_map_queues,
- .max_hw_queues = 8,
+ .max_hw_queues = DEF_NVME_HW_QUEUES,
.max_sgl_segments = 1024,
.max_dif_sgl_segments = 64,
.dma_boundary = 0xFFFFFFFF,
ha = vha->hw;
tmpl = &qla_nvme_fc_transport;
- WARN_ON(vha->nvme_local_port);
+ if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
+ ql_log(ql_log_warn, vha, 0xfffd,
+ "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to:%d\n",
+ ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
+ ql2xnvme_queues = DEF_NVME_HW_QUEUES;
+ } else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
+ ql_log(ql_log_warn, vha, 0xfffd,
+ "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
+ ql2xnvme_queues, (ha->max_qpairs - 1),
+ (ha->max_qpairs - 1));
+ ql2xnvme_queues = ha->max_qpairs - 1;
+ }
qla_nvme_fc_transport.max_hw_queues =
- min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
- (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));
+ min((uint8_t)(ql2xnvme_queues),
+ (uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));
+
+ ql_log(ql_log_info, vha, 0xfffb,
+ "Number of NVME queues used for this port: %d\n",
+ qla_nvme_fc_transport.max_hw_queues);
pinfo.node_name = wwn_to_u64(vha->node_name);
pinfo.port_name = wwn_to_u64(vha->port_name);
pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
pinfo.port_id = vha->d_id.b24;
- ql_log(ql_log_info, vha, 0xffff,
- "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
- pinfo.node_name, pinfo.port_name, pinfo.port_id);
- qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
-
- ret = nvme_fc_register_localport(&pinfo, tmpl,
- get_device(&ha->pdev->dev), &vha->nvme_local_port);
+ mutex_lock(&ha->vport_lock);
+ /*
+ * Check again for nvme_local_port to see if any other thread raced
+ * with this one and finished registration.
+ */
+ if (!vha->nvme_local_port) {
+ ql_log(ql_log_info, vha, 0xffff,
+ "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
+ pinfo.node_name, pinfo.port_name, pinfo.port_id);
+ qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
+
+ ret = nvme_fc_register_localport(&pinfo, tmpl,
+ get_device(&ha->pdev->dev),
+ &vha->nvme_local_port);
+ mutex_unlock(&ha->vport_lock);
+ } else {
+ mutex_unlock(&ha->vport_lock);
+ return 0;
+ }
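
The vport_lock section above is the classic check/lock/re-check shape: the unlocked fast path skips the mutex once registration has happened, and the re-check under the lock guarantees that only one of several racing threads registers the local port. Generically (demo_* names invented; demo_create_port is a hypothetical constructor):

#include <linux/errno.h>
#include <linux/mutex.h>

struct demo_port;
static struct demo_port *demo_port;	/* shared, written once */
static DEFINE_MUTEX(demo_lock);

static int demo_create_port(struct demo_port **pp);	/* hypothetical */

static int demo_register(void)
{
	int ret = 0;

	if (demo_port)			/* fast path, no lock taken */
		return 0;
	mutex_lock(&demo_lock);
	if (!demo_port)			/* re-check: a racer may have won */
		ret = demo_create_port(&demo_port);
	mutex_unlock(&demo_lock);
	return ret;
}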
if (ret) {
ql_log(ql_log_warn, vha, 0xffff,
"register_localport failed: ret=%x\n", ret);
#include "qla_def.h"
#include "qla_dsd.h"
+#define MIN_NVME_HW_QUEUES 1
+#define DEF_NVME_HW_QUEUES 8
+
#define NVME_ATIO_CMD_OFF 32
#define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
#define Q2T_NVME_NUM_TAGS 2048
};
/* Device states */
-static char *q_dev_state[] = {
- "Unknown",
- "Cold",
- "Initializing",
- "Ready",
- "Need Reset",
- "Need Quiescent",
- "Failed",
- "Quiescent",
+static const char *const q_dev_state[] = {
+ [QLA8XXX_DEV_UNKNOWN] = "Unknown",
+ [QLA8XXX_DEV_COLD] = "Cold/Re-init",
+ [QLA8XXX_DEV_INITIALIZING] = "Initializing",
+ [QLA8XXX_DEV_READY] = "Ready",
+ [QLA8XXX_DEV_NEED_RESET] = "Need Reset",
+ [QLA8XXX_DEV_NEED_QUIESCENT] = "Need Quiescent",
+ [QLA8XXX_DEV_FAILED] = "Failed",
+ [QLA8XXX_DEV_QUIESCENT] = "Quiescent",
};
-char *qdev_state(uint32_t dev_state)
+const char *qdev_state(uint32_t dev_state)
{
- return q_dev_state[dev_state];
+ return (dev_state < MAX_STATES) ? q_dev_state[dev_state] : "Unknown";
}
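
Both tables converted in this series (port_state_str above and q_dev_state here) now use designated initializers, so an entry can no longer drift out of position when the enum gains a state. A compile-time size check is a natural companion; the guard below is a suggestion placed next to the table, not part of the patch:

#include <linux/build_bug.h>
#include <linux/kernel.h>

/* Fails the build if a state is added to the enum without a name. */
static inline void demo_check_dev_state_table(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(q_dev_state) != MAX_STATES);
}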
/*
ql_log(ql_log_info, vha, 0x00b6,
"Device state is 0x%x = %s.\n",
- dev_state,
- dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
/* Force to DEV_COLD unless someone else is starting a reset */
if (dev_state != QLA8XXX_DEV_INITIALIZING &&
old_dev_state = dev_state;
ql_log(ql_log_info, vha, 0x009b,
"Device state is 0x%x = %s.\n",
- dev_state,
- dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
if (loopcount < 5) {
ql_log(ql_log_info, vha, 0x009d,
"Device state is 0x%x = %s.\n",
- dev_state,
- dev_state < MAX_STATES ? qdev_state(dev_state) :
- "Unknown");
+ dev_state, qdev_state(dev_state));
}
switch (dev_state) {
} else
ql_log(ql_log_info, vha, 0xb031,
"Device state is 0x%x = %s.\n",
- dev_state,
- dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
}
/*
#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
/* Every driver should use these Device State */
-#define QLA8XXX_DEV_COLD 1
-#define QLA8XXX_DEV_INITIALIZING 2
-#define QLA8XXX_DEV_READY 3
-#define QLA8XXX_DEV_NEED_RESET 4
-#define QLA8XXX_DEV_NEED_QUIESCENT 5
-#define QLA8XXX_DEV_FAILED 6
-#define QLA8XXX_DEV_QUIESCENT 7
-#define MAX_STATES 8 /* Increment if new state added */
+enum {
+ QLA8XXX_DEV_UNKNOWN,
+ QLA8XXX_DEV_COLD,
+ QLA8XXX_DEV_INITIALIZING,
+ QLA8XXX_DEV_READY,
+ QLA8XXX_DEV_NEED_RESET,
+ QLA8XXX_DEV_NEED_QUIESCENT,
+ QLA8XXX_DEV_FAILED,
+ QLA8XXX_DEV_QUIESCENT,
+ MAX_STATES, /* Increment if new state added */
+};
+
#define QLA8XXX_BAD_VALUE 0xbad0bad0
#define QLA82XX_IDC_VERSION 1
dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
"Device state is 0x%x = %s\n",
- dev_state, dev_state < MAX_STATES ?
- qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
ql_log(ql_log_warn, vha, 0xb0cf,
"%s: Device Init Failed 0x%x = %s\n",
QLA2XXX_DRIVER_NAME, dev_state,
- dev_state < MAX_STATES ?
- qdev_state(dev_state) : "Unknown");
+ qdev_state(dev_state));
qla8044_wr_direct(vha,
QLA8044_CRB_DEV_STATE_INDEX,
QLA8XXX_DEV_FAILED);
dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
ql_log(ql_log_info, vha, 0xb0d0,
"Device state is 0x%x = %s\n",
- dev_state, dev_state < MAX_STATES ?
- qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
/* NOTE: Make sure idc unlocked upon exit of switch statement */
switch (dev_state) {
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
+u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
+module_param(ql2xnvme_queues, uint, S_IRUGO);
+MODULE_PARM_DESC(ql2xnvme_queues,
+ "Number of NVMe Queues that can be configured.\n"
+ "Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n"
+ "1 - Minimum number of queues supported\n"
+ "8 - Default value");
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- sp->free(sp);
+ /* kref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
cmd->result = res;
- CMD_SP(cmd) = NULL;
+ sp->type = 0;
scsi_done(cmd);
if (comp)
complete(comp);
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
cmd->result = res;
- CMD_SP(cmd) = NULL;
+ sp->type = 0;
scsi_done(cmd);
if (comp)
complete(comp);
goto qc24_target_busy;
sp = scsi_cmd_priv(cmd);
+ /* ref: INIT */
qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
-
- CMD_SP(cmd) = (void *)sp;
sp->free = qla2x00_sp_free_dma;
sp->done = qla2x00_sp_compl;
return 0;
qc24_host_busy_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
goto qc24_target_busy;
sp = scsi_cmd_priv(cmd);
+ /* ref: INIT */
qla2xxx_init_sp(sp, vha, qpair, fcport);
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
- CMD_SP(cmd) = (void *)sp;
sp->free = qla2xxx_qpair_sp_free_dma;
sp->done = qla2xxx_qpair_sp_compl;
return 0;
qc24_host_busy_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
unsigned long wait_iter = ABORT_WAIT_ITER;
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
struct qla_hw_data *ha = vha->hw;
+ srb_t *sp = scsi_cmd_priv(cmd);
int ret = QLA_SUCCESS;
if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
return ret;
}
- while (CMD_SP(cmd) && wait_iter--) {
+ while (sp->type && wait_iter--)
msleep(ABORT_POLLING_PERIOD);
- }
- if (CMD_SP(cmd))
+ if (sp->type)
ret = QLA_FUNCTION_FAILED;
return ret;
if (ha->mqiobase)
iounmap(ha->mqiobase);
- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
- ha->msixbase)
+ if (ha->msixbase)
iounmap(ha->msixbase);
}
}
spin_lock_irqsave(&list->lock, flags);
list_for_each_entry_safe(item, next, &list->head, list) {
list_del(&item->list);
+ if (item == &item->vha->default_item)
+ continue;
kfree(item);
}
spin_unlock_irqrestore(&list->lock, flags);
INIT_LIST_HEAD(&vha->work_list);
INIT_LIST_HEAD(&vha->list);
INIT_LIST_HEAD(&vha->qla_cmd_list);
- INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
INIT_LIST_HEAD(&vha->logo_list);
INIT_LIST_HEAD(&vha->plogi_ack_list);
INIT_LIST_HEAD(&vha->qp_list);
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
qla24xx_handle_relogin_event(vha, &ea);
+ } else if (vha->hw->current_topology ==
+ ISP_CFG_NL &&
+ IS_QLA2XXX_MIDTYPE(vha->hw)) {
+ (void)qla24xx_fcport_handle_login(vha,
+ fcport);
} else if (vha->hw->current_topology ==
ISP_CFG_NL) {
fcport->login_retry--;
return do_heartbeat;
}
-static void qla_heart_beat(struct scsi_qla_host *vha)
+static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started)
{
struct qla_hw_data *ha = vha->hw;
if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
return;
- if (qla_do_heartbeat(vha))
+ /*
+ * The dpc thread cannot run while the heartbeat task is running.
+ * We also do not want to starve the heartbeat task, so run it
+ * at least once every 5 seconds.
+ */
+ if (dpc_started &&
+ time_before(jiffies, ha->last_heartbeat_run_jiffies + 5 * HZ))
+ return;
+
+ if (qla_do_heartbeat(vha)) {
+ ha->last_heartbeat_run_jiffies = jiffies;
queue_work(ha->wq, &ha->heartbeat_work);
+ }
}
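
The 5-second gate relies on the standard jiffies helpers, which stay correct across jiffies wrap-around. Reduced to its essentials (demo names invented):

#include <linux/jiffies.h>
#include <linux/types.h>

static unsigned long demo_last_run;	/* jiffies of the last run */

static bool demo_due_now(void)
{
	/* time_before() compares jiffies values wrap-safely. */
	if (time_before(jiffies, demo_last_run + 5 * HZ))
		return false;
	demo_last_run = jiffies;
	return true;
}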
/**************************************************************************
start_dpc++;
}
+ /* borrowing w to signify dpc will run */
+ w = 0;
/* Schedule the DPC routine if needed */
if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags));
qla2xxx_wake_dpc(vha);
+ w = 1;
}
- qla_heart_beat(vha);
+ qla_heart_beat(vha, w);
qla2x00_restart_timer(vha, WATCH_INTERVAL);
}
switch (state) {
case pci_channel_io_normal:
- ha->flags.eeh_busy = 0;
+ qla_pci_set_eeh_busy(vha);
if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
"mmio enabled\n");
ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
+
if (IS_QLA82XX(ha))
return PCI_ERS_RESULT_RECOVERED;
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, base_vha, 0x803f,
+ "During mmio enabled, PCI/Register disconnect still detected.\n");
+ goto out;
+ }
+
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_QLA2100(ha) || IS_QLA2200(ha)){
stat = rd_reg_word(&reg->hccr);
"RISC paused -- mmio_enabled, Dumping firmware.\n");
qla2xxx_dump_fw(base_vha);
}
+out:
/* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */
ql_dbg(ql_dbg_aer, base_vha, 0x600d,
"mmio enabled returning.\n");
ha->flt_region_nvram = start;
break;
case FLT_REG_IMG_PRI_27XX:
- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
ha->flt_region_img_status_pri = start;
break;
case FLT_REG_IMG_SEC_27XX:
flash_data_addr(ha, faddr), le32_to_cpu(*dwptr));
if (ret) {
ql_dbg(ql_dbg_user, vha, 0x7006,
- "Failed slopw write %x (%x)\n", faddr, *dwptr);
+ "Failed slow write %x (%x)\n", faddr, *dwptr);
break;
}
}
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
sp->type = type;
sp->name = "nack";
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_nack_sp_done);
sp->u.iocb_cmd.u.nack.ntfy = ntfy;
- sp->done = qla2x00_async_nack_sp_done;
ql_dbg(ql_dbg_disc, vha, 0x20f4,
"Async-%s %8phC hndl %x %s\n",
return rval;
done_free_sp:
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
key = sid_to_key(s_id);
spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
- uint32_t op_key;
- u64 op_lun;
-
- op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
- op_lun = scsilun_to_int(
- (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
- if (op_key == key && op_lun == lun)
- op->aborted = true;
- }
-
list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
uint32_t op_key;
u64 op_lun;
"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
cmd->reset_count, qpair->chip_reset);
+ res = 0;
goto out_unmap_unlock;
}
((u32)s_id->b.al_pa));
spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
- uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
-
- if (op_key == key) {
- op->aborted = true;
- count++;
- }
- }
-
list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
if (!QLA_TGT_MODE_ENABLED())
return;
- if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
} else {
{
ql_dbg(ql_dbg_misc, vha, 0xd20a,
"%s: reset risc [%lx]\n", __func__, *len);
- if (buf)
- WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);
+ if (buf) {
+ if (qla24xx_soft_reset(vha->hw) != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0x5001,
+ "%s: unable to soft reset\n", __func__);
+ return INVALID_ENTRY;
+ }
+ }
return qla27xx_next_entry(ent);
}
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.07.200-k"
+#define QLA2XXX_VERSION "10.02.07.400-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 7
-#define QLA_DRIVER_BETA_VER 200
+#define QLA_DRIVER_BETA_VER 400
#define IDC_COMP_TOV 5
#define LINK_UP_COMP_TOV 30
-#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
+/*
+ * Note: the data structure below does not have a struct iscsi_cmd member since
+ * the qla4xxx driver does not use libiscsi for SCSI I/O.
+ */
+struct qla4xxx_cmd_priv {
+ struct srb *srb;
+};
+
+static inline struct qla4xxx_cmd_priv *qla4xxx_cmd_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
/*
- * SCSI Request Block structure (srb) that is placed
- * on cmd->SCp location of every I/O [We have 22 bytes available]
+ * SCSI Request Block structure (srb) that is associated with each scsi_cmnd.
*/
struct srb {
struct list_head list; /* (8) */
.name = DRIVER_NAME,
.proc_name = DRIVER_NAME,
.queuecommand = qla4xxx_queuecommand,
+ .cmd_size = sizeof(struct qla4xxx_cmd_priv),
.eh_abort_handler = qla4xxx_eh_abort,
.eh_device_reset_handler = qla4xxx_eh_device_reset,
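
Setting .cmd_size in the host template makes the SCSI midlayer allocate that many private bytes alongside every scsi_cmnd, which scsi_cmd_priv() then returns; this is what replaces the old CMD_SP()/SCp.ptr stash. A minimal sketch for a hypothetical driver (demo_* names invented):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct demo_cmd_priv {
	int retries;
};

static int demo_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct demo_cmd_priv *priv = scsi_cmd_priv(cmd);

	priv->retries = 0;	/* midlayer allocated this with the command */
	/* ... build and issue the command ... */
	return 0;
}

static struct scsi_host_template demo_sht = {
	.name		= "demo",
	.queuecommand	= demo_queuecommand,
	.cmd_size	= sizeof(struct demo_cmd_priv),
};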
struct dev_db_entry *fw_ddb_entry)
{
uint16_t options;
- int rc = 0;
options = le16_to_cpu(fw_ddb_entry->options);
SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11);
COPY_ISID(fw_ddb_entry->isid, sess->isid);
- return rc;
+ return 0;
}
static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
srb->ddb = ddb_entry;
srb->cmd = cmd;
srb->flags = 0;
- CMD_SP(cmd) = (void *)srb;
+ qla4xxx_cmd_priv(cmd)->srb = srb;
return srb;
}
scsi_dma_unmap(cmd);
srb->flags &= ~SRB_DMA_VALID;
}
- CMD_SP(cmd) = NULL;
+ qla4xxx_cmd_priv(cmd)->srb = NULL;
}
void qla4xxx_srb_compl(struct kref *ref)
* the scsi/block layer is going to prevent
* the tag from being released.
*/
- if (cmd != NULL && CMD_SP(cmd))
+ if (cmd != NULL && qla4xxx_cmd_priv(cmd)->srb)
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
" start scan\n", ha->host_no, __func__,
ddb_entry->fw_ddb_index);
- scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+ queue_work(ddb_entry->sess->workq, &ddb_entry->sess->scan_work);
}
return QLA_SUCCESS;
}
if (!cmd)
return srb;
- srb = (struct srb *)CMD_SP(cmd);
+ srb = qla4xxx_cmd_priv(cmd)->srb;
if (!srb)
return srb;
do {
/* Checking to see if its returned to OS */
- rp = (struct srb *) CMD_SP(cmd);
+ rp = qla4xxx_cmd_priv(cmd)->srb;
if (rp == NULL) {
done++;
break;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
- srb = (struct srb *) CMD_SP(cmd);
+ srb = qla4xxx_cmd_priv(cmd)->srb;
if (!srb) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n",
#include <asm/irq.h>
#include <asm/dma.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "qlogicfas408.h"
/* Set the following to 2 to use normal interrupt (active high/totempole-
#include <asm/irq.h>
#include <asm/dma.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "qlogicfas408.h"
/*----------------------------------------------------------------*/
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * scsi.h Copyright (C) 1992 Drew Eckhardt
- * Copyright (C) 1993, 1994, 1995, 1998, 1999 Eric Youngdale
- * generic SCSI package header file by
- * Initial versions: Drew Eckhardt
- * Subsequent revisions: Eric Youngdale
- *
- * <drew@colorado.edu>
- *
- * Modified by Eric Youngdale eric@andante.org to
- * add scatter-gather, multiple outstanding request, and other
- * enhancements.
- */
-/*
- * NOTE: this file only contains compatibility glue for old drivers. All
- * these wrappers will be removed sooner or later. For new code please use
- * the interfaces declared in the headers in include/scsi/
- */
-
-#ifndef _SCSI_H
-#define _SCSI_H
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_eh.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi.h>
-
-/*
- * Some defs, in case these are not defined elsewhere.
- */
-#ifndef TRUE
-#define TRUE 1
-#endif
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-struct Scsi_Host;
-struct scsi_cmnd;
-struct scsi_device;
-struct scsi_target;
-struct scatterlist;
-
-#endif /* _SCSI_H */
static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
fmode_t mode, unsigned int timeout)
{
- struct scsi_request *sreq;
+ struct scsi_cmnd *scmd;
struct request *rq;
struct bio *bio;
int ret;
return PTR_ERR(rq);
rq->timeout = timeout;
- ret = -ENOMEM;
- sreq = scsi_req(rq);
- sreq->cmd_len = hdr->request_len;
- if (sreq->cmd_len > BLK_MAX_CDB) {
- sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
- if (!sreq->cmd)
- goto out_put_request;
+ scmd = blk_mq_rq_to_pdu(rq);
+ scmd->cmd_len = hdr->request_len;
+ if (scmd->cmd_len > sizeof(scmd->cmnd)) {
+ ret = -EINVAL;
+ goto out_put_request;
}
ret = -EFAULT;
- if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
- goto out_free_cmd;
+ if (copy_from_user(scmd->cmnd, uptr64(hdr->request), scmd->cmd_len))
+ goto out_put_request;
ret = -EPERM;
- if (!scsi_cmd_allowed(sreq->cmd, mode))
- goto out_free_cmd;
+ if (!scsi_cmd_allowed(scmd->cmnd, mode))
+ goto out_put_request;
ret = 0;
if (hdr->dout_xfer_len) {
}
if (ret)
- goto out_free_cmd;
+ goto out_put_request;
bio = rq->bio;
blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
/*
* fill in all the output members
*/
- hdr->device_status = sreq->result & 0xff;
- hdr->transport_status = host_byte(sreq->result);
+ hdr->device_status = scmd->result & 0xff;
+ hdr->transport_status = host_byte(scmd->result);
hdr->driver_status = 0;
- if (scsi_status_is_check_condition(sreq->result))
+ if (scsi_status_is_check_condition(scmd->result))
hdr->driver_status = DRIVER_SENSE;
hdr->info = 0;
if (hdr->device_status || hdr->transport_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
hdr->response_len = 0;
- if (sreq->sense_len && hdr->response) {
+ if (scmd->sense_len && hdr->response) {
int len = min_t(unsigned int, hdr->max_response_len,
- sreq->sense_len);
+ scmd->sense_len);
- if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
+ if (copy_to_user(uptr64(hdr->response), scmd->sense_buffer,
+ len))
ret = -EFAULT;
else
hdr->response_len = len;
}
if (rq_data_dir(rq) == READ)
- hdr->din_resid = sreq->resid_len;
+ hdr->din_resid = scmd->resid_len;
else
- hdr->dout_resid = sreq->resid_len;
+ hdr->dout_resid = scmd->resid_len;
blk_rq_unmap_user(bio);
-out_free_cmd:
- scsi_req_free_cmd(scsi_req(rq));
out_put_request:
blk_mq_free_request(rq);
return ret;
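
With struct scsi_request gone, the CDB, result, sense data, and residual all live directly in the scsi_cmnd, which is the driver PDU of the block request. A reduced sketch of the access pattern (demo_fill_cdb is a hypothetical helper):

#include <linux/blk-mq.h>
#include <linux/string.h>
#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

static int demo_fill_cdb(struct request *rq, const u8 *cdb, unsigned int len)
{
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);

	/* cmnd is now a fixed-size buffer, so there is no kzalloc path. */
	if (len > sizeof(scmd->cmnd))
		return -EINVAL;
	memcpy(scmd->cmnd, cdb, len);
	scmd->cmd_len = len;
	return 0;
}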
* anything out of the ordinary is seen.
* ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
- * Copyright (C) 2001 - 2020 Douglas Gilbert
+ * Copyright (C) 2001 - 2021 Douglas Gilbert
*
* For documentation see http://sg.danny.cz/sg/scsi_debug.html
*/
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
-#define SDEBUG_VERSION "0190" /* format to fit INQUIRY revision field */
-static const char *sdebug_version_date = "20200710";
+#define SDEBUG_VERSION "0191" /* format to fit INQUIRY revision field */
+static const char *sdebug_version_date = "20210520";
#define MY_NAME "scsi_debug"
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
+#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SDEBUG_OPT_MAC_TIMEOUT 128
#define SDEBUG_OPT_SHORT_TRANSFER 0x100
#define SDEBUG_OPT_Q_NOISE 0x200
-#define SDEBUG_OPT_ALL_TSF 0x400
+#define SDEBUG_OPT_ALL_TSF 0x400 /* ignore */
#define SDEBUG_OPT_RARE_TSF 0x800
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
* priority. The UA numbers should be a sequence starting from 0 with
* SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
-#define SDEBUG_UA_BUS_RESET 1
-#define SDEBUG_UA_MODE_CHANGED 2
-#define SDEBUG_UA_CAPACITY_CHANGED 3
-#define SDEBUG_UA_LUNS_CHANGED 4
-#define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
-#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
-#define SDEBUG_NUM_UAS 7
+#define SDEBUG_UA_POOCCUR 1 /* Power on occurred */
+#define SDEBUG_UA_BUS_RESET 2
+#define SDEBUG_UA_MODE_CHANGED 3
+#define SDEBUG_UA_CAPACITY_CHANGED 4
+#define SDEBUG_UA_LUNS_CHANGED 5
+#define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
+#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
+#define SDEBUG_NUM_UAS 8
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
* sector on read commands: */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static int sdebug_num_hosts;
+static atomic_t sdebug_num_hosts;
+static DEFINE_MUTEX(add_host_mutex);
+
static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
+static bool sdebug_deflect_incoming;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
+static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
static const int device_qfull_result =
- (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
+ (DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
static const int condition_met_result = SAM_STAT_CONDITION_MET;
if (sdebug_verbose)
cp = "power on reset";
break;
+ case SDEBUG_UA_POOCCUR:
+ mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
+ POWER_ON_OCCURRED_ASCQ);
+ if (sdebug_verbose)
+ cp = "power on occurred";
+ break;
case SDEBUG_UA_BUS_RESET:
mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
BUS_RESET_ASCQ);
return sizeof(ie_l_pg);
}
+static int resp_env_rep_l_spg(unsigned char *arr)
+{
+ unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
+ 0x0, 40, 72, 0xff, 45, 18, 0, 0,
+ 0x1, 0x0, 0x23, 0x8,
+ 0x0, 55, 72, 35, 55, 45, 0, 0,
+ };
+
+ memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
+ return sizeof(env_rep_l_spg);
+}
+
#define SDEBUG_MAX_LSENSE_SZ 512
static int resp_log_sense(struct scsi_cmnd *scp,
arr[n++] = 0xff; /* this page */
arr[n++] = 0xd;
arr[n++] = 0x0; /* Temperature */
+ arr[n++] = 0xd;
+ arr[n++] = 0x1; /* Environment reporting */
+ arr[n++] = 0xd;
+ arr[n++] = 0xff; /* all 0xd subpages */
arr[n++] = 0x2f;
arr[n++] = 0x0; /* Informational exceptions */
+ arr[n++] = 0x2f;
+ arr[n++] = 0xff; /* all 0x2f subpages */
arr[3] = n - 4;
break;
case 0xd: /* Temperature subpages */
n = 4;
arr[n++] = 0xd;
arr[n++] = 0x0; /* Temperature */
+ arr[n++] = 0xd;
+ arr[n++] = 0x1; /* Environment reporting */
+ arr[n++] = 0xd;
+ arr[n++] = 0xff; /* these subpages */
arr[3] = n - 4;
break;
case 0x2f: /* Informational exceptions subpages */
n = 4;
arr[n++] = 0x2f;
arr[n++] = 0x0; /* Informational exceptions */
+ arr[n++] = 0x2f;
+ arr[n++] = 0xff; /* these subpages */
arr[3] = n - 4;
break;
default:
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
return check_condition_result;
}
+ } else if (subpcode > 0) {
+ arr[0] |= 0x40;
+ arr[1] = subpcode;
+ if (pcode == 0xd && subpcode == 1)
+ arr[3] = resp_env_rep_l_spg(arr + 4);
+ else {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
+ }
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
return ret;
}
+static inline void
+sdeb_read_lock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __acquire(&sip->macc_lck);
+ else
+ __acquire(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ read_lock(&sip->macc_lck);
+ else
+ read_lock(&sdeb_fake_rw_lck);
+ }
+}
+
+static inline void
+sdeb_read_unlock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __release(&sip->macc_lck);
+ else
+ __release(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ read_unlock(&sip->macc_lck);
+ else
+ read_unlock(&sdeb_fake_rw_lck);
+ }
+}
+
+static inline void
+sdeb_write_lock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __acquire(&sip->macc_lck);
+ else
+ __acquire(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ write_lock(&sip->macc_lck);
+ else
+ write_lock(&sdeb_fake_rw_lck);
+ }
+}
+
+static inline void
+sdeb_write_unlock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __release(&sip->macc_lck);
+ else
+ __release(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ write_unlock(&sip->macc_lck);
+ else
+ write_unlock(&sdeb_fake_rw_lck);
+ }
+}
+
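When sdebug_no_rwlock is set, the wrappers above skip the real rwlock but still call __acquire()/__release(), which cost nothing at run time and exist only so that sparse's context tracking sees balanced lock operations in both modes. The annotation pair in miniature (demo names invented):

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RWLOCK(demo_lck);
static bool demo_no_lock;

static void demo_read_lock(void) __acquires(&demo_lck)
{
	if (demo_no_lock)
		__acquire(&demo_lck);	/* sparse bookkeeping only, no-op */
	else
		read_lock(&demo_lck);
}

static void demo_read_unlock(void) __releases(&demo_lck)
{
	if (demo_no_lock)
		__release(&demo_lck);	/* sparse bookkeeping only, no-op */
	else
		read_unlock(&demo_lck);
}
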
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
int ret;
u64 lba;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
u8 *cmd = scp->cmnd;
switch (cmd[0]) {
return check_condition_result;
}
- read_lock(macc_lckp);
+ sdeb_read_lock(sip);
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
switch (prot_verify_read(scp, lba, num, ei_lba)) {
case 1: /* Guard tag error */
if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
return check_condition_result;
} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
return illegal_condition_result;
}
break;
case 3: /* Reference tag error */
if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
return check_condition_result;
} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
return illegal_condition_result;
}
}
ret = do_device_access(sip, scp, 0, lba, num, false);
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
if (unlikely(ret == -1))
return DID_ERROR << 16;
int ret;
u64 lba;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
u8 *cmd = scp->cmnd;
switch (cmd[0]) {
"to DIF device\n");
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
ret = check_device_access_params(scp, lba, num, true);
if (ret) {
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return ret;
}
switch (prot_verify_write(scp, lba, num, ei_lba)) {
case 1: /* Guard tag error */
if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
return illegal_condition_result;
} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
return check_condition_result;
}
break;
case 3: /* Reference tag error */
if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
return illegal_condition_result;
} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
return check_condition_result;
}
/* If ZBC zone then bump its write pointer */
if (sdebug_dev_is_zoned(devip))
zbc_inc_wp(devip, lba, num);
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
if (unlikely(-1 == ret))
return DID_ERROR << 16;
else if (unlikely(sdebug_verbose &&
u8 *lrdp = NULL;
u8 *up;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
u8 wrprotect;
u16 lbdof, num_lrd, k;
u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
goto err_out;
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
sg_off = lbdof_blen;
/* Spec says Buffer xfer Length field in number of LBs in dout */
cum_lb = 0;
}
ret = 0;
err_out_unlock:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
err_out:
kfree(lrdp);
return ret;
int ret;
struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
scp->device->hostdata, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
u8 *fs1p;
u8 *fsp;
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
ret = check_device_access_params(scp, lba, num, true);
if (ret) {
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return ret;
}
ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
if (-1 == ret) {
- write_unlock(&sip->macc_lck);
+ sdeb_write_unlock(sip);
return DID_ERROR << 16;
} else if (sdebug_verbose && !ndob && (ret < lb_size))
sdev_printk(KERN_INFO, scp->device,
if (sdebug_dev_is_zoned(devip))
zbc_inc_wp(devip, lba, num);
out:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return 0;
}
u8 *cmd = scp->cmnd;
u8 *arr;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
u64 lba;
u32 dnum;
u32 lb_size = sdebug_sector_size;
return check_condition_result;
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
ret = do_dout_fetch(scp, dnum, arr);
if (ret == -1) {
if (scsi_debug_lbp())
map_region(sip, lba, num);
cleanup:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
kfree(arr);
return retval;
}
unsigned char *buf;
struct unmap_block_desc *desc;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
unsigned int i, payload_len, descriptors;
int ret;
desc = (void *)&buf[8];
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
for (i = 0 ; i < descriptors ; i++) {
unsigned long long lba = get_unaligned_be64(&desc[i].lba);
ret = 0;
out:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
kfree(buf);
return ret;
u32 nblks;
u8 *cmd = scp->cmnd;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
u8 *fsp = sip->storep;
if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
rest = block + nblks - sdebug_store_sectors;
/* Try to bring the PRE-FETCH range into CPU's cache */
- read_lock(macc_lckp);
+ sdeb_read_lock(sip);
prefetch_range(fsp + (sdebug_sector_size * block),
(nblks - rest) * sdebug_sector_size);
if (rest)
prefetch_range(fsp, rest * sdebug_sector_size);
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
fini:
if (cmd[1] & 0x2)
res = SDEG_RES_IMMED_MASK;
u8 *arr;
u8 *cmd = scp->cmnd;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
bytchk = (cmd[1] >> 1) & 0x3;
if (bytchk == 0) {
return check_condition_result;
}
/* Not changing store, so only need read access */
- read_lock(macc_lckp);
+ sdeb_read_lock(sip);
ret = do_dout_fetch(scp, a_num, arr);
if (ret == -1) {
goto cleanup;
}
cleanup:
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
kfree(arr);
return ret;
}
u8 *cmd = scp->cmnd;
struct sdeb_zone_state *zsp;
struct sdeb_store_info *sip = devip2sip(devip, false);
- rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
if (!sdebug_dev_is_zoned(devip)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- read_lock(macc_lckp);
+ sdeb_read_lock(sip);
desc = arr + 64;
for (i = 0; i < max_zones; i++) {
ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
fini:
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
kfree(arr);
return ret;
}
struct sdeb_zone_state *zsp;
bool all = cmd[14] & 0x01;
struct sdeb_store_info *sip = devip2sip(devip, false);
- rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
if (!sdebug_dev_is_zoned(devip)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
if (all) {
/* Check if all closed zones can be open */
zbc_open_zone(devip, zsp, true);
fini:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return res;
}
struct sdeb_zone_state *zsp;
bool all = cmd[14] & 0x01;
struct sdeb_store_info *sip = devip2sip(devip, false);
- rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
if (!sdebug_dev_is_zoned(devip)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
if (all) {
zbc_close_all(devip);
zbc_close_zone(devip, zsp);
fini:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return res;
}
u8 *cmd = scp->cmnd;
bool all = cmd[14] & 0x01;
struct sdeb_store_info *sip = devip2sip(devip, false);
- rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
if (!sdebug_dev_is_zoned(devip)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
if (all) {
zbc_finish_all(devip);
zbc_finish_zone(devip, zsp, true);
fini:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return res;
}
u8 *cmd = scp->cmnd;
bool all = cmd[14] & 0x01;
struct sdeb_store_info *sip = devip2sip(devip, false);
- rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
if (!sdebug_dev_is_zoned(devip)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
if (all) {
zbc_rwp_all(devip);
zbc_rwp_zone(devip, zsp);
fini:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return res;
}
return;
}
spin_lock_irqsave(&sqp->qc_lock, iflags);
- sd_dp->defer_t = SDEB_DEFER_NONE;
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
sqcp = &sqp->qc_arr[qc_idx];
scp = sqcp->a_cmnd;
if (unlikely(scp == NULL)) {
open_devip->lun = sdev->lun;
open_devip->sdbg_host = sdbg_host;
atomic_set(&open_devip->num_in_q, 0);
- set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
+ set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
open_devip->used = true;
return open_devip;
}
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
+ if (smp_load_acquire(&sdebug_deflect_incoming)) {
+ pr_info("Exit early due to deflect_incoming\n");
+ return 1;
+ }
if (devip == NULL) {
devip = find_build_dev_info(sdp);
if (devip == NULL)
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
if (sd_dp) {
- l_defer_t = sd_dp->defer_t;
- sd_dp->defer_t = SDEB_DEFER_NONE;
+ l_defer_t = READ_ONCE(sd_dp->defer_t);
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
} else
l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
}
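
Every access to sd_dp->defer_t is now wrapped in READ_ONCE()/WRITE_ONCE(). The field is inspected by the blk-mq poll path while the completion paths rewrite it, and the annotations keep the compiler from tearing, fusing, or re-reading the accesses. A compressed sketch of the idiom, using names from the surrounding hunks (this illustrates the pattern, it is not new code from the patch):

/* Publisher: record how the response will be delivered. */
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);

/* Consumer (e.g. the mq_poll path): a single, untorn load. */
if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
	/* ... complete the command, then clear the state ... */
	WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
}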
/* Deletes (stops) timers or work queues of all queued commands */
-static void stop_all_queued(void)
+static void stop_all_queued(bool done_with_no_conn)
{
unsigned long iflags;
int j, k;
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
struct sdebug_defer *sd_dp;
+ struct scsi_cmnd *scp;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
spin_lock_irqsave(&sqp->qc_lock, iflags);
for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
if (test_bit(k, sqp->in_use_bm)) {
sqcp = &sqp->qc_arr[k];
- if (sqcp->a_cmnd == NULL)
+ scp = sqcp->a_cmnd;
+ if (!scp)
continue;
devip = (struct sdebug_dev_info *)
sqcp->a_cmnd->device->hostdata;
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
if (sd_dp) {
- l_defer_t = sd_dp->defer_t;
- sd_dp->defer_t = SDEB_DEFER_NONE;
+ l_defer_t = READ_ONCE(sd_dp->defer_t);
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
} else
l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
stop_qc_helper(sd_dp, l_defer_t);
+ if (done_with_no_conn && l_defer_t != SDEB_DEFER_NONE) {
+ scp->result = DID_NO_CONNECT << 16;
+ scsi_done(scp);
+ }
clear_bit(k, sqp->in_use_bm);
spin_lock_irqsave(&sqp->qc_lock, iflags);
}
}
}
spin_unlock(&sdebug_host_list_lock);
- stop_all_queued();
+ stop_all_queued(false);
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
"%s: %d device(s) found\n", __func__, k);
}
}
-static void block_unblock_all_queues(bool block)
+static void sdeb_block_all_queues(void)
{
int j;
struct sdebug_queue *sqp;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
- atomic_set(&sqp->blocked, (int)block);
+ atomic_set(&sqp->blocked, (int)true);
+}
+
+static void sdeb_unblock_all_queues(void)
+{
+ int j;
+ struct sdebug_queue *sqp;
+
+ for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
+ atomic_set(&sqp->blocked, (int)false);
+}
+
+static void sdeb_add_n_hosts(int num_hosts)
+{
+ if (num_hosts < 1)
+ return;
+ do {
+ bool found;
+ unsigned long idx;
+ struct sdeb_store_info *sip;
+ bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
+
+ found = false;
+ if (want_phs) {
+ xa_for_each_marked(per_store_ap, idx, sip, SDEB_XA_NOT_IN_USE) {
+ sdeb_most_recent_idx = (int)idx;
+ found = true;
+ break;
+ }
+ if (found) /* re-use case */
+ sdebug_add_host_helper((int)idx);
+ else
+ sdebug_do_add_host(true /* make new store */);
+ } else {
+ sdebug_do_add_host(false);
+ }
+ } while (--num_hosts);
}
/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
modulo = abs(sdebug_every_nth);
if (modulo < 2)
return;
- block_unblock_all_queues(true);
+ sdeb_block_all_queues();
count = atomic_read(&sdebug_cmnd_count);
atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
- block_unblock_all_queues(false);
+ sdeb_unblock_all_queues();
}
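
The adjustment above rounds sdebug_cmnd_count down to a multiple of abs(every_nth) so the next trigger fires on a predictable boundary. A tiny, self-contained userspace demonstration of the arithmetic (names are illustrative, not from the driver):

#include <stdio.h>

/* Round count down to the nearest multiple of modulo, exactly as
 * (count / modulo) * modulo does in the function above. */
static int round_down_to_multiple(int count, int modulo)
{
	return (count / modulo) * modulo;
}

int main(void)
{
	/* e.g. count=17, modulo=5 -> 15, a multiple of every_nth */
	printf("%d\n", round_down_to_multiple(17, 5));
	return 0;
}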
static void clear_queue_stats(void)
return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
}
+static int process_deflect_incoming(struct scsi_cmnd *scp)
+{
+ u8 opcode = scp->cmnd[0];
+
+ if (opcode == SYNCHRONIZE_CACHE || opcode == SYNCHRONIZE_CACHE_16)
+ return 0;
+ return DID_NO_CONNECT << 16;
+}
+
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
/* Complete the processing of the thread that queued a SCSI command to this
*/
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
int scsi_result,
- int (*pfp)(struct scsi_cmnd *,
- struct sdebug_dev_info *),
+ int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *),
int delta_jiff, int ndelay)
{
bool new_sd_dp;
}
sdp = cmnd->device;
- if (delta_jiff == 0)
+ if (delta_jiff == 0) {
+ sqp = get_queue(cmnd);
+ if (atomic_read(&sqp->blocked)) {
+ if (smp_load_acquire(&sdebug_deflect_incoming))
+ return process_deflect_incoming(cmnd);
+ else
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
goto respond_in_thread;
+ }
sqp = get_queue(cmnd);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (unlikely(atomic_read(&sqp->blocked))) {
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ if (smp_load_acquire(&sdebug_deflect_incoming)) {
+ scsi_result = process_deflect_incoming(cmnd);
+ goto respond_in_thread;
+ }
+ if (sdebug_verbose)
+ pr_info("blocked --> SCSI_MLQUEUE_HOST_BUSY\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
num_in_q = atomic_read(&devip->num_in_q);
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (scsi_result)
goto respond_in_thread;
- else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
- scsi_result = device_qfull_result;
+ scsi_result = device_qfull_result;
if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
- sdev_printk(KERN_INFO, sdp,
- "%s: max_queue=%d exceeded, %s\n",
- __func__, sdebug_max_queue,
- (scsi_result ? "status: TASK SET FULL" :
- "report: host busy"));
- if (scsi_result)
- goto respond_in_thread;
- else
- return SCSI_MLQUEUE_HOST_BUSY;
+ sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
+ __func__, sdebug_max_queue);
+ goto respond_in_thread;
}
set_bit(k, sqp->in_use_bm);
atomic_inc(&devip->num_in_q);
sd_dp->sqa_idx = sqp - sdebug_q_arr;
sd_dp->qc_idx = k;
}
- sd_dp->defer_t = SDEB_DEFER_POLL;
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
} else {
if (!sd_dp->init_hrt) {
sd_dp->sqa_idx = sqp - sdebug_q_arr;
sd_dp->qc_idx = k;
}
- sd_dp->defer_t = SDEB_DEFER_HRT;
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
/* schedule the invocation of scsi_done() for a later time */
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
}
sd_dp->sqa_idx = sqp - sdebug_q_arr;
sd_dp->qc_idx = k;
}
- sd_dp->defer_t = SDEB_DEFER_POLL;
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
} else {
if (!sd_dp->init_wq) {
sd_dp->qc_idx = k;
INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
}
- sd_dp->defer_t = SDEB_DEFER_WQ;
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
schedule_work(&sd_dp->ew.work);
}
if (sdebug_statistics)
respond_in_thread: /* call back to mid-layer using invocation thread */
cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
cmnd->result &= ~SDEG_RES_IMMED_MASK;
- if (cmnd->result == 0 && scsi_result != 0)
+ if (cmnd->result == 0 && scsi_result != 0) {
cmnd->result = scsi_result;
+ if (sdebug_verbose)
+ pr_info("respond_in_thread: tag=0x%x, scp->result=0x%x\n",
+ blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)), scsi_result);
+ }
scsi_done(cmnd);
return 0;
}
S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
+module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
+MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
int j, k;
struct sdebug_queue *sqp;
- block_unblock_all_queues(true);
+ sdeb_block_all_queues();
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
k = find_first_bit(sqp->in_use_bm,
sdebug_jdelay = jdelay;
sdebug_ndelay = 0;
}
- block_unblock_all_queues(false);
+ sdeb_unblock_all_queues();
}
return res;
}
int j, k;
struct sdebug_queue *sqp;
- block_unblock_all_queues(true);
+ sdeb_block_all_queues();
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
k = find_first_bit(sqp->in_use_bm,
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY;
}
- block_unblock_all_queues(false);
+ sdeb_unblock_all_queues();
}
return res;
}
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
(n <= SDEBUG_CANQUEUE) &&
(sdebug_host_max_queue == 0)) {
- block_unblock_all_queues(true);
+ sdeb_block_all_queues();
k = 0;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
atomic_set(&retired_max_queue, k + 1);
else
atomic_set(&retired_max_queue, 0);
- block_unblock_all_queues(false);
+ sdeb_unblock_all_queues();
return count;
}
return -EINVAL;
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}
+static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
+}
+
+static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ bool v;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdebug_no_rwlock = v;
+ return count;
+}
+static DRIVER_ATTR_RW(no_rwlock);
+
/*
* Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
* in range [0, sdebug_host_max_queue), we can't change it.
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
/* absolute number of hosts currently active is what is shown */
- return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&sdebug_num_hosts));
}
+/*
+ * Accept positive and negative values. Hex values (only positive) may be prefixed by '0x'.
+ * To remove all hosts use a large negative number (e.g. -9999). The value 0 does nothing.
+ * Returns -EBUSY if another add_host sysfs invocation is active.
+ */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- bool found;
- unsigned long idx;
- struct sdeb_store_info *sip;
- bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
int delta_hosts;
- if (sscanf(buf, "%d", &delta_hosts) != 1)
+ if (count == 0 || kstrtoint(buf, 0, &delta_hosts))
return -EINVAL;
+ if (sdebug_verbose)
+ pr_info("prior num_hosts=%d, num_to_add=%d\n",
+ atomic_read(&sdebug_num_hosts), delta_hosts);
+ if (delta_hosts == 0)
+ return count;
+ if (mutex_trylock(&add_host_mutex) == 0)
+ return -EBUSY;
if (delta_hosts > 0) {
- do {
- found = false;
- if (want_phs) {
- xa_for_each_marked(per_store_ap, idx, sip,
- SDEB_XA_NOT_IN_USE) {
- sdeb_most_recent_idx = (int)idx;
- found = true;
- break;
- }
- if (found) /* re-use case */
- sdebug_add_host_helper((int)idx);
- else
- sdebug_do_add_host(true);
- } else {
- sdebug_do_add_host(false);
- }
- } while (--delta_hosts);
+ sdeb_add_n_hosts(delta_hosts);
} else if (delta_hosts < 0) {
+ smp_store_release(&sdebug_deflect_incoming, true);
+ sdeb_block_all_queues();
+ if (-delta_hosts >= atomic_read(&sdebug_num_hosts))
+ stop_all_queued(true);
do {
+ if (atomic_read(&sdebug_num_hosts) < 1) {
+ free_all_queued();
+ break;
+ }
sdebug_do_remove_host(false);
} while (++delta_hosts);
+ sdeb_unblock_all_queues();
+ smp_store_release(&sdebug_deflect_incoming, false);
}
+ mutex_unlock(&add_host_mutex);
+ if (sdebug_verbose)
+ pr_info("post num_hosts=%d\n", atomic_read(&sdebug_num_hosts));
return count;
}
static DRIVER_ATTR_RW(add_host);
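
sdebug_deflect_incoming is always written with smp_store_release() and read with smp_load_acquire(): a reader that observes the flag as true is then guaranteed to also observe the writer's earlier stores. A compressed sketch of the pairing as used in these hunks (surrounding logic elided):

/* Writer (host-removal path in add_host_store() above): */
smp_store_release(&sdebug_deflect_incoming, true);
sdeb_block_all_queues();
/* ... remove hosts ... */
sdeb_unblock_all_queues();
smp_store_release(&sdebug_deflect_incoming, false);

/* Reader (schedule_resp(), on finding its queue blocked): */
if (smp_load_acquire(&sdebug_deflect_incoming))
	return process_deflect_incoming(cmnd);	/* DID_NO_CONNECT, except cache syncs */
return SCSI_MLQUEUE_HOST_BUSY;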
&driver_attr_lun_format.attr,
&driver_attr_max_luns.attr,
&driver_attr_max_queue.attr,
+ &driver_attr_no_rwlock.attr,
&driver_attr_no_uld.attr,
&driver_attr_scsi_level.attr,
&driver_attr_virtual_gb.attr,
sdebug_add_host = 0;
for (k = 0; k < hosts_to_add; k++) {
+ if (smp_load_acquire(&sdebug_deflect_incoming)) {
+ pr_info("exit early as sdebug_deflect_incoming is set\n");
+ return 0;
+ }
if (want_store && k == 0) {
ret = sdebug_add_host_helper(idx);
if (ret < 0) {
}
}
if (sdebug_verbose)
- pr_info("built %d host(s)\n", sdebug_num_hosts);
+ pr_info("built %d host(s)\n", atomic_read(&sdebug_num_hosts));
+ /*
+ * Even though all the hosts have been established, due to async device (LU) scanning
+ * by the scsi mid-level, there may still be devices (LUs) being set up.
+ */
return 0;
bus_unreg:
static void __exit scsi_debug_exit(void)
{
- int k = sdebug_num_hosts;
+ int k;
- stop_all_queued();
- for (; k; k--)
+ /* Possible race with LUs still being set up; stop them asap */
+ sdeb_block_all_queues();
+ smp_store_release(&sdebug_deflect_incoming, true);
+ stop_all_queued(false);
+ for (k = 0; atomic_read(&sdebug_num_hosts) > 0; k++)
sdebug_do_remove_host(true);
free_all_queued();
+ if (sdebug_verbose)
+ pr_info("removed %d hosts\n", k);
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
sdbg_host->dev.bus = &pseudo_lld_bus;
sdbg_host->dev.parent = pseudo_primary;
sdbg_host->dev.release = &sdebug_release_adapter;
- dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
+ dev_set_name(&sdbg_host->dev, "adapter%d", atomic_read(&sdebug_num_hosts));
error = device_register(&sdbg_host->dev);
if (error)
goto clean;
- ++sdebug_num_hosts;
+ atomic_inc(&sdebug_num_hosts);
return 0;
clean:
return;
device_unregister(&sdbg_host->dev);
- --sdebug_num_hosts;
+ atomic_dec(&sdebug_num_hosts);
}
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
int num_in_q = 0;
struct sdebug_dev_info *devip;
- block_unblock_all_queues(true);
+ sdeb_block_all_queues();
devip = (struct sdebug_dev_info *)sdev->hostdata;
if (NULL == devip) {
- block_unblock_all_queues(false);
+ sdeb_unblock_all_queues();
return -ENODEV;
}
num_in_q = atomic_read(&devip->num_in_q);
sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
__func__, qdepth, num_in_q);
}
- block_unblock_all_queues(false);
+ sdeb_unblock_all_queues();
return sdev->queue_depth;
}
struct sdebug_defer *sd_dp;
sqp = sdebug_q_arr + queue_num;
+ qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+ if (qc_idx >= sdebug_max_queue)
+ return 0;
+
spin_lock_irqsave(&sqp->qc_lock, iflags);
for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
if (first) {
- qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
first = false;
+ if (!test_bit(qc_idx, sqp->in_use_bm))
+ continue;
} else {
qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
}
- if (unlikely(qc_idx >= sdebug_max_queue))
+ if (qc_idx >= sdebug_max_queue)
break;
sqcp = &sqp->qc_arr[qc_idx];
queue_num, qc_idx, __func__);
break;
}
- if (sd_dp->defer_t == SDEB_DEFER_POLL) {
+ if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
if (kt_from_boot < sd_dp->cmpl_ts)
continue;
else
atomic_set(&retired_max_queue, k + 1);
}
- sd_dp->defer_t = SDEB_DEFER_NONE;
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
scsi_done(scp); /* callback to mid level */
- spin_lock_irqsave(&sqp->qc_lock, iflags);
num_entries++;
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
+ break;
}
+
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+
if (num_entries > 0)
atomic_add(num_entries, &sdeb_mq_poll_count);
return num_entries;
void scsi_show_rq(struct seq_file *m, struct request *rq)
{
- struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
int alloc_ms = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
int timeout_ms = jiffies_to_msecs(rq->timeout);
- const u8 *const cdb = READ_ONCE(cmd->cmnd);
char buf[80] = "(?)";
- if (cdb)
- __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len);
+ __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
seq_printf(m, ", .cmd=%s, .retries=%d, .result = %#x, .flags=", buf,
cmd->retries, cmd->result);
scsi_flags_show(m, cmd->flags, scsi_cmd_flags,
if (sshdr->asc == 0x29) {
evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED;
- sdev_printk(KERN_WARNING, sdev,
- "Power-on or device reset occurred\n");
+ /*
+ * Do not print message if it is an expected side-effect
+ * of runtime PM.
+ */
+ if (!sdev->silence_suspend)
+ sdev_printk(KERN_WARNING, sdev,
+ "Power-on or device reset occurred\n");
}
if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
* @scmd: SCSI command structure to hijack
* @ses: structure to save restore information
* @cmnd: CDB to send. Can be NULL if no new cmnd is needed
- * @cmnd_size: size in bytes of @cmnd (must be <= BLK_MAX_CDB)
+ * @cmnd_size: size in bytes of @cmnd (must be <= MAX_COMMAND_SIZE)
* @sense_bytes: size of sense data to copy, or 0 (if != 0, @cmnd is ignored)
*
* This function is used to save SCSI command information before re-executing
* the command.
*/
ses->cmd_len = scmd->cmd_len;
- ses->cmnd = scmd->cmnd;
ses->data_direction = scmd->sc_data_direction;
ses->sdb = scmd->sdb;
ses->result = scmd->result;
- ses->resid_len = scmd->req.resid_len;
+ ses->resid_len = scmd->resid_len;
ses->underflow = scmd->underflow;
ses->prot_op = scmd->prot_op;
ses->eh_eflags = scmd->eh_eflags;
scmd->prot_op = SCSI_PROT_NORMAL;
scmd->eh_eflags = 0;
- scmd->cmnd = ses->eh_cmnd;
- memset(scmd->cmnd, 0, BLK_MAX_CDB);
+ memcpy(ses->cmnd, scmd->cmnd, sizeof(ses->cmnd));
+ memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
scmd->result = 0;
- scmd->req.resid_len = 0;
+ scmd->resid_len = 0;
if (sense_bytes) {
scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
} else {
scmd->sc_data_direction = DMA_NONE;
if (cmnd) {
- BUG_ON(cmnd_size > BLK_MAX_CDB);
+ BUG_ON(cmnd_size > sizeof(scmd->cmnd));
memcpy(scmd->cmnd, cmnd, cmnd_size);
scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
}
* Restore original data
*/
scmd->cmd_len = ses->cmd_len;
- scmd->cmnd = ses->cmnd;
+ memcpy(scmd->cmnd, ses->cmnd, sizeof(ses->cmnd));
scmd->sc_data_direction = ses->data_direction;
scmd->sdb = ses->sdb;
scmd->result = ses->result;
- scmd->req.resid_len = ses->resid_len;
+ scmd->resid_len = ses->resid_len;
scmd->underflow = ses->underflow;
scmd->prot_op = ses->prot_op;
scmd->eh_eflags = ses->eh_eflags;
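
With the CDB now embedded in struct scsi_cmnd, scsi_eh_prep_cmnd() snapshots it by memcpy() into ses->cmnd instead of swapping pointers, and scsi_eh_restore_cmnd() copies it back. The caller-side save/send/restore pattern is unchanged; a hedged sketch (the helper and the TEST UNIT READY CDB are illustrative, not from this patch):

/* Hypothetical caller: temporarily hijack scmd to probe the device. */
static void example_probe_with_tur(struct scsi_cmnd *scmd)
{
	struct scsi_eh_save ses;
	unsigned char tur[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };

	/* Save scmd's CDB, data buffer and result, then install ours. */
	scsi_eh_prep_cmnd(scmd, &ses, tur, sizeof(tur), 0);
	/* ... issue scmd through the host's error-handling machinery ... */
	scsi_eh_restore_cmnd(scmd, &ses);	/* reinstate the original */
}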
"reservation conflict\n");
set_host_byte(scmd, DID_NEXUS_FAILURE);
return SUCCESS; /* causes immediate i/o error */
- default:
- return FAILED;
}
return FAILED;
*/
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
+ struct scsi_cmnd *scmd;
struct request *req;
- struct scsi_request *rq;
req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return;
- rq = scsi_req(req);
+ scmd = blk_mq_rq_to_pdu(req);
- rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
- rq->cmd[1] = 0;
- rq->cmd[2] = 0;
- rq->cmd[3] = 0;
- rq->cmd[4] = SCSI_REMOVAL_PREVENT;
- rq->cmd[5] = 0;
- rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+ scmd->cmnd[0] = ALLOW_MEDIUM_REMOVAL;
+ scmd->cmnd[1] = 0;
+ scmd->cmnd[2] = 0;
+ scmd->cmnd[3] = 0;
+ scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
+ scmd->cmnd[5] = 0;
+ scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
req->rq_flags |= RQF_QUIET;
req->timeout = 10 * HZ;
- rq->retries = 5;
+ scmd->allowed = 5;
blk_execute_rq_nowait(req, true, eh_lock_door_done);
}
scmd = (struct scsi_cmnd *)(rq + 1);
scsi_init_command(dev, scmd);
- scmd->cmnd = scsi_req(rq)->cmd;
scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL;
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
struct sg_io_hdr *hdr, fmode_t mode)
{
- struct scsi_request *req = scsi_req(rq);
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
if (hdr->cmd_len < 6)
return -EMSGSIZE;
- if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
+ if (copy_from_user(scmd->cmnd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
- if (!scsi_cmd_allowed(req->cmd, mode))
+ if (!scsi_cmd_allowed(scmd->cmnd, mode))
return -EPERM;
-
- /*
- * fill in request structure
- */
- req->cmd_len = hdr->cmd_len;
+ scmd->cmd_len = hdr->cmd_len;
rq->timeout = msecs_to_jiffies(hdr->timeout);
if (!rq->timeout)
static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
struct bio *bio)
{
- struct scsi_request *req = scsi_req(rq);
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
int r, ret = 0;
/*
* fill in all the output members
*/
- hdr->status = req->result & 0xff;
- hdr->masked_status = status_byte(req->result);
+ hdr->status = scmd->result & 0xff;
+ hdr->masked_status = status_byte(scmd->result);
hdr->msg_status = COMMAND_COMPLETE;
- hdr->host_status = host_byte(req->result);
+ hdr->host_status = host_byte(scmd->result);
hdr->driver_status = 0;
if (scsi_status_is_check_condition(hdr->status))
hdr->driver_status = DRIVER_SENSE;
hdr->info = 0;
if (hdr->masked_status || hdr->host_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
- hdr->resid = req->resid_len;
+ hdr->resid = scmd->resid_len;
hdr->sb_len_wr = 0;
- if (req->sense_len && hdr->sbp) {
- int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);
+ if (scmd->sense_len && hdr->sbp) {
+ int len = min((unsigned int) hdr->mx_sb_len, scmd->sense_len);
- if (!copy_to_user(hdr->sbp, req->sense, len))
+ if (!copy_to_user(hdr->sbp, scmd->sense_buffer, len))
hdr->sb_len_wr = len;
else
ret = -EFAULT;
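
The fields filled in above (status, masked_status, host_status, resid, sb_len_wr) are exactly what a userspace SG_IO caller reads back. A minimal, self-contained sketch of the userspace side of this interface (the device path is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x00 };	/* TEST UNIT READY */
	unsigned char sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/sg0", O_RDWR);	/* illustrative device node */

	if (fd < 0)
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.dxfer_direction = SG_DXFER_NONE;
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;			/* milliseconds */
	if (ioctl(fd, SG_IO, &hdr) == 0)
		printf("status=0x%x host_status=0x%x resid=%d sb_len_wr=%u\n",
		       hdr.status, hdr.host_status, hdr.resid, hdr.sb_len_wr);
	close(fd);
	return 0;
}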
int writing = 0;
int at_head = 0;
struct request *rq;
- struct scsi_request *req;
+ struct scsi_cmnd *scmd;
struct bio *bio;
if (hdr->interface_id != 'S')
if (hdr->flags & SG_FLAG_Q_AT_HEAD)
at_head = 1;
- ret = -ENOMEM;
rq = scsi_alloc_request(sdev->request_queue, writing ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
- req = scsi_req(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
- if (hdr->cmd_len > BLK_MAX_CDB) {
- req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
- if (!req->cmd)
- goto out_put_request;
+ if (hdr->cmd_len > sizeof(scmd->cmnd)) {
+ ret = -EINVAL;
+ goto out_put_request;
}
ret = scsi_fill_sghdr_rq(sdev, rq, hdr, mode);
if (ret < 0)
- goto out_free_cdb;
+ goto out_put_request;
ret = 0;
if (hdr->iovec_count) {
ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
hdr->iovec_count, 0, &iov, &i);
if (ret < 0)
- goto out_free_cdb;
+ goto out_put_request;
/* SG_IO howto says that the shorter of the two wins */
iov_iter_truncate(&i, hdr->dxfer_len);
hdr->dxfer_len, GFP_KERNEL);
if (ret)
- goto out_free_cdb;
+ goto out_put_request;
bio = rq->bio;
- req->retries = 0;
+ scmd->allowed = 0;
start_time = jiffies;
ret = scsi_complete_sghdr_rq(rq, hdr, bio);
-out_free_cdb:
- scsi_req_free_cmd(req);
out_put_request:
blk_mq_free_request(rq);
return ret;
static int sg_scsi_ioctl(struct request_queue *q, fmode_t mode,
struct scsi_ioctl_command __user *sic)
{
- enum { OMAX_SB_LEN = 16 }; /* For backward compatibility */
struct request *rq;
- struct scsi_request *req;
int err;
unsigned int in_len, out_len, bytes, opcode, cmdlen;
+ struct scsi_cmnd *scmd;
char *buffer = NULL;
if (!sic)
err = PTR_ERR(rq);
goto error_free_buffer;
}
- req = scsi_req(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
cmdlen = COMMAND_SIZE(opcode);
* get command and data to send to device, if any
*/
err = -EFAULT;
- req->cmd_len = cmdlen;
- if (copy_from_user(req->cmd, sic->data, cmdlen))
+ scmd->cmd_len = cmdlen;
+ if (copy_from_user(scmd->cmnd, sic->data, cmdlen))
goto error;
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error;
err = -EPERM;
- if (!scsi_cmd_allowed(req->cmd, mode))
+ if (!scsi_cmd_allowed(scmd->cmnd, mode))
goto error;
/* default, possibly overridden later */
- req->retries = 5;
+ scmd->allowed = 5;
switch (opcode) {
case SEND_DIAGNOSTIC:
case FORMAT_UNIT:
rq->timeout = FORMAT_UNIT_TIMEOUT;
- req->retries = 1;
+ scmd->allowed = 1;
break;
case START_STOP:
rq->timeout = START_STOP_TIMEOUT;
break;
case READ_DEFECT_DATA:
rq->timeout = READ_DEFECT_DATA_TIMEOUT;
- req->retries = 1;
+ scmd->allowed = 1;
break;
default:
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
blk_execute_rq(rq, false);
- err = req->result & 0xff; /* only 8 bit SCSI status */
+ err = scmd->result & 0xff; /* only 8 bit SCSI status */
if (err) {
- if (req->sense_len && req->sense) {
- bytes = (OMAX_SB_LEN > req->sense_len) ?
- req->sense_len : OMAX_SB_LEN;
- if (copy_to_user(sic->data, req->sense, bytes))
+ if (scmd->sense_len && scmd->sense_buffer) {
+ /* limit sense len for backward compatibility */
+ if (copy_to_user(sic->data, scmd->sense_buffer,
+ min(scmd->sense_len, 16U)))
err = -EFAULT;
}
} else {
int *resid)
{
struct request *req;
- struct scsi_request *rq;
+ struct scsi_cmnd *scmd;
int ret;
req = scsi_alloc_request(sdev->request_queue,
if (IS_ERR(req))
return PTR_ERR(req);
- rq = scsi_req(req);
-
if (bufflen) {
ret = blk_rq_map_kern(sdev->request_queue, req,
buffer, bufflen, GFP_NOIO);
if (ret)
goto out;
}
- rq->cmd_len = COMMAND_SIZE(cmd[0]);
- memcpy(rq->cmd, cmd, rq->cmd_len);
- rq->retries = retries;
+ scmd = blk_mq_rq_to_pdu(req);
+ scmd->cmd_len = COMMAND_SIZE(cmd[0]);
+ memcpy(scmd->cmnd, cmd, scmd->cmd_len);
+ scmd->allowed = retries;
req->timeout = timeout;
req->cmd_flags |= flags;
req->rq_flags |= rq_flags | RQF_QUIET;
* is invalid. Prevent the garbage from being misinterpreted
* and prevent security leaks by zeroing out the excess data.
*/
- if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
- memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);
+ if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen))
+ memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len);
if (resid)
- *resid = rq->resid_len;
- if (sense && rq->sense_len)
- memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
+ *resid = scmd->resid_len;
+ if (sense && scmd->sense_len)
+ memcpy(sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
if (sshdr)
- scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
- ret = rq->result;
+ scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
+ sshdr);
+ ret = scmd->result;
out:
blk_mq_free_request(req);
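
The function above is the internal passthrough engine; in-kernel callers normally reach it through the scsi_execute() wrapper. A hedged sketch of such a caller, assuming the scsi_execute() signature of this series (buffer management and error decoding simplified):

/* Sketch: read a standard 36-byte INQUIRY through the path above. */
static int example_inquiry(struct scsi_device *sdev, unsigned char *buf)
{
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	result = scsi_execute(sdev, cmd, DMA_FROM_DEVICE, buf, 36,
			      NULL, &sshdr, 30 * HZ, 3, 0, 0, NULL);
	if (result)
		sdev_printk(KERN_INFO, sdev,
			    "INQUIRY failed, result=0x%x\n", result);
	return result;
}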
/*
* SG_IO wants current and deferred errors
*/
- scsi_req(req)->sense_len =
- min(8 + cmd->sense_buffer[7],
- SCSI_SENSE_BUFFERSIZE);
+ cmd->sense_len = min(8 + cmd->sense_buffer[7],
+ SCSI_SENSE_BUFFERSIZE);
}
if (sense_current)
*blk_statp = scsi_result_to_blk_status(cmd, result);
if (unlikely(result)) /* a nz result may or may not be an error */
result = scsi_io_completion_nz_result(cmd, result, &blk_stat);
- if (unlikely(blk_rq_is_passthrough(req))) {
- /*
- * scsi_result_to_blk_status may have reset the host_byte
- */
- scsi_req(req)->result = cmd->result;
- }
-
/*
* Next deal with any sectors which we were able to correctly
* handle.
static void scsi_initialize_rq(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- struct scsi_request *req = &cmd->req;
-
- memset(req->__cmd, 0, sizeof(req->__cmd));
- req->cmd = req->__cmd;
- req->cmd_len = BLK_MAX_CDB;
- req->sense_len = 0;
+ memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
+ cmd->cmd_len = MAX_COMMAND_SIZE;
+ cmd->sense_len = 0;
init_rcu_head(&cmd->rcu);
cmd->jiffies_at_alloc = jiffies;
cmd->retries = 0;
/* Called before a request is prepared. See also scsi_mq_prep_fn(). */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
- void *buf = cmd->sense_buffer;
- void *prot = cmd->prot_sdb;
struct request *rq = scsi_cmd_to_rq(cmd);
- unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
- unsigned long jiffies_at_alloc;
- int retries, to_clear;
- bool in_flight;
- int budget_token = cmd->budget_token;
-
- if (!blk_rq_is_passthrough(rq) && !(flags & SCMD_INITIALIZED)) {
- flags |= SCMD_INITIALIZED;
+
+ if (!blk_rq_is_passthrough(rq) && !(cmd->flags & SCMD_INITIALIZED)) {
+ cmd->flags |= SCMD_INITIALIZED;
scsi_initialize_rq(rq);
}
- jiffies_at_alloc = cmd->jiffies_at_alloc;
- retries = cmd->retries;
- in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- /*
- * Zero out the cmd, except for the embedded scsi_request. Only clear
- * the driver-private command data if the LLD does not supply a
- * function to initialize that data.
- */
- to_clear = sizeof(*cmd) - sizeof(cmd->req);
- if (!dev->host->hostt->init_cmd_priv)
- to_clear += dev->host->hostt->cmd_size;
- memset((char *)cmd + sizeof(cmd->req), 0, to_clear);
-
cmd->device = dev;
- cmd->sense_buffer = buf;
- cmd->prot_sdb = prot;
- cmd->flags = flags;
INIT_LIST_HEAD(&cmd->eh_entry);
INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
- cmd->jiffies_at_alloc = jiffies_at_alloc;
- cmd->retries = retries;
- if (in_flight)
- __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- cmd->budget_token = budget_token;
-
}
static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
}
- cmd->cmd_len = scsi_req(req)->cmd_len;
- cmd->cmnd = scsi_req(req)->cmd;
cmd->transfersize = blk_rq_bytes(req);
- cmd->allowed = scsi_req(req)->retries;
return BLK_STS_OK;
}
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev = req->q->queuedata;
struct Scsi_Host *shost = sdev->host;
+ bool in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
struct scatterlist *sg;
scsi_init_command(sdev, cmd);
+ cmd->eh_eflags = 0;
+ cmd->allowed = 0;
+ cmd->prot_type = 0;
+ cmd->prot_flags = 0;
+ cmd->submitter = 0;
+ memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+ cmd->underflow = 0;
+ cmd->transfersize = 0;
+ cmd->host_scribble = NULL;
+ cmd->result = 0;
+ cmd->extra_len = 0;
+ cmd->state = 0;
+ if (in_flight)
+ __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+
+ /*
+ * Only clear the driver-private command data if the LLD does not supply
+ * a function to initialize that data.
+ */
+ if (!shost->hostt->init_cmd_priv)
+ memset(cmd + 1, 0, shost->hostt->cmd_size);
+
cmd->prot_op = SCSI_PROT_NORMAL;
if (blk_rq_bytes(req))
cmd->sc_data_direction = rq_dma_dir(req);
return ret;
}
- cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
- memset(cmd->cmnd, 0, BLK_MAX_CDB);
+ memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
-void scsi_done(struct scsi_cmnd *cmd)
+static void scsi_done_internal(struct scsi_cmnd *cmd, bool complete_directly)
{
+ struct request *req = scsi_cmd_to_rq(cmd);
+
switch (cmd->submitter) {
case SUBMITTED_BY_BLOCK_LAYER:
break;
if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
return;
trace_scsi_dispatch_cmd_done(cmd);
- blk_mq_complete_request(scsi_cmd_to_rq(cmd));
+
+ if (complete_directly)
+ blk_mq_complete_request_direct(req, scsi_complete);
+ else
+ blk_mq_complete_request(req);
+}
+
+void scsi_done(struct scsi_cmnd *cmd)
+{
+ scsi_done_internal(cmd, false);
}
EXPORT_SYMBOL(scsi_done);
+void scsi_done_direct(struct scsi_cmnd *cmd)
+{
+ scsi_done_internal(cmd, true);
+}
+EXPORT_SYMBOL(scsi_done_direct);
+
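
scsi_done_direct() exists for LLDs whose completion interrupt is guaranteed to fire on the CPU that submitted the command; completing the request directly skips the IPI that blk_mq_complete_request() might otherwise send. A hedged sketch of an interrupt handler using it (the driver and its completion-pop helper are hypothetical):

static irqreturn_t example_lld_isr(int irq, void *data)
{
	/* example_lld_pop_completion() is a hypothetical per-driver helper
	 * that returns the next finished command, or NULL. */
	struct scsi_cmnd *cmd = example_lld_pop_completion(data);

	if (!cmd)
		return IRQ_NONE;
	cmd->result = DID_OK << 16;
	scsi_done_direct(cmd);	/* complete on this CPU, no IPI */
	return IRQ_HANDLED;
}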
static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
{
struct scsi_device *sdev = q->queuedata;
ret = BLK_STS_DEV_RESOURCE;
break;
case BLK_STS_AGAIN:
- scsi_req(req)->result = DID_BUS_BUSY << 16;
+ cmd->result = DID_BUS_BUSY << 16;
if (req->rq_flags & RQF_DONTPREP)
scsi_mq_uninit_cmd(cmd);
break;
default:
if (unlikely(!scsi_device_online(sdev)))
- scsi_req(req)->result = DID_NO_CONNECT << 16;
+ cmd->result = DID_NO_CONNECT << 16;
else
- scsi_req(req)->result = DID_ERROR << 16;
+ cmd->result = DID_ERROR << 16;
/*
* Make sure to release all allocated resources when
* we hit an error, as we will never see this command
kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
if (!cmd->sense_buffer)
return -ENOMEM;
- cmd->req.sense = cmd->sense_buffer;
if (scsi_host_get_prot(shost)) {
sg = (void *)cmd + sizeof(struct scsi_cmnd) +
char *logbuf;
size_t off = 0, logbuf_len;
- if (!scmd || !scmd->cmnd)
+ if (!scmd)
return;
logbuf = scsi_log_reserve_buffer(&logbuf_len);
char *logbuf;
size_t off, logbuf_len;
- if (!cmd->cmnd)
- return;
-
logbuf = scsi_log_reserve_buffer(&logbuf_len);
if (!logbuf)
return;
!sdev->host->hostt->change_queue_depth)
return 0;
-#ifdef CONFIG_SCSI_DH
- if (attr == &dev_attr_access_state.attr &&
- !sdev->handler)
- return 0;
- if (attr == &dev_attr_preferred_path.attr &&
- !sdev->handler)
- return 0;
-#endif
return attr->mode;
}
static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
static void fc_bsg_remove(struct request_queue *);
static void fc_bsg_goose_queue(struct fc_rport *);
-static void fc_li_stats_update(struct fc_fn_li_desc *li_desc,
+static void fc_li_stats_update(u16 event_type,
struct fc_fpin_stats *stats);
static void fc_delivery_stats_update(u32 reason_code,
struct fc_fpin_stats *stats);
EXPORT_SYMBOL(fc_find_rport_by_wwpn);
static void
-fc_li_stats_update(struct fc_fn_li_desc *li_desc,
+fc_li_stats_update(u16 event_type,
struct fc_fpin_stats *stats)
{
- stats->li += be32_to_cpu(li_desc->event_count);
- switch (be16_to_cpu(li_desc->event_type)) {
+ stats->li++;
+ switch (event_type) {
case FPIN_LI_UNKNOWN:
- stats->li_failure_unknown +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_failure_unknown++;
break;
case FPIN_LI_LINK_FAILURE:
- stats->li_link_failure_count +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_link_failure_count++;
break;
case FPIN_LI_LOSS_OF_SYNC:
- stats->li_loss_of_sync_count +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_loss_of_sync_count++;
break;
case FPIN_LI_LOSS_OF_SIG:
- stats->li_loss_of_signals_count +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_loss_of_signals_count++;
break;
case FPIN_LI_PRIM_SEQ_ERR:
- stats->li_prim_seq_err_count +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_prim_seq_err_count++;
break;
case FPIN_LI_INVALID_TX_WD:
- stats->li_invalid_tx_word_count +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_invalid_tx_word_count++;
break;
case FPIN_LI_INVALID_CRC:
- stats->li_invalid_crc_count +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_invalid_crc_count++;
break;
case FPIN_LI_DEVICE_SPEC:
- stats->li_device_specific +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_device_specific++;
break;
}
}
struct fc_rport *attach_rport = NULL;
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
struct fc_fn_li_desc *li_desc = (struct fc_fn_li_desc *)tlv;
+ u16 event_type = be16_to_cpu(li_desc->event_type);
u64 wwpn;
rport = fc_find_rport_by_wwpn(shost,
(rport->roles & FC_PORT_ROLE_FCP_TARGET ||
rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
attach_rport = rport;
- fc_li_stats_update(li_desc, &attach_rport->fpin_stats);
+ fc_li_stats_update(event_type, &attach_rport->fpin_stats);
}
if (be32_to_cpu(li_desc->pname_count) > 0) {
rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
if (rport == attach_rport)
continue;
- fc_li_stats_update(li_desc,
+ fc_li_stats_update(event_type,
&rport->fpin_stats);
}
}
}
if (fc_host->port_name == be64_to_cpu(li_desc->attached_wwpn))
- fc_li_stats_update(li_desc, &fc_host->fpin_stats);
+ fc_li_stats_update(event_type, &fc_host->fpin_stats);
}
/*
};
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
-static struct workqueue_struct *iscsi_eh_timer_workq;
static struct workqueue_struct *iscsi_conn_cleanup_workq;
struct iscsi_cls_host *ihost = shost->shost_data;
memset(ihost, 0, sizeof(*ihost));
- atomic_set(&ihost->nr_scans, 0);
mutex_init(&ihost->mutex);
iscsi_bsg_host_add(shost, ihost);
}
EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
-/**
- * iscsi_scan_finished - helper to report when running scans are done
- * @shost: scsi host
- * @time: scan run time
- *
- * This function can be used by drives like qla4xxx to report to the scsi
- * layer when the scans it kicked off at module load time are done.
- */
-int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
-{
- struct iscsi_cls_host *ihost = shost->shost_data;
- /*
- * qla4xxx will have kicked off some session unblocks before calling
- * scsi_scan_host, so just wait for them to complete.
- */
- return !atomic_read(&ihost->nr_scans);
-}
-EXPORT_SYMBOL_GPL(iscsi_scan_finished);
-
struct iscsi_scan_data {
unsigned int channel;
unsigned int id;
{
struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session, scan_work);
- struct Scsi_Host *shost = iscsi_session_to_shost(session);
- struct iscsi_cls_host *ihost = shost->shost_data;
struct iscsi_scan_data scan_data;
scan_data.channel = 0;
scan_data.rescan = SCSI_SCAN_RESCAN;
iscsi_user_scan_session(&session->dev, &scan_data);
- atomic_dec(&ihost->nr_scans);
}
/**
struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session,
unblock_work);
- struct Scsi_Host *shost = iscsi_session_to_shost(session);
- struct iscsi_cls_host *ihost = shost->shost_data;
unsigned long flags;
ISCSI_DBG_TRANS_SESSION(session, "Unblocking session\n");
- /*
- * The recovery and unblock work get run from the same workqueue,
- * so try to cancel it if it was going to run after this unblock.
- */
- cancel_delayed_work(&session->recovery_work);
+
+ cancel_delayed_work_sync(&session->recovery_work);
spin_lock_irqsave(&session->lock, flags);
session->state = ISCSI_SESSION_LOGGED_IN;
spin_unlock_irqrestore(&session->lock, flags);
/* start IO */
scsi_target_unblock(&session->dev, SDEV_RUNNING);
- /*
- * Only do kernel scanning if the driver is properly hooked into
- * the async scanning code (drivers like iscsi_tcp do login and
- * scanning from userspace).
- */
- if (shost->hostt->scan_finished) {
- if (scsi_queue_work(shost, &session->scan_work))
- atomic_inc(&ihost->nr_scans);
- }
ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking session\n");
}
*/
void iscsi_unblock_session(struct iscsi_cls_session *session)
{
- flush_work(&session->block_work);
+ if (!cancel_work_sync(&session->block_work))
+ cancel_delayed_work_sync(&session->recovery_work);
- queue_work(iscsi_eh_timer_workq, &session->unblock_work);
+ queue_work(session->workq, &session->unblock_work);
/*
* Blocking the session can be done from any context so we only
* queue the block work. Make sure the unblock work has completed
scsi_target_block(&session->dev);
ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n");
if (session->recovery_tmo >= 0)
- queue_delayed_work(iscsi_eh_timer_workq,
+ queue_delayed_work(session->workq,
&session->recovery_work,
session->recovery_tmo * HZ);
}
void iscsi_block_session(struct iscsi_cls_session *session)
{
- queue_work(iscsi_eh_timer_workq, &session->block_work);
+ queue_work(session->workq, &session->block_work);
}
EXPORT_SYMBOL_GPL(iscsi_block_session);
int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
{
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
unsigned long flags;
int id = 0;
int err;
session->sid = atomic_add_return(1, &iscsi_session_nr);
+ session->workq = alloc_workqueue("iscsi_ctrl_%d:%d",
+ WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
+ shost->host_no, session->sid);
+ if (!session->workq)
+ return -ENOMEM;
+
if (target_id == ISCSI_MAX_TARGET) {
id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
iscsi_cls_session_printk(KERN_ERR, session,
"Failure in Target ID Allocation\n");
- return id;
+ err = id;
+ goto destroy_wq;
}
session->target_id = (unsigned int)id;
session->ida_used = true;
release_ida:
if (session->ida_used)
ida_simple_remove(&iscsi_sess_ida, session->target_id);
-
+destroy_wq:
+ destroy_workqueue(session->workq);
return err;
}
EXPORT_SYMBOL_GPL(iscsi_add_session);
{
if (!iscsi_is_conn_dev(dev))
return 0;
- return iscsi_destroy_conn(iscsi_dev_to_conn(dev));
+
+ iscsi_remove_conn(iscsi_dev_to_conn(dev));
+ iscsi_put_conn(iscsi_dev_to_conn(dev));
+
+ return 0;
}
void iscsi_remove_session(struct iscsi_cls_session *session)
list_del(&session->sess_list);
spin_unlock_irqrestore(&sesslock, flags);
- flush_work(&session->block_work);
- flush_work(&session->unblock_work);
- cancel_delayed_work_sync(&session->recovery_work);
+ if (!cancel_work_sync(&session->block_work))
+ cancel_delayed_work_sync(&session->recovery_work);
+ cancel_work_sync(&session->unblock_work);
/*
* If we are blocked let commands flow again. The lld or iscsi
* layer should set up the queuecommand to fail commands.
spin_unlock_irqrestore(&session->lock, flags);
scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
- /* flush running scans then delete devices */
+ /*
+ * qla4xxx can perform its own scans when it runs in kernel-only
+ * mode. Make sure to flush those scans.
+ */
flush_work(&session->scan_work);
/* flush running unbind operations */
flush_work(&session->unbind_work);
transport_unregister_device(&session->dev);
+ destroy_workqueue(session->workq);
+
ISCSI_DBG_TRANS_SESSION(session, "Completing session removal\n");
device_del(&session->dev);
}
EXPORT_SYMBOL_GPL(iscsi_free_session);
/**
- * iscsi_create_conn - create iscsi class connection
+ * iscsi_alloc_conn - alloc iscsi class connection
* @session: iscsi cls session
* @dd_size: private driver data size
* @cid: connection id
- *
- * This can be called from a LLD or iscsi_transport. The connection
- * is child of the session so cid must be unique for all connections
- * on the session.
- *
- * Since we do not support MCS, cid will normally be zero. In some cases
- * for software iscsi we could be trying to preallocate a connection struct
- * in which case there could be two connection structs and cid would be
- * non-zero.
*/
struct iscsi_cls_conn *
-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
+iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
{
struct iscsi_transport *transport = session->transport;
struct iscsi_cls_conn *conn;
- unsigned long flags;
- int err;
conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
if (!conn)
goto free_conn;
dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid);
+ device_initialize(&conn->dev);
conn->dev.parent = &session->dev;
conn->dev.release = iscsi_conn_release;
- err = device_register(&conn->dev);
+
+ return conn;
+
+free_conn:
+ kfree(conn);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_alloc_conn);
+
+/**
+ * iscsi_add_conn - add iscsi class connection
+ * @conn: iscsi cls connection
+ *
+ * This will expose iscsi_cls_conn to sysfs so make sure the related
+ * resources for sysfs attributes are initialized before calling this.
+ */
+int iscsi_add_conn(struct iscsi_cls_conn *conn)
+{
+ int err;
+ unsigned long flags;
+ struct iscsi_cls_session *session = iscsi_dev_to_session(conn->dev.parent);
+
+ err = device_add(&conn->dev);
if (err) {
- iscsi_cls_session_printk(KERN_ERR, session, "could not "
- "register connection's dev\n");
- goto release_parent_ref;
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "could not register connection's dev\n");
+ return err;
}
err = transport_register_device(&conn->dev);
if (err) {
- iscsi_cls_session_printk(KERN_ERR, session, "could not "
- "register transport's dev\n");
- goto release_conn_ref;
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "could not register transport's dev\n");
+ device_del(&conn->dev);
+ return err;
}
spin_lock_irqsave(&connlock, flags);
list_add(&conn->conn_list, &connlist);
spin_unlock_irqrestore(&connlock, flags);
- ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
- return conn;
-
-release_conn_ref:
- device_unregister(&conn->dev);
- put_device(&session->dev);
- return NULL;
-release_parent_ref:
- put_device(&session->dev);
-free_conn:
- kfree(conn);
- return NULL;
+ return 0;
}
-
-EXPORT_SYMBOL_GPL(iscsi_create_conn);
+EXPORT_SYMBOL_GPL(iscsi_add_conn);
/**
- * iscsi_destroy_conn - destroy iscsi class connection
- * @conn: iscsi cls session
+ * iscsi_remove_conn - remove iscsi class connection from sysfs
+ * @conn: iscsi cls connection
*
- * This can be called from a LLD or iscsi_transport.
+ * Remove iscsi_cls_conn from sysfs, and wait for previous
+ * read/write of iscsi_cls_conn's attributes in sysfs to finish.
*/
-int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
+void iscsi_remove_conn(struct iscsi_cls_conn *conn)
{
unsigned long flags;
spin_unlock_irqrestore(&connlock, flags);
transport_unregister_device(&conn->dev);
- ISCSI_DBG_TRANS_CONN(conn, "Completing conn destruction\n");
- device_unregister(&conn->dev);
- return 0;
+ device_del(&conn->dev);
}
-EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
+EXPORT_SYMBOL_GPL(iscsi_remove_conn);
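
iscsi_create_conn()/iscsi_destroy_conn() are thereby split along the usual device_initialize()/device_add() two-stage pattern, so an LLD can populate connection state and sysfs-visible resources between allocation and registration. A sketch of the resulting call sequence (error handling abbreviated; the teardown half mirrors iscsi_iter_destroy_conn_fn() above):

/* Setup: allocate, initialize driver data, then expose to sysfs. */
conn = iscsi_alloc_conn(session, dd_size, cid);
if (!conn)
	return NULL;
/* ... fill conn->dd_data and anything sysfs attributes will read ... */
if (iscsi_add_conn(conn)) {
	iscsi_put_conn(conn);	/* drops the device_initialize() reference */
	return NULL;
}

/* Teardown: unregister from sysfs first, then drop the last reference. */
iscsi_remove_conn(conn);	/* device_del(); waits out sysfs readers */
iscsi_put_conn(conn);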
void iscsi_put_conn(struct iscsi_cls_conn *conn)
{
case ISCSI_UEVENT_UNBIND_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
if (session)
- scsi_queue_work(iscsi_session_to_shost(session),
- &session->unbind_work);
+ queue_work(session->workq, &session->unbind_work);
else
err = -EINVAL;
break;
INIT_LIST_HEAD(&priv->list);
priv->iscsi_transport = tt;
priv->t.user_scan = iscsi_user_scan;
- priv->t.create_work_queue = 1;
priv->dev.class = &iscsi_transport_class;
dev_set_name(&priv->dev, "%s", tt->name);
goto unregister_flashnode_bus;
}
- iscsi_eh_timer_workq = alloc_workqueue("%s",
- WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 1, "iscsi_eh");
- if (!iscsi_eh_timer_workq) {
- err = -ENOMEM;
- goto release_nls;
- }
-
iscsi_conn_cleanup_workq = alloc_workqueue("%s",
WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
"iscsi_conn_cleanup");
if (!iscsi_conn_cleanup_workq) {
err = -ENOMEM;
- goto destroy_wq;
+ goto release_nls;
}
return 0;
-destroy_wq:
- destroy_workqueue(iscsi_eh_timer_workq);
release_nls:
netlink_kernel_release(nls);
unregister_flashnode_bus:
static void __exit iscsi_transport_exit(void)
{
destroy_workqueue(iscsi_conn_cleanup_workq);
- destroy_workqueue(iscsi_eh_timer_workq);
netlink_kernel_release(nls);
bus_unregister(&iscsi_flashnode_bus);
transport_class_unregister(&iscsi_connection_class);
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_request.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
static DEFINE_IDA(sd_index_ida);
static struct kmem_cache *sd_cdb_cache;
-static mempool_t *sd_cdb_pool;
static mempool_t *sd_page_pool;
static struct lock_class_key sd_bio_compl_lkclass;
* Reporting a maximum number of blocks that is not aligned
* on the device physical size would cause a large write same
* request to be split into physically unaligned chunks by
- * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
- * even if the caller of these functions took care to align the
- * large request. So make sure the maximum reported is aligned
- * to the device physical block size. This is only an optional
- * optimization for regular disks, but this is mandatory to
- * avoid failure of large write same requests directed at
- * sequential write required zones of host-managed ZBC disks.
+ * __blkdev_issue_write_zeroes() even if the caller of this
+ * function took care to align the large request. So make sure
+ * the maximum reported is aligned to the device physical block
+ * size. This is only an optional optimization for regular
+ * disks, but this is mandatory to avoid failure of large write
+ * same requests directed at sequential write required zones of
+ * host-managed ZBC disks.
*/
sdkp->max_ws_blocks =
round_down(sdkp->max_ws_blocks,
}
out:
- blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
- (logical_block_size >> 9));
blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
(logical_block_size >> 9));
}
-/**
- * sd_setup_write_same_cmnd - write the same data to multiple blocks
- * @cmd: command to prepare
- *
- * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on
- * the preference indicated by the target device.
- **/
-static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
-{
- struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
- struct bio *bio = rq->bio;
- u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
- u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
- blk_status_t ret;
-
- if (sdkp->device->no_write_same)
- return BLK_STS_TARGET;
-
- BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
-
- rq->timeout = SD_WRITE_SAME_TIMEOUT;
-
- if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) {
- cmd->cmd_len = 16;
- cmd->cmnd[0] = WRITE_SAME_16;
- put_unaligned_be64(lba, &cmd->cmnd[2]);
- put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
- } else {
- cmd->cmd_len = 10;
- cmd->cmnd[0] = WRITE_SAME;
- put_unaligned_be32(lba, &cmd->cmnd[2]);
- put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
- }
-
- cmd->transfersize = sdp->sector_size;
- cmd->allowed = sdkp->max_retries;
-
- /*
- * For WRITE SAME the data transferred via the DATA OUT buffer is
- * different from the amount of data actually written to the target.
- *
- * We set up __data_len to the amount of data transferred via the
- * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
- * to transfer a single sector of data first, but then reset it to
- * the amount of data to be written right after so that the I/O path
- * knows how much to actually write.
- */
- rq->__data_len = sdp->sector_size;
- ret = scsi_alloc_sgtables(cmd);
- rq->__data_len = blk_rq_bytes(rq);
-
- return ret;
-}
-
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
sector_t lba, unsigned int nr_blocks,
unsigned char flags)
{
- cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
- if (unlikely(cmd->cmnd == NULL))
- return BLK_STS_RESOURCE;
-
cmd->cmd_len = SD_EXT_CDB_SIZE;
- memset(cmd->cmnd, 0, cmd->cmd_len);
-
cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
cmd->cmnd[7] = 0x18; /* Additional CDB len */
cmd->cmnd[9] = write ? WRITE_32 : READ_32;
}
case REQ_OP_WRITE_ZEROES:
return sd_setup_write_zeroes_cmnd(cmd);
- case REQ_OP_WRITE_SAME:
- return sd_setup_write_same_cmnd(cmd);
case REQ_OP_FLUSH:
return sd_setup_flush_cmnd(cmd);
case REQ_OP_READ:
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
struct request *rq = scsi_cmd_to_rq(SCpnt);
- u8 *cmnd;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
mempool_free(rq->special_vec.bv_page, sd_page_pool);
-
- if (SCpnt->cmnd != scsi_req(rq)->cmd) {
- cmnd = SCpnt->cmnd;
- SCpnt->cmnd = NULL;
- SCpnt->cmd_len = 0;
- mempool_free(cmnd, sd_cdb_pool);
- }
}
static bool sd_need_revalidate(struct block_device *bdev,
switch (req_op(req)) {
case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
return 0;
if (sdkp->WCE && sdkp->media_present) {
- sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
+ if (!sdkp->device->silence_suspend)
+ sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
ret = sd_sync_cache(sdkp, &sshdr);
if (ret) {
}
if (sdkp->device->manage_start_stop) {
- sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ if (!sdkp->device->silence_suspend)
+ sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
/* an error is not worth aborting a system sleep */
ret = sd_start_stop_device(sdkp, 0);
if (ignore_stop_errors)
goto err_out_class;
}
- sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
- if (!sd_cdb_pool) {
- printk(KERN_ERR "sd: can't init extended cdb pool\n");
- err = -ENOMEM;
- goto err_out_cache;
- }
-
sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
if (!sd_page_pool) {
printk(KERN_ERR "sd: can't init discard page pool\n");
err = -ENOMEM;
- goto err_out_ppool;
+ goto err_out_cache;
}
err = scsi_register_driver(&sd_template.gendrv);
err_out_driver:
mempool_destroy(sd_page_pool);
-err_out_ppool:
- mempool_destroy(sd_cdb_pool);
-
err_out_cache:
kmem_cache_destroy(sd_cdb_cache);
SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
scsi_unregister_driver(&sd_template.gendrv);
- mempool_destroy(sd_cdb_pool);
mempool_destroy(sd_page_pool);
kmem_cache_destroy(sd_cdb_cache);
return true;
case REQ_OP_WRITE:
case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
return blk_rq_zone_is_seq(rq);
default:
return false;
rq->__sector += sdkp->zones_wp_offset[zno];
fallthrough;
case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
case REQ_OP_WRITE:
if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp))
sdkp->zones_wp_offset[zno] +=
#include <linux/uio.h>
#include <linux/cred.h> /* for sg_check_file_access() */
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/sg.h>
#include "scsi_logging.h"
}
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
- scsi_req_free_cmd(scsi_req(srp->rq));
blk_mq_free_request(srp->rq);
srp->rq = NULL;
}
static void
sg_rq_end_io(struct request *rq, blk_status_t status)
{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
struct sg_request *srp = rq->end_io_data;
- struct scsi_request *req = scsi_req(rq);
Sg_device *sdp;
Sg_fd *sfp;
unsigned long iflags;
if (unlikely(atomic_read(&sdp->detaching)))
pr_info("%s: device detaching\n", __func__);
- sense = req->sense;
- result = req->result;
- resid = req->resid_len;
+ sense = scmd->sense_buffer;
+ result = scmd->result;
+ resid = scmd->resid_len;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_cmd_done: pack_id=%d, res=0x%x\n",
}
}
- if (req->sense_len)
- memcpy(srp->sense_b, req->sense, SCSI_SENSE_BUFFERSIZE);
+ if (scmd->sense_len)
+ memcpy(srp->sense_b, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
/* Rely on write phase to clean out srp status values, so no "else" */
* blk_rq_unmap_user() can be called from user context.
*/
srp->rq = NULL;
- scsi_req_free_cmd(scsi_req(rq));
blk_mq_free_request(rq);
write_lock_irqsave(&sfp->rq_list_lock, iflags);
{
int res;
struct request *rq;
- struct scsi_request *req;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
struct request_queue *q = sfp->parentdp->device->request_queue;
struct rq_map_data *md, map_data;
int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
- unsigned char *long_cmdp = NULL;
+ struct scsi_cmnd *scmd;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_start_req: dxfer_len=%d\n",
dxfer_len));
- if (hp->cmd_len > BLK_MAX_CDB) {
- long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
- if (!long_cmdp)
- return -ENOMEM;
- }
-
/*
* NOTE
*
*/
rq = scsi_alloc_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq)) {
- kfree(long_cmdp);
+ if (IS_ERR(rq))
return PTR_ERR(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
+
+ if (hp->cmd_len > sizeof(scmd->cmnd)) {
+ blk_mq_free_request(rq);
+ return -EINVAL;
}
- req = scsi_req(rq);
- if (hp->cmd_len > BLK_MAX_CDB)
- req->cmd = long_cmdp;
- memcpy(req->cmd, cmd, hp->cmd_len);
- req->cmd_len = hp->cmd_len;
+ memcpy(scmd->cmnd, cmd, hp->cmd_len);
+ scmd->cmd_len = hp->cmd_len;
srp->rq = rq;
rq->end_io_data = srp;
- req->retries = SG_DEFAULT_RETRIES;
+ scmd->allowed = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
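The sg conversion above shows the pattern the rest of this series follows: with struct scsi_request gone, the CDB, retry count, result and sense data live directly in the struct scsi_cmnd that serves as the request's PDU, and a CDB longer than the fixed cmnd[] array is now rejected with -EINVAL instead of being heap-allocated. A minimal sketch of the resulting passthrough idiom, assuming a valid struct scsi_device *sdev (hypothetical here):

	struct request *rq;
	struct scsi_cmnd *scmd;

	rq = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	scmd = blk_mq_rq_to_pdu(rq);		/* the scsi_cmnd is the PDU */
	scmd->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
	scmd->cmnd[0] = TEST_UNIT_READY;
	scmd->allowed = 3;			/* was scsi_req(rq)->retries */
	rq->timeout = 10 * HZ;

	blk_execute_rq(rq, false);		/* synchronous, not at head */
	/* completion status and sense: scmd->result, scmd->sense_buffer */
	blk_mq_free_request(rq);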
if (srp->bio)
ret = blk_rq_unmap_user(srp->bio);
- if (srp->rq) {
- scsi_req_free_cmd(scsi_req(srp->rq));
+ if (srp->rq)
blk_mq_free_request(srp->rq);
- }
if (srp->res_used)
sg_unlink_reserve(sfp, srp);
#include <asm/sgi/ip22.h>
#include <asm/sgi/wd.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
struct ip22_hostdata {
static inline
void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din)
{
- unsigned long len = cmd->SCp.this_residual;
- void *addr = cmd->SCp.ptr;
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
+ void *addr = scsi_pointer->ptr;
dma_addr_t physaddr;
unsigned long count;
struct hpc_chunk *hcp;
physaddr = dma_map_single(hd->dev, addr, len, DMA_DIR(din));
- cmd->SCp.dma_handle = physaddr;
+ scsi_pointer->dma_handle = physaddr;
hcp = hd->cpu;
while (len) {
static int dma_setup(struct scsi_cmnd *cmd, int datainp)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct ip22_hostdata *hdata = host_to_hostdata(cmd->device->host);
struct hpc3_scsiregs *hregs =
(struct hpc3_scsiregs *) cmd->device->host->base;
* obvious). IMHO a better fix would be, not to do these dma setups
* in the first place.
*/
- if (cmd->SCp.ptr == NULL || cmd->SCp.this_residual == 0)
+ if (scsi_pointer->ptr == NULL || scsi_pointer->this_residual == 0)
return 1;
fill_hpc_entries(hdata, cmd, datainp);
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
struct ip22_hostdata *hdata = host_to_hostdata(instance);
struct hpc3_scsiregs *hregs;
if (!SCpnt)
return;
- if (SCpnt->SCp.ptr == NULL || SCpnt->SCp.this_residual == 0)
+ if (scsi_pointer->ptr == NULL || scsi_pointer->this_residual == 0)
return;
hregs = (struct hpc3_scsiregs *) SCpnt->device->host->base;
barrier();
}
hregs->ctrl = 0;
- dma_unmap_single(hdata->dev, SCpnt->SCp.dma_handle,
- SCpnt->SCp.this_residual,
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
DMA_DIR(hdata->wh.dma_dir));
pr_debug("\n");
.sg_tablesize = SG_ALL,
.cmd_per_lun = 8,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static int sgiwd93_probe(struct platform_device *pdev)
struct pqi_device_registers pqi_registers; /* 4000h */
};
-#if ((HZ) < 1000)
-#define PQI_HZ 1000
-#else
-#define PQI_HZ (HZ)
-#endif
-
#define PQI_DEVICE_REGISTERS_OFFSET 0x4000
/* shutdown reasons for taking the controller offline */
#define PQI_MAX_TRANSFER_SIZE (1024U * 1024U)
#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U)
-#define RAID_MAP_MAX_ENTRIES 1024
+#define RAID_MAP_MAX_ENTRIES 1024
+#define RAID_MAP_MAX_DATA_DISKS_PER_ROW 128
#define PQI_PHYSICAL_DEVICE_BUS 0
#define PQI_RAID_VOLUME_BUS 1
u8 box[8];
u16 phys_connector[8];
u8 phy_id;
+ u8 ncq_prio_enable;
+ u8 ncq_prio_support;
bool raid_bypass_configured; /* RAID bypass configured */
bool raid_bypass_enabled; /* RAID bypass enabled */
- u32 next_bypass_group;
+ u32 next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW];
struct raid_map *raid_map; /* RAID bypass map */
u32 max_transfer_encrypted;
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
atomic_t scsi_cmds_outstanding;
atomic_t raid_bypass_cnt;
- u8 page_83_identifier[16];
};
/* VPD inquiry pages */
bool controller_online;
bool block_requests;
bool scan_blocked;
+ u8 logical_volume_rescan_needed : 1;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
u8 soft_reset_handshake_supported : 1;
u8 raid_iu_timeout_supported : 1;
u8 tmf_iu_timeout_supported : 1;
- u8 unique_wwid_in_report_phys_lun_supported : 1;
u8 firmware_triage_supported : 1;
u8 rpl_extended_format_4_5_supported : 1;
u8 enable_r1_writes : 1;
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.12-055"
+#define DRIVER_VERSION "2.1.14-035"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 12
-#define DRIVER_REVISION 55
+#define DRIVER_RELEASE 14
+#define DRIVER_REVISION 35
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
+struct pqi_cmd_priv {
+ int this_residual;
+};
+
+static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
+static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
- struct pqi_encryption_info *encryption_info, bool raid_bypass);
+ struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
displayed_warning = false;
start_jiffies = jiffies;
- warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
+ warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
while (atomic_read(&ctrl_info->num_busy_threads) >
atomic_read(&ctrl_info->num_blocked_threads)) {
"waiting %u seconds for driver activity to quiesce\n",
jiffies_to_msecs(jiffies - start_jiffies) / 1000);
displayed_warning = true;
- warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
}
usleep_range(1000, 2000);
}
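Dropping PQI_HZ, which pinned the multiplier at 1000 whenever HZ was below 1000, means the timeouts in this driver are now expressed in real jiffies: a 30 second wait is 30 seconds on an HZ=250 kernel too, rather than four times that. The loops all use the usual wrap-safe jiffies idiom; a sketch, with condition_met() as a hypothetical stand-in for the driver's actual predicate:

	unsigned long timeout = jiffies + 30 * HZ;	/* 30 s from now */

	while (!condition_met()) {
		if (time_after(jiffies, timeout))	/* wrap-safe compare */
			return -ETIMEDOUT;
		usleep_range(1000, 2000);
	}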
pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}
-#define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ)
+#define PQI_RESCAN_WORK_DELAY (10 * HZ)
static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
return rc;
}
-#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * PQI_HZ)
+#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
static void pqi_update_time_worker(struct work_struct *work)
{
for (i = 0; i < num_physicals; i++) {
memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
- memset(&rpl_16byte_wwid_list->lun_entries[i].wwid, 0, 8);
- memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
+ memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
+ memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
device->volume_offline = volume_offline;
}
+#define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
sizeof(device->phys_connector));
device->bay = id_phys->phys_bay_in_box;
- memcpy(&device->page_83_identifier, &id_phys->page_83_identifier,
- sizeof(device->page_83_identifier));
-
if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
id_phys->phy_count)
device->phy_id =
else
device->phy_id = 0xFF;
+ device->ncq_prio_support =
+ ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
+ PQI_DEVICE_NCQ_PRIO_SUPPORTED);
+
return 0;
}
/* Assumes the SCSI device list lock is held. */
-static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
- struct pqi_scsi_dev *new_device)
+static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
{
existing_device->device_type = new_device->device_type;
existing_device->bus = new_device->bus;
existing_device->target_lun_valid = true;
}
- if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
- existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
- new_device->volume_status == CISS_LV_OK)
+ if (pqi_is_logical_device(existing_device) &&
+ ctrl_info->logical_volume_rescan_needed)
existing_device->rescan = true;
/* By definition, the scsi3addr and wwid fields are already the same. */
sizeof(existing_device->box));
memcpy(existing_device->phys_connector, new_device->phys_connector,
sizeof(existing_device->phys_connector));
- existing_device->next_bypass_group = 0;
+ memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
kfree(existing_device->raid_map);
existing_device->raid_map = new_device->raid_map;
existing_device->raid_bypass_configured =
*/
device->new_device = false;
matching_device->device_gone = false;
- pqi_scsi_update_device(matching_device, device);
+ pqi_scsi_update_device(ctrl_info, matching_device, device);
break;
case DEVICE_NOT_FOUND:
/*
}
/*
- * Notify the SCSI ML if the queue depth of any existing device has
- * changed.
+ * Notify the SML of any existing device changes, such as
+ * queue depth and device size.
*/
list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
}
}
}
+
+ ctrl_info->logical_volume_rescan_needed = false;
+
}
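Rather than inferring a rescan from per-volume expansion status, the driver now latches a controller-wide logical_volume_rescan_needed flag when a logical-device event arrives (see the event-handler hunk below), marks every logical volume for rescan during the next device update, and clears the flag once that update completes.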
static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
scsi3addr[3] |= 0xc0;
}
-static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
+static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
{
- switch (device->device_type) {
- case SA_DEVICE_TYPE_SAS:
- case SA_DEVICE_TYPE_EXPANDER_SMP:
- case SA_DEVICE_TYPE_SES:
- return true;
- }
+ if (pqi_is_logical_device(device))
+ return false;
- return false;
+ return (device->path_map & (device->path_map - 1)) != 0;
}
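pqi_is_multipath_device() relies on the classic bit trick: x & (x - 1) clears the lowest set bit of x, so the expression is non-zero exactly when path_map has more than one bit set, i.e. the physical device is reachable over more than one path. A small stand-alone illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int maps[] = { 0x0, 0x1, 0x3, 0x8, 0xA };

		for (int i = 0; i < 5; i++)
			printf("path_map=0x%X multipath=%d\n", maps[i],
			       (maps[i] & (maps[i] - 1)) != 0);
		return 0;
	}

Only 0x3 and 0xA (two bits set) report multipath; the empty and single-bit maps do not.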
static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
}
-static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct report_phys_lun_16byte_wwid *phys_lun)
-{
- if (ctrl_info->unique_wwid_in_report_phys_lun_supported ||
- ctrl_info->rpl_extended_format_4_5_supported ||
- pqi_is_device_with_sas_address(device))
- memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
- else
- memcpy(&device->wwid[8], device->page_83_identifier, 8);
-}
-
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
int i;
pqi_assign_bus_target_lun(device);
if (device->is_physical_device) {
- pqi_set_physical_device_wwid(ctrl_info, device, phys_lun);
+ memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
if ((phys_lun->device_flags &
CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
phys_lun->aio_handle) {
sizeof(device->volume_id));
}
- if (pqi_is_device_with_sas_address(device))
- device->sas_address = get_unaligned_be64(&device->wwid[8]);
+ device->sas_address = get_unaligned_be64(&device->wwid[0]);
new_device_list[num_valid_devices++] = device;
}
struct pqi_scsi_dev *device;
struct pqi_scsi_dev *next;
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
-
list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
scsi_device_list_entry) {
if (pqi_is_device_added(device))
pqi_remove_device(ctrl_info, device);
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
list_del(&device->scsi_device_list_entry);
pqi_free_device(device);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
-
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
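Narrowing the device-list lock to the list_del()/pqi_free_device() pair means pqi_remove_device(), which ends up in scsi_remove_device() and may sleep, no longer runs with the spinlock held, presumably the motivation for breaking up the old whole-loop locking.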
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (rmd.is_write) {
pqi_calc_aio_r1_nexus(raid_map, &rmd);
} else {
- group = device->next_bypass_group;
+ group = device->next_bypass_group[rmd.map_index];
next_bypass_group = group + 1;
if (next_bypass_group >= rmd.layout_map_count)
next_bypass_group = 0;
- device->next_bypass_group = next_bypass_group;
+ device->next_bypass_group[rmd.map_index] = next_bypass_group;
rmd.map_index += group * rmd.data_disks_per_row;
}
} else if ((device->raid_level == SA_RAID_5 ||
return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
rmd.cdb, rmd.cdb_length, queue_group,
- encryption_info_ptr, true);
+ encryption_info_ptr, true, false);
}
#define PQI_STATUS_IDLE 0x0
u8 status;
pqi_registers = ctrl_info->pqi_registers;
- timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
signature = readq(&pqi_registers->signature);
int residual_count;
int xfer_count;
bool device_offline;
+ struct pqi_scsi_dev *device;
scmd = io_request->scmd;
error_info = io_request->error_info;
host_byte = DID_OK;
sense_data_length = 0;
device_offline = false;
+ device = scmd->device->hostdata;
switch (error_info->service_response) {
case PQI_AIO_SERV_RESPONSE_COMPLETE:
break;
case PQI_AIO_STATUS_AIO_PATH_DISABLED:
pqi_aio_path_disabled(io_request);
- scsi_status = SAM_STAT_GOOD;
- io_request->status = -EAGAIN;
+ if (pqi_is_multipath_device(device)) {
+ pqi_device_remove_start(device);
+ host_byte = DID_NO_CONNECT;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ } else {
+ scsi_status = SAM_STAT_GOOD;
+ io_request->status = -EAGAIN;
+ }
break;
case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
case PQI_AIO_STATUS_INVALID_DEVICE:
u8 status;
unsigned long timeout;
- timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
status = pqi_read_soft_reset_status(ctrl_info);
} else {
ack_event = true;
rescan_needed = true;
+ if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
+ ctrl_info->logical_volume_rescan_needed = true;
}
if (ack_event)
pqi_acknowledge_event(ctrl_info, event);
pqi_ctrl_unbusy(ctrl_info);
}
-#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
+#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
return 0;
}
-#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
+#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
admin_queues = &ctrl_info->admin_queues;
oq_ci = admin_queues->oq_ci_copy;
- timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
oq_pi = readl(admin_queues->oq_pi);
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
+ PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
rc = 0;
break;
}
scsi_dma_unmap(scmd);
if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
set_host_byte(scmd, DID_IMM_RETRY);
- scmd->SCp.this_residual++;
+ pqi_cmd_priv(scmd)->this_residual++;
}
pqi_free_io_request(io_request);
pqi_scsi_done(scmd);
}
+static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
+{
+ bool io_high_prio;
+ int priority_class;
+
+ io_high_prio = false;
+
+ if (device->ncq_prio_enable) {
+ priority_class =
+ IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
+ if (priority_class == IOPRIO_CLASS_RT) {
+ /* Set NCQ priority for read/write commands. */
+ switch (scmd->cmnd[0]) {
+ case WRITE_16:
+ case READ_16:
+ case WRITE_12:
+ case READ_12:
+ case WRITE_10:
+ case READ_10:
+ case WRITE_6:
+ case READ_6:
+ io_high_prio = true;
+ break;
+ }
+ }
+ }
+
+ return io_high_prio;
+}
+
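pqi_is_io_high_priority() maps the block layer's realtime I/O priority class onto the drive's NCQ priority feature, and only for plain read/write CDBs. A process opts in from user space by putting itself in IOPRIO_CLASS_RT (which requires CAP_SYS_ADMIN); a minimal sketch using the raw syscall, assuming the uapi <linux/ioprio.h> header is available, since glibc provides no wrapper:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/ioprio.h>	/* IOPRIO_* macros (uapi header) */

	int main(void)
	{
		/* IOPRIO_WHO_PROCESS with pid 0 targets the calling process. */
		if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
			    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0)) < 0) {
			perror("ioprio_set");
			return 1;
		}
		/* I/O issued from here on carries the RT class checked above. */
		return 0;
	}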
static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group)
{
+ bool io_high_prio;
+
+ io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);
+
return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
- scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
+ scmd->cmnd, scmd->cmd_len, queue_group, NULL,
+ false, io_high_prio);
}
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
- struct pqi_encryption_info *encryption_info, bool raid_bypass)
+ struct pqi_encryption_info *encryption_info, bool raid_bypass,
+ bool io_high_prio)
{
int rc;
struct pqi_io_request *io_request;
io_request->raid_bypass = raid_bypass;
request = io_request->iu;
- memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
+ memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
put_unaligned_le32(aio_handle, &request->nexus_id);
put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
if (cdb_length > sizeof(request->cdb))
if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
return false;
- return scmd->SCp.this_residual == 0;
+ return pqi_cmd_priv(scmd)->this_residual == 0;
}
/*
displayed_warning = false;
start_jiffies = jiffies;
- warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
+ warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
while (1) {
queued_io_count = pqi_queued_io_count(ctrl_info);
"waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
displayed_warning = true;
- warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
}
usleep_range(1000, 2000);
}
unsigned long msecs_waiting;
start_jiffies = jiffies;
- warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
+ warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
pqi_check_ctrl_health(ctrl_info);
"scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
ctrl_info->scsi_host->host_no, device->bus, device->target,
device->lun, msecs_waiting / 1000, cmds_outstanding);
- warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
}
usleep_range(1000, 2000);
}
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
+ PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
rc = 0;
break;
}
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
- if (!device || !pqi_is_device_with_sas_address(device)) {
+ if (!device) {
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
}
+static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ int output_len = 0;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ output_len = snprintf(buf, PAGE_SIZE, "%d\n",
+ device->ncq_prio_enable);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return output_len;
+}
+
+static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ u8 ncq_prio_enable = 0;
+
+ if (kstrtou8(buf, 0, &ncq_prio_enable))
+ return -EINVAL;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ if (!device->ncq_prio_support ||
+ !device->is_physical_device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -EINVAL;
+ }
+
+ device->ncq_prio_enable = ncq_prio_enable;
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return strlen(buf);
+}
+
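The attribute pair above surfaces this as a per-device sysfs file: writing a non-zero value to /sys/block/sdX/device/sas_ncq_prio_enable enables the priority mapping, and the store handler rejects logical devices and drives whose firmware does not report NCQ priority support.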
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
+static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
+ pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_lunid.attr,
&dev_attr_ssd_smart_path_enabled.attr,
&dev_attr_raid_level.attr,
&dev_attr_raid_bypass_cnt.attr,
+ &dev_attr_sas_ncq_prio_enable.attr,
NULL
};
.map_queues = pqi_map_queues,
.sdev_groups = pqi_sdev_groups,
.shost_groups = pqi_shost_groups,
+ .cmd_size = sizeof(struct pqi_cmd_priv),
};
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
break;
- case PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN:
- ctrl_info->unique_wwid_in_report_phys_lun_supported =
- firmware_feature->enabled;
- break;
case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
ctrl_info->firmware_triage_supported = firmware_feature->enabled;
pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
.feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
.feature_status = pqi_firmware_feature_status,
},
- {
- .feature_name = "Unique WWID in Report Physical LUN",
- .feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN,
- .feature_status = pqi_ctrl_update_feature_flags,
- },
{
.feature_name = "Firmware Triage",
.feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
ctrl_info->enable_r6_writes = false;
ctrl_info->raid_iu_timeout_supported = false;
ctrl_info->tmf_iu_timeout_supported = false;
- ctrl_info->unique_wwid_in_report_phys_lun_supported = false;
ctrl_info->firmware_triage_supported = false;
ctrl_info->rpl_extended_format_4_5_supported = false;
}
return pqi_revert_to_sis_mode(ctrl_info);
}
+static void pqi_perform_lockup_action(void)
+{
+ switch (pqi_lockup_action) {
+ case PANIC:
+ panic("FATAL: Smart Family Controller lockup detected");
+ break;
+ case REBOOT:
+ emergency_restart();
+ break;
+ case NONE:
+ default:
+ break;
+ }
+}
+
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
return rc;
}
sis_soft_reset(ctrl_info);
- msleep(PQI_POST_RESET_DELAY_SECS * PQI_HZ);
+ ssleep(PQI_POST_RESET_DELAY_SECS);
} else {
rc = pqi_force_sis_mode(ctrl_info);
if (rc)
* commands.
*/
rc = sis_wait_for_ctrl_ready(ctrl_info);
- if (rc)
+ if (rc) {
+ if (reset_devices) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "kdump init failed with error %d\n", rc);
+ pqi_lockup_action = REBOOT;
+ pqi_perform_lockup_action();
+ }
return rc;
+ }
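Moving pqi_perform_lockup_action() ahead of pqi_ctrl_init() lets the kdump path (reset_devices) force an immediate reboot when the controller never becomes ready, instead of leaving the crash kernel wedged with no way to capture the dump.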
/*
* Get the controller properties. This allows us to determine
return pqi_ctrl_init_resume(ctrl_info);
}
-static void pqi_perform_lockup_action(void)
-{
- switch (pqi_lockup_action) {
- case PANIC:
- panic("FATAL: Smart Family Controller lockup detected");
- break;
- case REBOOT:
- emergency_restart();
- break;
- case NONE:
- default:
- break;
- }
-}
-
static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
.status = SAM_STAT_CHECK_CONDITION,
const struct pci_device_id *id)
{
int rc;
- int node, cp_node;
+ int node;
struct pqi_ctrl_info *ctrl_info;
pqi_print_ctrl_info(pci_dev, id);
node = dev_to_node(&pci_dev->dev);
if (node == NUMA_NO_NODE) {
- cp_node = cpu_to_node(0);
- if (cp_node == NUMA_NO_NODE)
- cp_node = 0;
- set_dev_node(&pci_dev->dev, cp_node);
+ node = cpu_to_node(0);
+ if (node == NUMA_NO_NODE)
+ node = 0;
+ set_dev_node(&pci_dev->dev, node);
}
ctrl_info = pqi_alloc_ctrl_info(node);
{
int rc;
struct pqi_ctrl_info *ctrl_info;
+ enum bmic_flush_cache_shutdown_event shutdown_event;
ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info) {
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
+ if (system_state == SYSTEM_RESTART)
+ shutdown_event = RESTART;
+ else
+ shutdown_event = SHUTDOWN;
+
/*
* Write all data in the controller's battery-backed cache to
* storage.
*/
- rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
+ rc = pqi_flush_cache(ctrl_info, shutdown_event);
if (rc)
dev_err(&pci_dev->dev,
"unable to flush controller cache\n");
pqi_process_lockup_action_param();
}
-static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
+#if defined(CONFIG_PM)
+
+static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
+{
+ if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
+ return RESTART;
+
+ return SUSPEND;
+}
+
+static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
{
+ struct pci_dev *pci_dev;
struct pqi_ctrl_info *ctrl_info;
+ pci_dev = to_pci_dev(dev);
ctrl_info = pci_get_drvdata(pci_dev);
pqi_wait_until_ofa_finished(ctrl_info);
pqi_ctrl_block_device_reset(ctrl_info);
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
- pqi_flush_cache(ctrl_info, SUSPEND);
- pqi_stop_heartbeat_timer(ctrl_info);
- pqi_crash_if_pending_command(ctrl_info);
+ if (suspend) {
+ enum bmic_flush_cache_shutdown_event shutdown_event;
- if (state.event == PM_EVENT_FREEZE)
- return 0;
+ shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
+ pqi_flush_cache(ctrl_info, shutdown_event);
+ }
- pci_save_state(pci_dev);
- pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+ pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_crash_if_pending_command(ctrl_info);
+ pqi_free_irqs(ctrl_info);
ctrl_info->controller_online = false;
ctrl_info->pqi_mode_enabled = false;
return 0;
}
-static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
+static __maybe_unused int pqi_suspend(struct device *dev)
+{
+ return pqi_suspend_or_freeze(dev, true);
+}
+
+static int pqi_resume_or_restore(struct device *dev)
{
int rc;
+ struct pci_dev *pci_dev;
struct pqi_ctrl_info *ctrl_info;
+ pci_dev = to_pci_dev(dev);
ctrl_info = pci_get_drvdata(pci_dev);
- if (pci_dev->current_state != PCI_D0) {
- ctrl_info->max_hw_queue_index = 0;
- pqi_free_interrupts(ctrl_info);
- pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
- rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
- IRQF_SHARED, DRIVER_NAME_SHORT,
- &ctrl_info->queue_groups[0]);
- if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
- "irq %u init failed with error %d\n",
- pci_dev->irq, rc);
- return rc;
- }
- pqi_ctrl_unblock_device_reset(ctrl_info);
- pqi_ctrl_unblock_requests(ctrl_info);
- pqi_scsi_unblock_requests(ctrl_info);
- pqi_ctrl_unblock_scan(ctrl_info);
- return 0;
- }
-
- pci_set_power_state(pci_dev, PCI_D0);
- pci_restore_state(pci_dev);
+ rc = pqi_request_irqs(ctrl_info);
+ if (rc)
+ return rc;
pqi_ctrl_unblock_device_reset(ctrl_info);
pqi_ctrl_unblock_requests(ctrl_info);
pqi_scsi_unblock_requests(ctrl_info);
pqi_ctrl_unblock_scan(ctrl_info);
+ ssleep(PQI_POST_RESET_DELAY_SECS);
+
return pqi_ctrl_init_resume(ctrl_info);
}
+static int pqi_freeze(struct device *dev)
+{
+ return pqi_suspend_or_freeze(dev, false);
+}
+
+static int pqi_thaw(struct device *dev)
+{
+ int rc;
+ struct pci_dev *pci_dev;
+ struct pqi_ctrl_info *ctrl_info;
+
+ pci_dev = to_pci_dev(dev);
+ ctrl_info = pci_get_drvdata(pci_dev);
+
+ rc = pqi_request_irqs(ctrl_info);
+ if (rc)
+ return rc;
+
+ ctrl_info->controller_online = true;
+ ctrl_info->pqi_mode_enabled = true;
+
+ pqi_ctrl_unblock_device_reset(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_scan(ctrl_info);
+
+ return 0;
+}
+
+static int pqi_poweroff(struct device *dev)
+{
+ struct pci_dev *pci_dev;
+ struct pqi_ctrl_info *ctrl_info;
+ enum bmic_flush_cache_shutdown_event shutdown_event;
+
+ pci_dev = to_pci_dev(dev);
+ ctrl_info = pci_get_drvdata(pci_dev);
+
+ shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
+ pqi_flush_cache(ctrl_info, shutdown_event);
+
+ return 0;
+}
+
+static const struct dev_pm_ops pqi_pm_ops = {
+ .suspend = pqi_suspend,
+ .resume = pqi_resume_or_restore,
+ .freeze = pqi_freeze,
+ .thaw = pqi_thaw,
+ .poweroff = pqi_poweroff,
+ .restore = pqi_resume_or_restore,
+};
+
+#endif /* CONFIG_PM */
+
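The legacy PCI .suspend/.resume hooks give way to a full dev_pm_ops table: hibernation's freeze/thaw skips the cache flush (the system stays powered), suspend and poweroff flush with a SUSPEND or RESTART event chosen per board, and the PCI core now handles the device power states and config-space save/restore that the old callbacks managed by hand.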
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x152d, 0x8a37)
},
- {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- 0x193d, 0x8460)
- },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x1104)
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1bd4, 0x0054)
},
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006d)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0070)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0071)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0072)
+ },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x19e5, 0xd227)
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1303)
},
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1304)
+ },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1380)
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1462)
},
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1463)
+ },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1470)
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1472)
},
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1473)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1474)
+ },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1480)
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
},
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
+ },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
},
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
+ },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1590, 0x032e)
},
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x036f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0381)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0382)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0383)
+ },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1d8d, 0x0800)
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1dfc, 0x3161)
},
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f0c, 0x3161)
+ },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x5445)
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x5447)
},
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5449)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544d)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544e)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544f)
+ },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x0b27)
.remove = pqi_pci_remove,
.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
- .suspend = pqi_suspend,
- .resume = pqi_resume,
+ .driver = {
+ .pm = &pqi_pm_ops
+ },
#endif
};
int rc;
pr_info(DRIVER_NAME "\n");
+ pqi_verify_structures();
+ sis_verify_structures();
pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
if (!pqi_sas_transport_template)
module_init(pqi_init);
module_exit(pqi_cleanup);
-static void __attribute__((unused)) verify_structures(void)
+static void pqi_verify_structures(void)
{
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_host_to_ctrl_doorbell) != 0x20);
unsigned long timeout;
u32 status;
- timeout = (timeout_secs * PQI_HZ) + jiffies;
+ timeout = (timeout_secs * HZ) + jiffies;
while (1) {
status = readl(&ctrl_info->registers->sis_firmware_status);
* the top of the loop in order to give the controller time to start
* processing the command before we start polling.
*/
- timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
msleep(SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS);
doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
u32 doorbell_register;
unsigned long timeout;
- timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
doorbell_register =
enum sis_fw_triage_status status;
unsigned long timeout;
- timeout = (SIS_FW_TRIAGE_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (SIS_FW_TRIAGE_STATUS_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
status = sis_read_firmware_triage_status(ctrl_info);
if (status == FW_TRIAGE_COND_INVALID) {
return rc;
}
-static void __attribute__((unused)) verify_structures(void)
+void sis_verify_structures(void)
{
BUILD_BUG_ON(offsetof(struct sis_base_struct,
revision) != 0x0);
#if !defined(_SMARTPQI_SIS_H)
#define _SMARTPQI_SIS_H
+void sis_verify_structures(void);
int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info);
int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info);
bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info);
{
struct gendisk *disk = cdi->disk;
u32 len = nr * CD_FRAMESIZE_RAW;
- struct scsi_request *req;
+ struct scsi_cmnd *scmd;
struct request *rq;
struct bio *bio;
int ret;
rq = scsi_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
- req = scsi_req(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
ret = blk_rq_map_user(disk->queue, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret)
goto out_put_request;
- req->cmd[0] = GPCMD_READ_CD;
- req->cmd[1] = 1 << 2;
- req->cmd[2] = (lba >> 24) & 0xff;
- req->cmd[3] = (lba >> 16) & 0xff;
- req->cmd[4] = (lba >> 8) & 0xff;
- req->cmd[5] = lba & 0xff;
- req->cmd[6] = (nr >> 16) & 0xff;
- req->cmd[7] = (nr >> 8) & 0xff;
- req->cmd[8] = nr & 0xff;
- req->cmd[9] = 0xf8;
- req->cmd_len = 12;
+ scmd->cmnd[0] = GPCMD_READ_CD;
+ scmd->cmnd[1] = 1 << 2;
+ scmd->cmnd[2] = (lba >> 24) & 0xff;
+ scmd->cmnd[3] = (lba >> 16) & 0xff;
+ scmd->cmnd[4] = (lba >> 8) & 0xff;
+ scmd->cmnd[5] = lba & 0xff;
+ scmd->cmnd[6] = (nr >> 16) & 0xff;
+ scmd->cmnd[7] = (nr >> 8) & 0xff;
+ scmd->cmnd[8] = nr & 0xff;
+ scmd->cmnd[9] = 0xf8;
+ scmd->cmd_len = 12;
rq->timeout = 60 * HZ;
bio = rq->bio;
blk_execute_rq(rq, false);
- if (scsi_req(rq)->result) {
+ if (scmd->result) {
struct scsi_sense_hdr sshdr;
- scsi_normalize_sense(req->sense, req->sense_len,
+ scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
&sshdr);
*last_sense = sshdr.sense_key;
ret = -EIO;
static void st_do_stats(struct scsi_tape *STp, struct request *req)
{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
ktime_t now;
now = ktime_get();
- if (scsi_req(req)->cmd[0] == WRITE_6) {
+ if (scmd->cmnd[0] == WRITE_6) {
now = ktime_sub(now, STp->stats->write_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
atomic64_inc(&STp->stats->write_cnt);
- if (scsi_req(req)->result) {
+ if (scmd->result) {
atomic64_add(atomic_read(&STp->stats->last_write_size)
- STp->buffer->cmdstat.residual,
&STp->stats->write_byte_cnt);
} else
atomic64_add(atomic_read(&STp->stats->last_write_size),
&STp->stats->write_byte_cnt);
- } else if (scsi_req(req)->cmd[0] == READ_6) {
+ } else if (scmd->cmnd[0] == READ_6) {
now = ktime_sub(now, STp->stats->read_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
atomic64_inc(&STp->stats->read_cnt);
- if (scsi_req(req)->result) {
+ if (scmd->result) {
atomic64_add(atomic_read(&STp->stats->last_read_size)
- STp->buffer->cmdstat.residual,
&STp->stats->read_byte_cnt);
static void st_scsi_execute_end(struct request *req, blk_status_t status)
{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
struct st_request *SRpnt = req->end_io_data;
- struct scsi_request *rq = scsi_req(req);
struct scsi_tape *STp = SRpnt->stp;
struct bio *tmp;
- STp->buffer->cmdstat.midlevel_result = SRpnt->result = rq->result;
- STp->buffer->cmdstat.residual = rq->resid_len;
+ STp->buffer->cmdstat.midlevel_result = SRpnt->result = scmd->result;
+ STp->buffer->cmdstat.residual = scmd->resid_len;
st_do_stats(STp, req);
tmp = SRpnt->bio;
- if (rq->sense_len)
- memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
+ if (scmd->sense_len)
+ memcpy(SRpnt->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
if (SRpnt->waiting)
complete(SRpnt->waiting);
int timeout, int retries)
{
struct request *req;
- struct scsi_request *rq;
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
int err = 0;
struct scsi_tape *STp = SRpnt->stp;
+ struct scsi_cmnd *scmd;
req = scsi_alloc_request(SRpnt->stp->device->request_queue,
data_direction == DMA_TO_DEVICE ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
- rq = scsi_req(req);
+ scmd = blk_mq_rq_to_pdu(req);
req->rq_flags |= RQF_QUIET;
mdata->null_mapped = 1;
}
SRpnt->bio = req->bio;
- rq->cmd_len = COMMAND_SIZE(cmd[0]);
- memset(rq->cmd, 0, BLK_MAX_CDB);
- memcpy(rq->cmd, cmd, rq->cmd_len);
+ scmd->cmd_len = COMMAND_SIZE(cmd[0]);
+ memcpy(scmd->cmnd, cmd, scmd->cmd_len);
req->timeout = timeout;
- rq->retries = retries;
+ scmd->allowed = retries;
req->end_io_data = SRpnt;
blk_execute_rq_nowait(req, true, st_scsi_execute_end);
static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- int wanted_len = cmd->SCp.this_residual;
+ int wanted_len = NCR5380_to_ncmd(cmd)->this_residual;
if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)))
return 0;
.sg_tablesize = 1,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
};
static int __init sun3_scsi_probe(struct platform_device *pdev)
struct completion *eh_done; /* SCSI error handling */
};
-#define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp))
+#define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)scsi_cmd_priv(cmd))
#define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host)
/*
void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd)
{
struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
- BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd));
if (ucmd->eh_done)
complete(ucmd->eh_done);
.module = THIS_MODULE,
.name = "sym53c8xx",
.info = sym53c8xx_info,
+ .cmd_size = sizeof(struct sym_ucmd),
.queuecommand = sym53c8xx_queue_command,
.slave_alloc = sym53c8xx_slave_alloc,
.slave_configure = sym53c8xx_slave_configure,
/* WriteBooster buffer is available only for the logical unit from 0 to 7 */
#define UFS_UPIU_MAX_WB_LUN_ID 8
+/*
+ * The WriteBooster buffer lifetime has a limit set by the vendor.
+ * Once that limit is exceeded, the WriteBooster feature is disabled.
+ */
+#define UFS_WB_EXCEED_LIFETIME 0x0B
+
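The 0x0B sentinel matches the bWriteBoosterBufferLifeTimeEst encoding, where values up to 0x0A report estimated lifetime used in 10% steps and 0x0B means the lifetime is exceeded; the helper added below reads the attribute and forces WriteBooster off once that value is seen.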
/* Well known logical unit id in LUN field of UPIU */
enum {
UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
-#define wlun_dev_to_hba(dv) shost_priv(to_scsi_device(dv)->host)
-
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
- u64 lba;
+ u64 lba = 0;
u8 opcode = 0, group_id = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
return;
opcode = cmd->cmnd[0];
- lba = scsi_get_lba(cmd);
if (opcode == READ_10 || opcode == WRITE_10) {
/*
*/
transfer_len =
be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ lba = scsi_get_lba(cmd);
if (opcode == WRITE_10)
group_id = lrbp->cmd->cmnd[6];
} else if (opcode == UNMAP) {
* The number of Bytes to be unmapped beginning with the lba.
*/
transfer_len = blk_rq_bytes(rq);
+ lba = scsi_get_lba(cmd);
}
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
"INVALID MODE",
};
- dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
+ /*
+ * Use dev_dbg to suppress messages during runtime PM; otherwise a
+ * message written back to storage by user space triggers a runtime
+ * resume, which produces more messages, and so on without end.
+ */
+ dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
__func__,
hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
pm_runtime_get_noresume(&sdev->sdev_gendev);
else if (ufshcd_is_rpm_autosuspend_allowed(hba))
sdev->rpm_autosuspend = 1;
+ /*
+ * Do not print messages during runtime PM; a message written back to
+ * storage by user space triggers a runtime resume, which produces
+ * more messages, and so on without end.
+ */
+ sdev->silence_suspend = 1;
ufshcd_crypto_register(hba, q);
return false;
}
+static void ufshcd_wb_force_disable(struct ufs_hba *hba)
+{
+ if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
+ ufshcd_wb_toggle_flush(hba, false);
+
+ ufshcd_wb_toggle_flush_during_h8(hba, false);
+ ufshcd_wb_toggle(hba, false);
+ hba->caps &= ~UFSHCD_CAP_WB_EN;
+
+ dev_info(hba->dev, "%s: WB force disabled\n", __func__);
+}
+
+static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
+{
+ u32 lifetime;
+ int ret;
+ u8 index;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
+ index, 0, &lifetime);
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
+ __func__, ret);
+ return false;
+ }
+
+ if (lifetime == UFS_WB_EXCEED_LIFETIME) {
+ dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
+ __func__, lifetime);
+ return false;
+ }
+
+ dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
+ __func__, lifetime);
+
+ return true;
+}
+
static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
{
int ret;
if (!ufshcd_is_wb_allowed(hba))
return false;
+
+ if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
+ ufshcd_wb_force_disable(hba);
+ return false;
+ }
+
/*
* The ufs device needs the vcc to be ON to flush.
* With user-space reduction enabled, it's enough to enable flush
if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
!hba->vreg_info.vccq2) {
- dev_err(hba->dev,
+ /*
+ * Use dev_dbg to suppress messages during runtime PM; otherwise a
+ * message written back to storage by user space triggers a runtime
+ * resume, which produces more messages, and so on without end.
+ */
+ dev_dbg(hba->dev,
"%s: Regulator capability was not set, actvIccLevel=%d",
__func__, icc_level);
goto out;
if (!ufshcd_is_wb_allowed(hba))
return;
+
/*
* Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
* UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
if (!d_lu_wb_buf_alloc)
goto wb_disabled;
}
+
+ if (!ufshcd_is_wb_buf_lifetime_available(hba))
+ goto wb_disabled;
+
return;
wb_disabled:
struct ufshpb_req *umap_req,
struct ufshpb_region *rgn)
{
- struct request *req;
- struct scsi_request *rq;
+ struct request *req = umap_req->req;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
- req = umap_req->req;
req->timeout = 0;
- req->end_io_data = (void *)umap_req;
- rq = scsi_req(req);
- ufshpb_set_unmap_cmd(rq->cmd, rgn);
- rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
+ req->end_io_data = umap_req;
+
+ ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
+ scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
{
struct request_queue *q;
struct request *req;
- struct scsi_request *rq;
+ struct scsi_cmnd *scmd;
int mem_size = hpb->srgn_mem_size;
int ret = 0;
int i;
req->end_io_data = map_req;
- rq = scsi_req(req);
-
if (unlikely(last))
mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
- ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
+ scmd = blk_mq_rq_to_pdu(req);
+ ufshpb_set_read_buf_cmd(scmd->cmnd, map_req->rb.rgn_idx,
map_req->rb.srgn_idx, mem_size);
- rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
+ scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
static int wd33c93_queuecommand_lck(struct scsi_cmnd *cmd)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct WD33C93_hostdata *hostdata;
struct scsi_cmnd *tmp;
*/
if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ scsi_pointer->buffer = scsi_sglist(cmd);
+ scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
} else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.buffers_residual = 0;
- cmd->SCp.ptr = NULL;
- cmd->SCp.this_residual = 0;
+ scsi_pointer->buffer = NULL;
+ scsi_pointer->buffers_residual = 0;
+ scsi_pointer->ptr = NULL;
+ scsi_pointer->this_residual = 0;
}
/* WD docs state that at the conclusion of a "LEVEL2" command, the
* status byte is stored.
*/
- cmd->SCp.Status = ILLEGAL_STATUS_BYTE;
+ scsi_pointer->Status = ILLEGAL_STATUS_BYTE;
/*
* Add the cmd to the end of 'input_Q'. Note that REQUEST SENSE
static void
wd33c93_execute(struct Scsi_Host *instance)
{
+ struct scsi_pointer *scsi_pointer;
struct WD33C93_hostdata *hostdata =
(struct WD33C93_hostdata *) instance->hostdata;
const wd33c93_regs regs = hostdata->regs;
* to change around and experiment with for now.
*/
- cmd->SCp.phase = 0; /* assume no disconnect */
+ scsi_pointer = WD33C93_scsi_pointer(cmd);
+ scsi_pointer->phase = 0; /* assume no disconnect */
if (hostdata->disconnect == DIS_NEVER)
goto no;
if (hostdata->disconnect == DIS_ALWAYS)
(prev->device->lun != cmd->device->lun)) {
for (prev = (struct scsi_cmnd *) hostdata->input_Q; prev;
prev = (struct scsi_cmnd *) prev->host_scribble)
- prev->SCp.phase = 1;
+ WD33C93_scsi_pointer(prev)->phase = 1;
goto yes;
}
}
goto no;
yes:
- cmd->SCp.phase = 1;
+ scsi_pointer->phase = 1;
#ifdef PROC_STATISTICS
hostdata->disc_allowed_cnt[cmd->device->id]++;
no:
- write_wd33c93(regs, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0));
+ write_wd33c93(regs, WD_SOURCE_ID, scsi_pointer->phase ? SRCID_ER : 0);
write_wd33c93(regs, WD_TARGET_LUN, (u8)cmd->device->lun);
write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER,
* up ahead of time.
*/
- if ((cmd->SCp.phase == 0) && (hostdata->no_dma == 0)) {
+ if (scsi_pointer->phase == 0 && hostdata->no_dma == 0) {
if (hostdata->dma_setup(cmd,
(cmd->sc_data_direction == DMA_TO_DEVICE) ?
DATA_OUT_DIR : DATA_IN_DIR))
write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */
else {
write_wd33c93_count(regs,
- cmd->SCp.this_residual);
+ scsi_pointer->this_residual);
write_wd33c93(regs, WD_CONTROL,
CTRL_IDI | CTRL_EDI | hostdata->dma_mode);
hostdata->dma = D_DMA_RUNNING;
*/
DB(DB_EXECUTE,
- printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
+ printk("%s)EX-2 ", scsi_pointer->phase ? "d:" : ""))
}
static void
transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
int data_in_dir)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct WD33C93_hostdata *hostdata;
unsigned long length;
* now we need to setup the next scatter-gather buffer as the
* source or destination for THIS transfer.
*/
- if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
- cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
- --cmd->SCp.buffers_residual;
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ if (!scsi_pointer->this_residual && scsi_pointer->buffers_residual) {
+ scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
+ --scsi_pointer->buffers_residual;
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
}
- if (!cmd->SCp.this_residual) /* avoid bogus setups */
+ if (!scsi_pointer->this_residual) /* avoid bogus setups */
return;
write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER,
#ifdef PROC_STATISTICS
hostdata->pio_cnt++;
#endif
- transfer_pio(regs, (uchar *) cmd->SCp.ptr,
- cmd->SCp.this_residual, data_in_dir, hostdata);
- length = cmd->SCp.this_residual;
- cmd->SCp.this_residual = read_wd33c93_count(regs);
- cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+ transfer_pio(regs, (uchar *) scsi_pointer->ptr,
+ scsi_pointer->this_residual, data_in_dir,
+ hostdata);
+ length = scsi_pointer->this_residual;
+ scsi_pointer->this_residual = read_wd33c93_count(regs);
+ scsi_pointer->ptr += length - scsi_pointer->this_residual;
}
/* We are able to do DMA (in fact, the Amiga hardware is
hostdata->dma_cnt++;
#endif
write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | hostdata->dma_mode);
- write_wd33c93_count(regs, cmd->SCp.this_residual);
+ write_wd33c93_count(regs, scsi_pointer->this_residual);
if ((hostdata->level2 >= L2_DATA) ||
- (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
+ (hostdata->level2 == L2_BASIC && scsi_pointer->phase == 0)) {
write_wd33c93(regs, WD_COMMAND_PHASE, 0x45);
write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER);
hostdata->state = S_RUNNING_LEVEL2;
void
wd33c93_intr(struct Scsi_Host *instance)
{
+ struct scsi_pointer *scsi_pointer;
struct WD33C93_hostdata *hostdata =
(struct WD33C93_hostdata *) instance->hostdata;
const wd33c93_regs regs = hostdata->regs;
#endif
cmd = (struct scsi_cmnd *) hostdata->connected; /* assume we're connected */
+ scsi_pointer = WD33C93_scsi_pointer(cmd);
sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear the interrupt */
phs = read_wd33c93(regs, WD_COMMAND_PHASE);
*/
if (hostdata->dma == D_DMA_RUNNING) {
DB(DB_TRANSFER,
- printk("[%p/%d:", cmd->SCp.ptr, cmd->SCp.this_residual))
+ printk("[%p/%d:", scsi_pointer->ptr, scsi_pointer->this_residual))
hostdata->dma_stop(cmd->device->host, cmd, 1);
hostdata->dma = D_DMA_OFF;
- length = cmd->SCp.this_residual;
- cmd->SCp.this_residual = read_wd33c93_count(regs);
- cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+ length = scsi_pointer->this_residual;
+ scsi_pointer->this_residual = read_wd33c93_count(regs);
+ scsi_pointer->ptr += length - scsi_pointer->this_residual;
DB(DB_TRANSFER,
- printk("%p/%d]", cmd->SCp.ptr, cmd->SCp.this_residual))
+ printk("%p/%d]", scsi_pointer->ptr, scsi_pointer->this_residual))
}
/* Respond to the specific WD3393 interrupt - there are quite a few! */
/* construct an IDENTIFY message with correct disconnect bit */
hostdata->outgoing_msg[0] = IDENTIFY(0, cmd->device->lun);
- if (cmd->SCp.phase)
+ if (scsi_pointer->phase)
hostdata->outgoing_msg[0] |= 0x40;
if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
case CSR_UNEXP | PHS_DATA_IN:
case CSR_SRV_REQ | PHS_DATA_IN:
DB(DB_INTR,
- printk("IN-%d.%d", cmd->SCp.this_residual,
- cmd->SCp.buffers_residual))
+ printk("IN-%d.%d", scsi_pointer->this_residual,
+ scsi_pointer->buffers_residual))
transfer_bytes(regs, cmd, DATA_IN_DIR);
if (hostdata->state != S_RUNNING_LEVEL2)
hostdata->state = S_CONNECTED;
case CSR_UNEXP | PHS_DATA_OUT:
case CSR_SRV_REQ | PHS_DATA_OUT:
DB(DB_INTR,
- printk("OUT-%d.%d", cmd->SCp.this_residual,
- cmd->SCp.buffers_residual))
+ printk("OUT-%d.%d", scsi_pointer->this_residual,
+ scsi_pointer->buffers_residual))
transfer_bytes(regs, cmd, DATA_OUT_DIR);
if (hostdata->state != S_RUNNING_LEVEL2)
hostdata->state = S_CONNECTED;
case CSR_UNEXP | PHS_STATUS:
case CSR_SRV_REQ | PHS_STATUS:
DB(DB_INTR, printk("STATUS="))
- cmd->SCp.Status = read_1_byte(regs);
- DB(DB_INTR, printk("%02x", cmd->SCp.Status))
+ scsi_pointer->Status = read_1_byte(regs);
+ DB(DB_INTR, printk("%02x", scsi_pointer->Status))
if (hostdata->level2 >= L2_BASIC) {
sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */
udelay(7);
else
hostdata->incoming_ptr = 0;
- cmd->SCp.Message = msg;
+ scsi_pointer->Message = msg;
switch (msg) {
case COMMAND_COMPLETE:
write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER);
if (phs == 0x60) {
DB(DB_INTR, printk("SX-DONE"))
- cmd->SCp.Message = COMMAND_COMPLETE;
+ scsi_pointer->Message = COMMAND_COMPLETE;
lun = read_wd33c93(regs, WD_TARGET_LUN);
- DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
+ DB(DB_INTR, printk(":%d.%d", scsi_pointer->Status, lun))
hostdata->connected = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
- if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
- cmd->SCp.Status = lun;
+ if (scsi_pointer->Status == ILLEGAL_STATUS_BYTE)
+ scsi_pointer->Status = lun;
if (cmd->cmnd[0] == REQUEST_SENSE
- && cmd->SCp.Status != SAM_STAT_GOOD) {
+ && scsi_pointer->Status != SAM_STAT_GOOD) {
set_host_byte(cmd, DID_ERROR);
} else {
set_host_byte(cmd, DID_OK);
- scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
- set_status_byte(cmd, cmd->SCp.Status);
+ scsi_msg_to_host_byte(cmd, scsi_pointer->Message);
+ set_status_byte(cmd, scsi_pointer->Status);
}
scsi_done(cmd);
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
if (cmd->cmnd[0] == REQUEST_SENSE &&
- cmd->SCp.Status != SAM_STAT_GOOD) {
+ scsi_pointer->Status != SAM_STAT_GOOD) {
set_host_byte(cmd, DID_ERROR);
} else {
set_host_byte(cmd, DID_OK);
- scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
- set_status_byte(cmd, cmd->SCp.Status);
+ scsi_msg_to_host_byte(cmd, scsi_pointer->Message);
+ set_status_byte(cmd, scsi_pointer->Status);
}
scsi_done(cmd);
hostdata->connected = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
- DB(DB_INTR, printk(":%d", cmd->SCp.Status))
+ DB(DB_INTR, printk(":%d", scsi_pointer->Status))
if (cmd->cmnd[0] == REQUEST_SENSE
- && cmd->SCp.Status != SAM_STAT_GOOD) {
+ && scsi_pointer->Status != SAM_STAT_GOOD) {
set_host_byte(cmd, DID_ERROR);
} else {
set_host_byte(cmd, DID_OK);
- scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
- set_status_byte(cmd, cmd->SCp.Status);
+ scsi_msg_to_host_byte(cmd, scsi_pointer->Message);
+ set_status_byte(cmd, scsi_pointer->Status);
}
scsi_done(cmd);
break;
#endif
};
+static inline struct scsi_pointer *WD33C93_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
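
The helper above is the recurring conversion in this series: per-command scratch
data moves out of the removed cmd->SCp scratchpad and into driver-private space
that the midlayer allocates behind each scsi_cmnd. A minimal sketch of the
pattern, assuming an illustrative "foo" driver and foo_cmd_priv struct (names
not from the patch):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct foo_cmd_priv {			/* hypothetical per-command state */
	void *ptr;
	int this_residual;
};

static inline struct foo_cmd_priv *foo_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);	/* area allocated after the scsi_cmnd */
}

static struct scsi_host_template foo_template = {
	.module		= THIS_MODULE,
	.proc_name	= "foo",
	/* the midlayer allocates and zeroes this much per command */
	.cmd_size	= sizeof(struct foo_cmd_priv),
};

The zalon and uas hunks below wire up .cmd_size in exactly this way.
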
/* defines for hostdata->chip */
dma_unmap_single(&wd->pdev->dev, scb->phys,
sizeof(struct wd719x_scb), DMA_BIDIRECTIONAL);
scsi_dma_unmap(cmd);
- dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
+ dma_unmap_single(&wd->pdev->dev, scb->dma_handle,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
cmd->result = result << 16;
/* map sense buffer */
scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE;
- cmd->SCp.dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&wd->pdev->dev, cmd->SCp.dma_handle))
+ scb->dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&wd->pdev->dev, scb->dma_handle))
goto out_unmap_scb;
- scb->sense_buf = cpu_to_le32(cmd->SCp.dma_handle);
+ scb->sense_buf = cpu_to_le32(scb->dma_handle);
/* request autosense */
scb->SCB_options |= WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE;
return 0;
out_unmap_sense:
- dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
+ dma_unmap_single(&wd->pdev->dev, scb->dma_handle,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
out_unmap_scb:
dma_unmap_single(&wd->pdev->dev, scb->phys, sizeof(*scb),
if (err)
goto fail;
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n");
goto disable_device;
}
u8 flags[2]; /* 62-63 SCB specific flags (local to each thread) */
/* everything below is for driver use (not used by card) */
dma_addr_t phys; /* bus address of the SCB */
+ dma_addr_t dma_handle;
struct scsi_cmnd *cmd; /* a copy of the pointer we were passed */
struct list_head list;
struct wd719x_sglist sg_list[WD719X_SG] __aligned(8); /* SG list */
static struct scsi_host_template zalon7xx_template = {
.module = THIS_MODULE,
.proc_name = "zalon7xx",
+ .cmd_size = sizeof(struct ncr_cmd_priv),
};
static int __init
if (!iscsit_global->ts_bitmap)
goto configfs_out;
+ if (!zalloc_cpumask_var(&iscsit_global->allowed_cpumask, GFP_KERNEL)) {
+ pr_err("Unable to allocate iscsit_global->allowed_cpumask\n");
+ goto bitmap_out;
+ }
+ cpumask_setall(iscsit_global->allowed_cpumask);
+
lio_qr_cache = kmem_cache_create("lio_qr_cache",
sizeof(struct iscsi_queue_req),
__alignof__(struct iscsi_queue_req), 0, NULL);
if (!lio_qr_cache) {
pr_err("Unable to kmem_cache_create() for"
" lio_qr_cache\n");
- goto bitmap_out;
+ goto cpumask_out;
}
lio_dr_cache = kmem_cache_create("lio_dr_cache",
kmem_cache_destroy(lio_dr_cache);
qr_out:
kmem_cache_destroy(lio_qr_cache);
+cpumask_out:
+ free_cpumask_var(iscsit_global->allowed_cpumask);
bitmap_out:
vfree(iscsit_global->ts_bitmap);
configfs_out:
target_unregister_template(&iscsi_ops);
+ free_cpumask_var(iscsit_global->allowed_cpumask);
vfree(iscsit_global->ts_bitmap);
kfree(iscsit_global);
}
void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
{
int ord, cpu;
+ cpumask_t conn_allowed_cpumask;
+
+ cpumask_and(&conn_allowed_cpumask, iscsit_global->allowed_cpumask,
+ cpu_online_mask);
+
/*
* bitmap_id is assigned from iscsit_global->ts_bitmap from
* within iscsit_start_kthreads()
* iSCSI connection's RX/TX threads will be scheduled to
* execute upon.
*/
- ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
- for_each_online_cpu(cpu) {
+ cpumask_clear(conn->conn_cpumask);
+ ord = conn->bitmap_id % cpumask_weight(&conn_allowed_cpumask);
+ for_each_cpu(cpu, &conn_allowed_cpumask) {
if (ord-- == 0) {
cpumask_set_cpu(cpu, conn->conn_cpumask);
return;
cpumask_setall(conn->conn_cpumask);
}
+static void iscsit_thread_reschedule(struct iscsi_conn *conn)
+{
+ /*
+	 * If iscsit_global->allowed_cpumask has been modified, reschedule the
+	 * iSCSI connection's RX/TX threads and update conn->allowed_cpumask.
+ */
+ if (!cpumask_equal(iscsit_global->allowed_cpumask,
+ conn->allowed_cpumask)) {
+ iscsit_thread_get_cpumask(conn);
+ conn->conn_tx_reset_cpumask = 1;
+ conn->conn_rx_reset_cpumask = 1;
+ cpumask_copy(conn->allowed_cpumask,
+ iscsit_global->allowed_cpumask);
+ }
+}
+
+void iscsit_thread_check_cpumask(
+ struct iscsi_conn *conn,
+ struct task_struct *p,
+ int mode)
+{
+ /*
+	 * The TX and RX threads may call iscsit_thread_check_cpumask()
+	 * at the same time. The RX thread might be faster and return from
+	 * iscsit_thread_reschedule() with conn_rx_reset_cpumask set to 0.
+	 * Then the TX thread sets it back to 1.
+	 * The next time the RX thread loops, it sees conn_rx_reset_cpumask
+	 * set to 1, calls set_cpus_allowed_ptr() again and sets it back to 0.
+ */
+ iscsit_thread_reschedule(conn);
+
+ /*
+ * mode == 1 signals iscsi_target_tx_thread() usage.
+ * mode == 0 signals iscsi_target_rx_thread() usage.
+ */
+ if (mode == 1) {
+ if (!conn->conn_tx_reset_cpumask)
+ return;
+ } else {
+ if (!conn->conn_rx_reset_cpumask)
+ return;
+ }
+
+ /*
+ * Update the CPU mask for this single kthread so that
+ * both TX and RX kthreads are scheduled to run on the
+ * same CPU.
+ */
+ set_cpus_allowed_ptr(p, conn->conn_cpumask);
+ if (mode == 1)
+ conn->conn_tx_reset_cpumask = 0;
+ else
+ conn->conn_rx_reset_cpumask = 0;
+}
+EXPORT_SYMBOL(iscsit_thread_check_cpumask);
+
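
A hedged sketch of the expected call site, modelled on the TX kthread main loop
(the loop body is elided; the real callers are iscsi_target_tx_thread() and
iscsi_target_rx_thread()):

#include <linux/kthread.h>
#include <linux/sched.h>

int foo_target_tx_thread(void *arg)	/* stand-in for iscsi_target_tx_thread() */
{
	struct iscsi_conn *conn = arg;

	while (!kthread_should_stop()) {
		/* mode == 1: TX side; re-pins this kthread to
		 * conn->conn_cpumask when a cpumask change was flagged */
		iscsit_thread_check_cpumask(conn, current, 1);
		/* ... wait for and transmit queued PDUs ... */
	}
	return 0;
}
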
int
iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
CONFIGFS_ATTR_RO(lio_target_wwn_, lio_version);
+static ssize_t lio_target_wwn_cpus_allowed_list_show(
+ struct config_item *item, char *page)
+{
+ return sprintf(page, "%*pbl\n",
+ cpumask_pr_args(iscsit_global->allowed_cpumask));
+}
+
+static ssize_t lio_target_wwn_cpus_allowed_list_store(
+ struct config_item *item, const char *page, size_t count)
+{
+ int ret;
+ char *orig;
+ cpumask_t new_allowed_cpumask;
+
+ orig = kstrdup(page, GFP_KERNEL);
+ if (!orig)
+ return -ENOMEM;
+
+ cpumask_clear(&new_allowed_cpumask);
+ ret = cpulist_parse(orig, &new_allowed_cpumask);
+
+ kfree(orig);
+ if (ret != 0)
+ return ret;
+
+ cpumask_copy(iscsit_global->allowed_cpumask, &new_allowed_cpumask);
+ return count;
+}
+
+CONFIGFS_ATTR(lio_target_wwn_, cpus_allowed_list);
+
static struct configfs_attribute *lio_target_wwn_attrs[] = {
&lio_target_wwn_attr_lio_version,
+ &lio_target_wwn_attr_cpus_allowed_list,
NULL,
};
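
Usage note (path assumed from the sibling lio_version attribute): writing a
cpulist such as "0-3,8" to /sys/kernel/config/target/iscsi/cpus_allowed_list
feeds cpulist_parse() above; an invalid list is rejected with the parser's
error code, and already-running connections pick the new mask up via
iscsit_thread_reschedule() the next time their RX/TX threads loop.
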
goto free_conn_ops;
}
+ if (!zalloc_cpumask_var(&conn->allowed_cpumask, GFP_KERNEL)) {
+ pr_err("Unable to allocate conn->allowed_cpumask\n");
+ goto free_conn_cpumask;
+ }
+
return conn;
+free_conn_cpumask:
+ free_cpumask_var(conn->conn_cpumask);
free_conn_ops:
kfree(conn->conn_ops);
put_transport:
void iscsit_free_conn(struct iscsi_conn *conn)
{
+ free_cpumask_var(conn->allowed_cpumask);
free_cpumask_var(conn->conn_cpumask);
kfree(conn->conn_ops);
iscsit_put_transport(conn->conn_transport);
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct scsi_device *sd = pdv->pdv_sd;
- struct pscsi_plugin_task *pt = cmd->priv;
- unsigned char *cdb;
+ unsigned char *cdb = cmd->priv;
+
/*
- * Special case for REPORT_LUNs handling where pscsi_plugin_task has
- * not been allocated because TCM is handling the emulation directly.
+	 * Special case for REPORT_LUNs, which is emulated by TCM rather than
+	 * passed on to the device.
*/
- if (!pt)
+ if (!cdb)
return;
- cdb = &pt->pscsi_cdb[0];
/*
* Hack to make sure that Write-Protect modepage is set if R/O mode is
* forced.
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
- struct pscsi_plugin_task *pt;
+ struct scsi_cmnd *scmd;
struct request *req;
sense_reason_t ret;
- /*
- * Dynamically alloc cdb space, since it may be larger than
- * TCM_MAX_COMMAND_SIZE
- */
- pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
- if (!pt) {
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
- cmd->priv = pt;
-
- memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
- scsi_command_size(cmd->t_task_cdb));
-
req = scsi_alloc_request(pdv->pdv_sd->request_queue,
cmd->data_direction == DMA_TO_DEVICE ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(req)) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- goto fail;
- }
+ if (IS_ERR(req))
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (sgl) {
ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
req->end_io = pscsi_req_done;
req->end_io_data = cmd;
- scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb);
- scsi_req(req)->cmd = &pt->pscsi_cdb[0];
+
+ scmd = blk_mq_rq_to_pdu(req);
+ scmd->cmd_len = scsi_command_size(cmd->t_task_cdb);
+ if (scmd->cmd_len > sizeof(scmd->cmnd)) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto fail_put_request;
+ }
+ memcpy(scmd->cmnd, cmd->t_task_cdb, scmd->cmd_len);
+
if (pdv->pdv_sd->type == TYPE_DISK ||
pdv->pdv_sd->type == TYPE_ZBC)
req->timeout = PS_TIMEOUT_DISK;
else
req->timeout = PS_TIMEOUT_OTHER;
- scsi_req(req)->retries = PS_RETRY;
+ scmd->allowed = PS_RETRY;
+
+ cmd->priv = scmd->cmnd;
blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
pscsi_req_done);
fail_put_request:
blk_mq_free_request(req);
-fail:
- kfree(pt);
return ret;
}
static void pscsi_req_done(struct request *req, blk_status_t status)
{
struct se_cmd *cmd = req->end_io_data;
- struct pscsi_plugin_task *pt = cmd->priv;
- int result = scsi_req(req)->result;
- enum sam_status scsi_status = result & 0xff;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
+ enum sam_status scsi_status = scmd->result & 0xff;
+ u8 *cdb = cmd->priv;
if (scsi_status != SAM_STAT_GOOD) {
pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
- " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
- result);
+ " 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
}
- pscsi_complete_cmd(cmd, scsi_status, scsi_req(req)->sense);
+ pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer);
- switch (host_byte(result)) {
+ switch (host_byte(scmd->result)) {
case DID_OK:
target_complete_cmd_with_length(cmd, scsi_status,
- cmd->data_length - scsi_req(req)->resid_len);
+ cmd->data_length - scmd->resid_len);
break;
default:
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
- " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
- result);
+ " 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
break;
}
blk_mq_free_request(req);
- kfree(pt);
}
static const struct target_backend_ops pscsi_ops = {
struct scsi_device;
struct Scsi_Host;
-struct pscsi_plugin_task {
- unsigned char pscsi_cdb[0];
-} ____cacheline_aligned;
-
#define PDF_HAS_CHANNEL_ID 0x01
#define PDF_HAS_TARGET_ID 0x02
#define PDF_HAS_LUN_ID 0x04
#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
/* For mailbox plus cmd ring, the size is fixed 8MB */
-#define MB_CMDR_SIZE (8 * 1024 * 1024)
+#define MB_CMDR_SIZE_DEF (8 * 1024 * 1024)
/* Offset of cmd ring is size of mailbox */
-#define CMDR_OFF sizeof(struct tcmu_mailbox)
-#define CMDR_SIZE (MB_CMDR_SIZE - CMDR_OFF)
+#define CMDR_OFF ((__u32)sizeof(struct tcmu_mailbox))
+#define CMDR_SIZE_DEF (MB_CMDR_SIZE_DEF - CMDR_OFF)
/*
* For data area, the default block size is PAGE_SIZE and
udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF;
udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk;
+ udev->cmdr_size = CMDR_SIZE_DEF;
udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF);
mutex_init(&udev->cmdr_lock);
goto err_bitmap_alloc;
}
- mb = vzalloc(MB_CMDR_SIZE);
+ mb = vzalloc(udev->cmdr_size + CMDR_OFF);
if (!mb) {
ret = -ENOMEM;
goto err_vzalloc;
/* mailbox fits in first part of CMDR space */
udev->mb_addr = mb;
udev->cmdr = (void *)mb + CMDR_OFF;
- udev->cmdr_size = CMDR_SIZE;
- udev->data_off = MB_CMDR_SIZE;
+ udev->data_off = udev->cmdr_size + CMDR_OFF;
data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
- udev->mmap_pages = (data_size + MB_CMDR_SIZE) >> PAGE_SHIFT;
+ udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT;
udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
udev->dbi_thresh = 0; /* Default in Idle state */
info->mem[0].name = "tcm-user command & data buffer";
info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
- info->mem[0].size = data_size + MB_CMDR_SIZE;
+ info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF;
info->mem[0].memtype = UIO_MEM_NONE;
info->irqcontrol = tcmu_irqcontrol;
enum {
Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk,
- Opt_err,
+ Opt_cmd_ring_size_mb, Opt_err,
};
static match_table_t tokens = {
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
{Opt_max_data_area_mb, "max_data_area_mb=%d"},
{Opt_data_pages_per_blk, "data_pages_per_blk=%d"},
+ {Opt_cmd_ring_size_mb, "cmd_ring_size_mb=%d"},
{Opt_err, NULL}
};
return ret;
}
+static int tcmu_set_cmd_ring_size(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for cmd_ring_size_mb=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val <= 0) {
+ pr_err("Invalid cmd_ring_size_mb %d.\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set cmd_ring_size_mb after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ udev->cmdr_size = (val << 20) - CMDR_OFF;
+ if (val > (MB_CMDR_SIZE_DEF >> 20)) {
+ pr_err("%d is too large. Adjusting cmd_ring_size_mb to global limit of %u\n",
+ val, (MB_CMDR_SIZE_DEF >> 20));
+ udev->cmdr_size = CMDR_SIZE_DEF;
+ }
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
+
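Worked example: with the default cmd_ring_size_mb of 8, udev->cmdr_size
becomes 8 MiB minus CMDR_OFF (the mailbox size), i.e. CMDR_SIZE_DEF, and the
data area starts at udev->cmdr_size + CMDR_OFF = 8 MiB into the mapping. That
is why data_off, mmap_pages and info->mem[0].size above are all derived from
udev->cmdr_size + CMDR_OFF rather than the old fixed MB_CMDR_SIZE.
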
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
case Opt_data_pages_per_blk:
ret = tcmu_set_data_pages_per_blk(udev, &args[0]);
break;
+ case Opt_cmd_ring_size_mb:
+ ret = tcmu_set_cmd_ring_size(udev, &args[0]);
+ break;
default:
break;
}
udev->dev_config[0] ? udev->dev_config : "NULL");
bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb);
- bl += sprintf(b + bl, "DataPagesPerBlk: %u\n", udev->data_pages_per_blk);
+ bl += sprintf(b + bl, "DataPagesPerBlk: %u ", udev->data_pages_per_blk);
+ bl += sprintf(b + bl, "CmdRingSizeMB: %u\n",
+ (udev->cmdr_size + CMDR_OFF) >> 20);
return bl;
}
}
CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk);
+static ssize_t tcmu_cmd_ring_size_mb_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ (udev->cmdr_size + CMDR_OFF) >> 20);
+}
+CONFIGFS_ATTR_RO(tcmu_, cmd_ring_size_mb);
+
static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
&tcmu_attr_qfull_time_out,
&tcmu_attr_max_data_area_mb,
&tcmu_attr_data_pages_per_blk,
+ &tcmu_attr_cmd_ring_size_mb,
&tcmu_attr_dev_config,
&tcmu_attr_dev_size,
&tcmu_attr_emulate_write_cache,
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/proc_fs.h>
-
#include <linux/atomic.h>
#include <linux/blkdev.h>
-#include "../../scsi/scsi.h"
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "microtek.h"
* but reading register selected in srb->cmnd[4]
*/
srb->cmd_len = 16;
- srb->cmnd = ses.cmnd;
srb->cmnd[2] = 1;
usb_stor_transparent_scsi_command(srb, us);
#include "usb.h"
#include "debug.h"
-#include "scsi.h"
void usb_stor_show_command(const struct us_data *us, struct scsi_cmnd *srb)
/* maximum number of LUNs supported */
unsigned char MaxLUNs;
- unsigned char cmnd[BLK_MAX_CDB];
+ unsigned char cmnd[MAX_COMMAND_SIZE];
struct scsi_cmnd srb;
struct scatterlist sg;
};
int status;
memset(&ata, 0, sizeof(ata));
- srb->cmnd = info->cmnd;
+ memcpy(srb->cmnd, info->cmnd, MAX_COMMAND_SIZE);
srb->device = &srb_dev;
ata.generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
continue;
cmnd = devinfo->cmnd[i];
- cmdinfo = (void *)&cmnd->SCp;
+ cmdinfo = scsi_cmd_priv(cmnd);
if (!(cmdinfo->state & IS_IN_WORK_LIST))
continue;
dev_dbg(&devinfo->intf->dev, "scan complete\n");
}
-static void uas_add_work(struct uas_cmd_info *cmdinfo)
+static void uas_add_work(struct scsi_cmnd *cmnd)
{
- struct scsi_pointer *scp = (void *)cmdinfo;
- struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd, SCp);
+ struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct uas_dev_info *devinfo = cmnd->device->hostdata;
lockdep_assert_held(&devinfo->lock);
continue;
cmnd = devinfo->cmnd[i];
- cmdinfo = (void *)&cmnd->SCp;
+ cmdinfo = scsi_cmd_priv(cmnd);
uas_log_cmd_state(cmnd, __func__, 0);
/* Sense urbs were killed, clear COMMAND_INFLIGHT manually */
cmdinfo->state &= ~COMMAND_INFLIGHT;
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
int status)
{
- struct uas_cmd_info *ci = (void *)&cmnd->SCp;
- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_cmd_info *ci = scsi_cmd_priv(cmnd);
if (status == -ENODEV) /* too late */
return;
scmd_printk(KERN_INFO, cmnd,
"%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ",
- prefix, status, cmdinfo->uas_tag,
+ prefix, status, ci->uas_tag,
(ci->state & SUBMIT_STATUS_URB) ? " s-st" : "",
(ci->state & ALLOC_DATA_IN_URB) ? " a-in" : "",
(ci->state & SUBMIT_DATA_IN_URB) ? " s-in" : "",
if (!cmnd)
return;
- cmdinfo = (void *)&cmnd->SCp;
+ cmdinfo = scsi_cmd_priv(cmnd);
if (cmdinfo->state & SUBMIT_CMD_URB)
usb_free_urb(cmdinfo->cmd_urb);
static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
{
- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
lockdep_assert_held(&devinfo->lock);
static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
unsigned direction)
{
- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
int err;
cmdinfo->state |= direction | SUBMIT_STATUS_URB;
err = uas_submit_urbs(cmnd, cmnd->device->hostdata);
if (err) {
- uas_add_work(cmdinfo);
+ uas_add_work(cmnd);
}
}
}
cmnd = devinfo->cmnd[idx];
- cmdinfo = (void *)&cmnd->SCp;
+ cmdinfo = scsi_cmd_priv(cmnd);
if (!(cmdinfo->state & COMMAND_INFLIGHT)) {
uas_log_cmd_state(cmnd, "unexpected status cmplt", 0);
static void uas_data_cmplt(struct urb *urb)
{
struct scsi_cmnd *cmnd = urb->context;
- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
struct scsi_data_buffer *sdb = &cmnd->sdb;
unsigned long flags;
enum dma_data_direction dir)
{
struct usb_device *udev = devinfo->udev;
- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct urb *urb = usb_alloc_urb(0, gfp);
struct scsi_data_buffer *sdb = &cmnd->sdb;
unsigned int pipe = (dir == DMA_FROM_DEVICE)
struct scsi_cmnd *cmnd)
{
struct usb_device *udev = devinfo->udev;
- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct urb *urb = usb_alloc_urb(0, gfp);
struct sense_iu *iu;
{
struct usb_device *udev = devinfo->udev;
struct scsi_device *sdev = cmnd->device;
- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct urb *urb = usb_alloc_urb(0, gfp);
struct command_iu *iu;
int len;
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo)
{
- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct urb *urb;
int err;
{
struct scsi_device *sdev = cmnd->device;
struct uas_dev_info *devinfo = sdev->hostdata;
- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
unsigned long flags;
int idx, err;
- BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer));
-
/* Re-check scsi_block_requests now that we've the host-lock */
if (cmnd->device->host->host_self_blocked)
return SCSI_MLQUEUE_DEVICE_BUSY;
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
- uas_add_work(cmdinfo);
+ uas_add_work(cmnd);
}
devinfo->cmnd[idx] = cmnd;
*/
static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
{
- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
struct urb *data_in_urb = NULL;
struct urb *data_out_urb = NULL;
.this_id = -1,
.skip_settle_delay = 1,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct uas_cmd_info),
};
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
if (srb) {
usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
srb->result);
- scsi_done(srb);
+ scsi_done_direct(srb);
}
} /* for (;;) */
{
return bio_op(bio) == REQ_OP_DISCARD ||
bio_op(bio) == REQ_OP_SECURE_ERASE ||
- bio_op(bio) == REQ_OP_WRITE_SAME ||
bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
return 0;
- case REQ_OP_WRITE_SAME:
- return 1;
default:
break;
}
REQ_OP_DISCARD = 3,
/* securely erase sectors */
REQ_OP_SECURE_ERASE = 5,
- /* write the same sector many times */
- REQ_OP_WRITE_SAME = 7,
/* write the zero filled sector many times */
REQ_OP_WRITE_ZEROES = 9,
/* Open a zone */
unsigned int io_opt;
unsigned int max_discard_sectors;
unsigned int max_hw_discard_sectors;
- unsigned int max_write_same_sectors;
unsigned int max_write_zeroes_sectors;
unsigned int max_zone_append_sectors;
unsigned int discard_granularity;
return min(q->limits.max_discard_sectors,
UINT_MAX >> SECTOR_SHIFT);
- if (unlikely(op == REQ_OP_WRITE_SAME))
- return q->limits.max_write_same_sectors;
-
if (unlikely(op == REQ_OP_WRITE_ZEROES))
return q->limits.max_write_zeroes_sectors;
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
-extern void blk_queue_max_write_same_sectors(struct request_queue *q,
- unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_io_schedule(void);
-extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, struct page *page);
-
#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
return q->limits.discard_alignment;
}
-static inline unsigned int bdev_write_same(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return q->limits.max_write_same_sectors;
-
- return 0;
-}
-
static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
#define _BLK_BSG_
#include <linux/blkdev.h>
-#include <scsi/scsi_request.h>
struct bsg_job;
struct request;
*/
unsigned num_secure_erase_bios;
- /*
- * The number of WRITE SAME bios that will be submitted to the target.
- * The bio number can be accessed with dm_bio_get_target_bio_nr.
- */
- unsigned num_write_same_bios;
-
/*
* The number of WRITE ZEROES bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
struct fc_fdmi_attr_entry {
__be16 type;
__be16 len;
- __u8 value[1];
+ __u8 value[];
} __attribute__((__packed__));
/*
*/
struct fs_fdmi_attrs {
__be32 numattrs;
- struct fc_fdmi_attr_entry attr[1];
+ struct fc_fdmi_attr_entry attr[];
} __attribute__((__packed__));
/*
struct completion tm_done;
} ____cacheline_aligned_in_smp;
+/*
+ * @fsp should be tested and set under the scsi_pkt_queue lock
+ */
+struct libfc_cmd_priv {
+ struct fc_fcp_pkt *fsp;
+ u32 resid_len;
+ u8 status;
+};
+
/*
* Structure and function definitions for managing Fibre Channel Exchanges
* and Sequences
#include <linux/refcount.h>
#include <scsi/iscsi_proto.h>
#include <scsi/iscsi_if.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_iscsi.h>
struct scsi_transport_template;
task->state == ISCSI_TASK_ABRT_SESS_RECOV;
}
+/* Private data associated with struct scsi_cmnd. */
+struct iscsi_cmd {
+ struct iscsi_task *task;
+ int age;
+};
+
+static inline struct iscsi_cmd *iscsi_cmd(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
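A minimal sketch of the accessor in use (function name illustrative): code
that used to chase a task pointer stashed on the scsi_cmnd can now do:

static struct iscsi_task *foo_task_of(struct scsi_cmnd *sc)
{
	/* libiscsi's per-command state lives in the scsi_cmnd PDU */
	return iscsi_cmd(sc)->task;
}
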
/* Connection's states */
enum {
ISCSI_CONN_INITIAL_STAGE,
int state;
struct workqueue_struct *workq;
- char workq_name[20];
};
/*
SAS_INTERRUPTED,
SAS_QUEUE_FULL,
SAS_DEVICE_UNKNOWN,
- SAS_SG_ERR,
SAS_OPEN_REJECT,
SAS_OPEN_TO,
SAS_PROTO_RESPONSE,
u8 stp_affil_pol:1;
u8 device_control_reg_update:1;
+
+ bool force_phy;
+ int force_phy_id;
+};
+
+/* LLDDs rely on these values */
+enum sas_internal_abort {
+ SAS_INTERNAL_ABORT_SINGLE = 0,
+ SAS_INTERNAL_ABORT_DEV = 1,
+};
+
+struct sas_internal_abort_task {
+ enum sas_internal_abort type;
+ unsigned int qid;
+ u16 tag;
};
struct sas_smp_task {
struct scsi_cmnd *cmd;
};
+struct sas_tmf_task {
+ u8 tmf;
+ u16 tag_of_task_to_be_managed;
+};
+
struct sas_task {
struct domain_device *dev;
struct sas_ata_task ata_task;
struct sas_smp_task smp_task;
struct sas_ssp_task ssp_task;
+ struct sas_internal_abort_task abort_task;
};
struct scatterlist *scatter;
void *lldd_task; /* for use by LLDDs */
void *uldd_task;
struct sas_task_slow *slow_task;
+ struct sas_tmf_task *tmf;
};
struct sas_task_slow {
#define SAS_TASK_STATE_DONE 2
#define SAS_TASK_STATE_ABORTED 4
#define SAS_TASK_NEED_DEV_RESET 8
-#define SAS_TASK_AT_INITIATOR 16
extern struct sas_task *sas_alloc_task(gfp_t flags);
extern struct sas_task *sas_alloc_slow_task(gfp_t flags);
extern void sas_free_task(struct sas_task *task);
+static inline bool sas_is_internal_abort(struct sas_task *task)
+{
+ return task->task_proto == SAS_PROTOCOL_INTERNAL_ABORT;
+}
+
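A hedged sketch of how an LLDD's task-execution path might consume these
internal abort tasks (foo_abort_tag() and foo_abort_dev() are hypothetical
hardware hooks, not part of the patch):

static int foo_lldd_execute_task(struct sas_task *task, gfp_t gfp_flags)
{
	if (sas_is_internal_abort(task)) {
		struct sas_internal_abort_task *abort = &task->abort_task;

		/* abort one tag, or everything queued for the device */
		if (abort->type == SAS_INTERNAL_ABORT_SINGLE)
			return foo_abort_tag(task->dev, abort->qid, abort->tag);
		return foo_abort_dev(task->dev, abort->qid);
	}

	/* ... normal SSP/SMP/STP dispatch ... */
	return 0;
}
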
struct sas_domain_function_template {
/* The class calls these to notify the LLDD of an event. */
void (*lldd_port_formed)(struct asd_sas_phy *);
/* Task Management Functions. Must be called from process context. */
int (*lldd_abort_task)(struct sas_task *);
int (*lldd_abort_task_set)(struct domain_device *, u8 *lun);
- int (*lldd_clear_aca)(struct domain_device *, u8 *lun);
int (*lldd_clear_task_set)(struct domain_device *, u8 *lun);
int (*lldd_I_T_nexus_reset)(struct domain_device *);
int (*lldd_ata_check_ready)(struct domain_device *);
int (*lldd_lu_reset)(struct domain_device *, u8 *lun);
int (*lldd_query_task)(struct sas_task *);
+ /* Special TMF callbacks */
+ void (*lldd_tmf_exec_complete)(struct domain_device *dev);
+ void (*lldd_tmf_aborted)(struct sas_task *task);
+ bool (*lldd_abort_timeout)(struct sas_task *task, void *data);
+
/* Port and Adapter management */
int (*lldd_clear_nexus_port)(struct asd_sas_port *);
int (*lldd_clear_nexus_ha)(struct sas_ha_struct *);
extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
extern int sas_bios_param(struct scsi_device *, struct block_device *,
sector_t capacity, int *hsc);
+int sas_execute_internal_abort_single(struct domain_device *device,
+ u16 tag, unsigned int qid,
+ void *data);
+int sas_execute_internal_abort_dev(struct domain_device *device,
+ unsigned int qid, void *data);
extern struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *);
extern struct device_attribute dev_attr_phy_event_threshold;
void sas_unregister_domain_devices(struct asd_sas_port *port, int gone);
void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *);
-int sas_discover_event(struct asd_sas_port *, enum discover_event ev);
+void sas_discover_event(struct asd_sas_port *, enum discover_event ev);
int sas_discover_sata(struct domain_device *);
int sas_discover_end_dev(struct domain_device *);
int sas_request_addr(struct Scsi_Host *shost, u8 *addr);
-int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
- gfp_t gfp_flags);
-int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
- gfp_t gfp_flags);
+int sas_abort_task_set(struct domain_device *dev, u8 *lun);
+int sas_clear_task_set(struct domain_device *dev, u8 *lun);
+int sas_lu_reset(struct domain_device *dev, u8 *lun);
+int sas_query_task(struct sas_task *task, u16 tag);
+int sas_abort_task(struct sas_task *task, u16 tag);
+
+void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
+ gfp_t gfp_flags);
+void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
+ gfp_t gfp_flags);
#endif /* _SASLIB_H_ */
SAS_PROTOCOL_SSP = 0x08,
SAS_PROTOCOL_ALL = 0x0E,
SAS_PROTOCOL_STP_ALL = SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA,
+ /* these are internal to libsas */
+ SAS_PROTOCOL_INTERNAL_ABORT = 0x10,
};
/* From the spec; local phys only */
SAS_GPIO_REG_TX_GP = 4,
};
+/* Response frame DATAPRES field */
+enum {
+ SAS_DATAPRES_NO_DATA = 0,
+ SAS_DATAPRES_RESPONSE_DATA = 1,
+ SAS_DATAPRES_SENSE_DATA = 2,
+};
+
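A small sketch of decoding the field, assuming the caller has already pulled
DATAPRES out of the SSP response IU:

static const char *foo_datapres_name(u8 datapres)
{
	switch (datapres) {
	case SAS_DATAPRES_NO_DATA:
		return "status only";		/* status byte is all there is */
	case SAS_DATAPRES_RESPONSE_DATA:
		return "response data";		/* e.g. TMF completion code */
	case SAS_DATAPRES_SENSE_DATA:
		return "autosense data";	/* copy into the sense buffer */
	default:
		return "reserved";
	}
}
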
struct dev_to_host_fis {
u8 fis_type; /* 0x34 */
u8 flags;
int sas_ata_init(struct domain_device *dev);
void sas_ata_task_abort(struct sas_task *task);
void sas_ata_strategy_handler(struct Scsi_Host *shost);
-void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
- struct list_head *done_q);
+void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q);
void sas_ata_schedule_reset(struct domain_device *dev);
void sas_ata_wait_eh(struct domain_device *dev);
void sas_probe_sata(struct asd_sas_port *port);
void sas_suspend_sata(struct asd_sas_port *port);
void sas_resume_sata(struct asd_sas_port *port);
void sas_ata_end_eh(struct ata_port *ap);
+int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
+ int force_phy_id);
#else
{
}
-static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
- struct list_head *done_q)
+static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q)
{
}
static inline void sas_ata_end_eh(struct ata_port *ap)
{
}
+
+static inline int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
+ int force_phy_id)
+{
+ return 0;
+}
#endif
#endif /* _SAS_ATA_H_ */
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_device.h>
-#include <scsi/scsi_request.h>
struct Scsi_Host;
* supports without specifying a cmd_len by ULD's
*/
#define MAX_COMMAND_SIZE 16
-#if (MAX_COMMAND_SIZE > BLK_MAX_CDB)
-# error MAX_COMMAND_SIZE can not be bigger than BLK_MAX_CDB
-#endif
struct scsi_data_buffer {
struct sg_table table;
} __packed;
struct scsi_cmnd {
- struct scsi_request req;
struct scsi_device *device;
struct list_head eh_entry; /* entry for the host eh_abort_list/eh_cmd_q */
struct delayed_work abort_work;
unsigned short cmd_len;
enum dma_data_direction sc_data_direction;
- /* These elements define the operation we are about to perform */
- unsigned char *cmnd;
-
+ unsigned char cmnd[32]; /* SCSI CDB */
/* These elements define the operation we ultimately want to perform */
struct scsi_data_buffer sdb;
(ie, between disconnect /
reconnects. Probably == sector
size */
-
+ unsigned resid_len; /* residual count */
+ unsigned sense_len;
unsigned char *sense_buffer;
/* obtained by REQUEST SENSE when
* CHECK CONDITION is received on original
* command (auto-sense). Length must be
* SCSI_SENSE_BUFFERSIZE bytes. */
+ int flags; /* Command flags */
+ unsigned long state; /* Command completion state */
+
+ unsigned int extra_len; /* length of alignment and padding */
+
/*
- * The following fields can be written to by the host specific code.
- * Everything else should be left alone.
+ * The fields below can be modified by the LLD but the fields above
+ * must not be modified.
*/
- struct scsi_pointer SCp; /* Scratchpad used by some host adapters */
unsigned char *host_scribble; /* The host adapter is allowed to
* call scsi_malloc and get some memory
* to be at an address < 16Mb). */
int result; /* Status code from lower level driver */
- int flags; /* Command flags */
- unsigned long state; /* Command completion state */
-
- unsigned int extra_len; /* length of alignment and padding */
};
/* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */
}
void scsi_done(struct scsi_cmnd *cmd);
+void scsi_done_direct(struct scsi_cmnd *cmd);
extern void scsi_finish_command(struct scsi_cmnd *cmd);
static inline void scsi_set_resid(struct scsi_cmnd *cmd, unsigned int resid)
{
- cmd->req.resid_len = resid;
+ cmd->resid_len = resid;
}
static inline unsigned int scsi_get_resid(struct scsi_cmnd *cmd)
{
- return cmd->req.resid_len;
+ return cmd->resid_len;
}
#define scsi_for_each_sg(cmd, sg, nseg, __i) \
unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device
* creation time */
unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */
+ unsigned silence_suspend:1; /* Do not print runtime PM related messages */
unsigned int queue_stopped; /* request queue is quiesced */
bool offline_already; /* Device offline message logged */
unsigned underflow;
unsigned char cmd_len;
unsigned char prot_op;
- unsigned char *cmnd;
+ unsigned char cmnd[32];
struct scsi_data_buffer sdb;
- /* new command support */
- unsigned char eh_cmnd[BLK_MAX_CDB];
struct scatterlist sense_sgl;
};
struct module;
struct scsi_cmnd;
struct scsi_device;
-struct scsi_host_cmd_pool;
struct scsi_target;
struct Scsi_Host;
struct scsi_transport_template;
*/
u64 vendor_id;
- struct scsi_host_cmd_pool *cmd_pool;
-
/* Delay for runtime autosuspend */
int rpm_autosuspend_delay;
};
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SCSI_SCSI_REQUEST_H
-#define _SCSI_SCSI_REQUEST_H
-
-#include <linux/blk-mq.h>
-
-#define BLK_MAX_CDB 16
-
-struct scsi_request {
- unsigned char __cmd[BLK_MAX_CDB];
- unsigned char *cmd;
- unsigned short cmd_len;
- int result;
- unsigned int sense_len;
- unsigned int resid_len; /* residual count */
- int retries;
- void *sense;
-};
-
-static inline struct scsi_request *scsi_req(struct request *rq)
-{
- return blk_mq_rq_to_pdu(rq);
-}
-
-static inline void scsi_req_free_cmd(struct scsi_request *req)
-{
- if (req->cmd != req->__cmd)
- kfree(req->cmd);
-}
-
-#endif /* _SCSI_SCSI_REQUEST_H */
bool recovery_tmo_sysfs_override;
struct delayed_work recovery_work;
+ struct workqueue_struct *workq;
+
unsigned int target_id;
bool ida_used;
iscsi_dev_to_session(_stgt->dev.parent)
struct iscsi_cls_host {
- atomic_t nr_scans;
struct mutex mutex;
struct request_queue *bsg_q;
uint32_t port_speed;
unsigned int target_id);
extern void iscsi_remove_session(struct iscsi_cls_session *session);
extern void iscsi_free_session(struct iscsi_cls_session *session);
-extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+extern struct iscsi_cls_conn *iscsi_alloc_conn(struct iscsi_cls_session *sess,
int dd_size, uint32_t cid);
+extern int iscsi_add_conn(struct iscsi_cls_conn *conn);
+extern void iscsi_remove_conn(struct iscsi_cls_conn *conn);
extern void iscsi_put_conn(struct iscsi_cls_conn *conn);
extern void iscsi_get_conn(struct iscsi_cls_conn *conn);
-extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
extern void iscsi_unblock_session(struct iscsi_cls_session *session);
extern void iscsi_block_session(struct iscsi_cls_session *session);
-extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
};
struct viosrp_crq {
- u8 valid; /* used by RPA */
- u8 format; /* SCSI vs out-of-band */
- u8 reserved;
- u8 status; /* non-scsi failure? (e.g. DMA failure) */
- __be16 timeout; /* in seconds */
- __be16 IU_length; /* in bytes */
+ union {
+ __be64 high; /* High 64 bits */
+ struct {
+ u8 valid; /* used by RPA */
+ u8 format; /* SCSI vs out-of-band */
+ u8 reserved;
+ u8 status; /* non-scsi failure? (e.g. DMA failure) */
+ __be16 timeout; /* in seconds */
+ __be16 IU_length; /* in bytes */
+ };
+ };
__be64 IU_data_ptr; /* the TCE for transferring data */
};
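
The anonymous union overlays the first eight bytes of the CRQ with a single
__be64 ("high"), presumably so the 16-byte queue element can be handed to the
firmware interface as two big-endian doublewords (high plus IU_data_ptr)
without a memcpy or cast, while existing field-level users compile unchanged.
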
struct ahash_request *conn_tx_hash;
/* Used for scheduling TX and RX connection kthreads */
cpumask_var_t conn_cpumask;
+ cpumask_var_t allowed_cpumask;
unsigned int conn_rx_reset_cpumask:1;
unsigned int conn_tx_reset_cpumask:1;
/* list_head of struct iscsi_cmd for this connection */
/* Thread Set bitmap pointer */
unsigned long *ts_bitmap;
spinlock_t ts_bitmap_lock;
+ cpumask_var_t allowed_cpumask;
/* Used for iSCSI discovery session authentication */
struct iscsi_node_acl discovery_acl;
struct iscsi_portal_group *discovery_tpg;
extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
-static inline void iscsit_thread_check_cpumask(
- struct iscsi_conn *conn,
- struct task_struct *p,
- int mode)
-{
- /*
- * mode == 1 signals iscsi_target_tx_thread() usage.
- * mode == 0 signals iscsi_target_rx_thread() usage.
- */
- if (mode == 1) {
- if (!conn->conn_tx_reset_cpumask)
- return;
- conn->conn_tx_reset_cpumask = 0;
- } else {
- if (!conn->conn_rx_reset_cpumask)
- return;
- conn->conn_rx_reset_cpumask = 0;
- }
- /*
- * Update the CPU mask for this single kthread so that
- * both TX and RX kthreads are scheduled to run on the
- * same CPU.
- */
- set_cpus_allowed_ptr(p, conn->conn_cpumask);
-}
+extern void iscsit_thread_check_cpumask(struct iscsi_conn *conn,
+ struct task_struct *p,
+ int mode);
+
#endif /* ISCSI_TARGET_CORE_H */
switch (op & REQ_OP_MASK) {
case REQ_OP_WRITE:
- case REQ_OP_WRITE_SAME:
rwbs[i++] = 'W';
break;
case REQ_OP_DISCARD: