Contributors: 18
Author                    Tokens  Token Proportion  Commits  Commit Proportion
Swen Schillig               1678            34.43%       13             16.46%
Steffen Maier               1039            21.32%       20             25.32%
Maxim Shchetynin             744            15.26%        2              2.53%
Martin Peschke               647            13.27%       12             15.19%
Christof Schmitt             313             6.42%       12             15.19%
Benjamin Block               192             3.94%        3              3.80%
Jens Remus                   136             2.79%        3              3.80%
Martin Schwidefsky            97             1.99%        2              2.53%
Heiko Carstens                 7             0.14%        3              3.80%
Hannes Reinecke                6             0.12%        1              1.27%
James Smart                    4             0.08%        1              1.27%
Martin Petermann               3             0.06%        1              1.27%
James Bottomley                2             0.04%        1              1.27%
Linus Torvalds (pre-git)       2             0.04%        1              1.27%
Linus Torvalds                 1             0.02%        1              1.27%
Greg Kroah-Hartman             1             0.02%        1              1.27%
Andreas Herrmann               1             0.02%        1              1.27%
Dan Carpenter                  1             0.02%        1              1.27%
Total                       4874                          79


// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2023
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"

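/*
 * Module parameters controlling the per-adapter trace areas.  They are
 * read-only at runtime (permissions 0400); as an illustration only, they
 * could be set at load time via "modprobe zfcp dbfsize=8 dbflevel=6" or,
 * with zfcp built in, via "zfcp.dbfsize=8 zfcp.dbflevel=6" on the kernel
 * command line.
 */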
static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
		 "number of pages for each debug feature area (default 4)");

static u32 dbflevel = 3;

module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
		 "log level for each debug feature area "
		 "(default 3, range 0..6)");

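/*
 * Size of a "pay" trace record that carries only "offset" bytes of
 * payload data, i.e. the full struct zfcp_dbf_pay minus the unused tail
 * of its ZFCP_DBF_PAY_MAX_REC data area.
 */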
static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}

static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
		       u64 req_id)
{
	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
	u16 offset = 0, rec_length;

	spin_lock(&dbf->pay_lock);
	memset(pl, 0, sizeof(*pl));
	pl->fsf_req_id = req_id;
	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

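	/*
	 * Split the payload into chunks of at most ZFCP_DBF_PAY_MAX_REC
	 * bytes; each chunk becomes its own "pay" record, tied together by
	 * fsf_req_id and the running counter.
	 */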
	while (offset < length) {
		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
				 (u16) (length - offset));
		memcpy(pl->data, data + offset, rec_length);
		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

		offset += rec_length;
		pl->counter++;
	}

	spin_unlock(&dbf->pay_lock);
}

/**
 * zfcp_dbf_hba_fsf_res - trace event for FSF responses
 * @tag: tag indicating which kind of FSF response has been received
 * @level: trace level to be used for event
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = q_head->fsf_command;
	rec->fsf_seq_no = q_pref->req_seq_no;
	rec->u.res.req_issued = req->issued;
	rec->u.res.prot_status = q_pref->prot_status;
	rec->u.res.fsf_status = q_head->fsf_status;
	rec->u.res.port_handle = q_head->port_handle;
	rec->u.res.lun_handle = q_head->lun_handle;

	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
	       FSF_PROT_STATUS_QUAL_SIZE);
	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
	       FSF_STATUS_QUALIFIER_SIZE);

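	/*
	 * The QTCB header may announce an additional log area via
	 * log_start/log_length; dump it into "pay" records keyed by the
	 * same request ID.
	 */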
	rec->pl_len = q_head->log_length;
	zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
			  rec->pl_len, "fsf_res", req->req_id);

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_fces - trace event for FSF responses related to
 *			   FC Endpoint Security (FCES)
 * @tag: tag indicating which kind of FC Endpoint Security event has occurred
 * @req: request for which a response was received
 * @wwpn: remote port or ZFCP_DBF_INVALID_WWPN
 * @fc_security_old: old FC Endpoint Security of FCP device or connection
 * @fc_security_new: new FC Endpoint Security of FCP device or connection
 */
void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req, u64 wwpn,
			   u32 fc_security_old, u32 fc_security_new)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 3;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_FCES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = q_head->fsf_command;
	rec->fsf_seq_no = q_pref->req_seq_no;
	rec->u.fces.req_issued = req->issued;
	rec->u.fces.fsf_status = q_head->fsf_status;
	rec->u.fces.port_handle = q_head->port_handle;
	rec->u.fces.wwpn = wwpn;
	rec->u.fces.fc_security_old = fc_security_old;
	rec->u.fces.fc_security_new = fc_security_new;

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_reqid - trace only the tag and a request ID
 * @tag: tag documenting the source
 * @level: trace level
 * @adapter: adapter instance the request ID belongs to
 * @req_id: the request ID to trace
 */
void zfcp_dbf_hba_fsf_reqid(const char *const tag, const int level,
			    struct zfcp_adapter *const adapter,
			    const u64 req_id)
{
	struct zfcp_dbf *const dbf = adapter->dbf;
	struct zfcp_dbf_hba *const rec = &dbf->hba_buf;
	struct zfcp_dbf_hba_res *const res = &rec->u.res;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);

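	/*
	 * Only the request ID is known here; every other field is filled
	 * with all-ones so it is recognizable as "not available" in the
	 * trace output.
	 */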
	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req_id;
	rec->fsf_req_status = ~0u;
	rec->fsf_cmd = ~0u;
	rec->fsf_seq_no = ~0u;

	res->req_issued = ~0ull;
	res->prot_status = ~0u;
	memset(res->prot_status_qual, 0xff, sizeof(res->prot_status_qual));
	res->fsf_status = ~0u;
	memset(res->fsf_status_qual, 0xff, sizeof(res->fsf_status_qual));
	res->port_handle = ~0u;
	res->lun_handle = ~0u;

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_status_read_buffer *srb = req->data;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 2;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_USS;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;

	if (!srb)
		goto log;

	rec->u.uss.status_type = srb->status_type;
	rec->u.uss.status_subtype = srb->status_subtype;
	rec->u.uss.d_id = ntoh24(srb->d_id);
	rec->u.uss.lun = srb->fcp_lun;
	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
	       sizeof(rec->u.uss.queue_designator));

	/* status read buffer payload length */
	rec->pl_len = (!srb->length) ? 0 : srb->length -
			offsetof(struct fsf_status_read_buffer, payload);

	if (rec->pl_len)
		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
				  "fsf_uss", req->req_id);
log:
	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of bit error unsolicited status was received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	struct fsf_status_read_buffer *sr_buf = req->data;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BIT;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
	       sizeof(struct fsf_bit_error_payload));

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request ID which caused the deferred error message
 * @scount: number of SBALs, including the signaling SBAL
 * @pl: array of all involved SBALs
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
			  void **pl)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	unsigned long flags;
	static int const level = 1;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->pay, level)))
		return;

	if (!pl)
		return;

	spin_lock_irqsave(&dbf->pay_lock, flags);
	memset(payload, 0, sizeof(*payload));

	memcpy(payload->area, "def_err", 7);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	length = min((u16)sizeof(struct qdio_buffer),
		     (u16)ZFCP_DBF_PAY_MAX_REC);

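	/*
	 * Dump one payload record per SBAL, until either scount entries are
	 * traced or a NULL pointer terminates the array; each record is
	 * capped to the length computed above.
	 */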
	while (payload->counter < scount && (char *)pl[payload->counter]) {
		memcpy(payload->data, (char *)pl[payload->counter], length);
		debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
		payload->counter++;
	}

	spin_unlock_irqrestore(&dbf->pay_lock, flags);
}

static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
				struct zfcp_adapter *adapter,
				struct zfcp_port *port,
				struct scsi_device *sdev)
{
	rec->adapter_status = atomic_read(&adapter->status);
	if (port) {
		rec->port_status = atomic_read(&port->status);
		rec->wwpn = port->wwpn;
		rec->d_id = port->d_id;
	}
	if (sdev) {
		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
		rec->lun = zfcp_scsi_dev_lun(sdev);
	} else
		rec->lun = ZFCP_DBF_INVALID_LUN;
}

/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
		       struct zfcp_port *port, struct scsi_device *sdev,
		       u8 want, u8 need)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	struct list_head *entry;
	unsigned long flags;

	lockdep_assert_held(&adapter->erp_lock);

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_TRIG;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, adapter, port, sdev);

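	/* count how many erp_actions are currently queued and running */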
	list_for_each(entry, &adapter->erp_ready_head)
		rec->u.trig.ready++;

	list_for_each(entry, &adapter->erp_running_head)
		rec->u.trig.running++;

	rec->u.trig.want = want;
	rec->u.trig.need = need;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock must not be held.
 */
void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
			    struct zfcp_port *port, struct scsi_device *sdev,
			    u8 want, u8 need)
{
	unsigned long flags;

	read_lock_irqsave(&adapter->erp_lock, flags);
	zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
	read_unlock_irqrestore(&adapter->erp_lock, flags);
}

/**
 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
 * @level: trace level to be used for event
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
	struct zfcp_dbf *dbf = erp->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	if (!debug_level_enabled(dbf->rec, level))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

	rec->u.run.fsf_req_id = erp->fsf_req_id;
	rec->u.run.rec_status = erp->status;
	rec->u.run.rec_step = erp->step;
	rec->u.run.rec_action = erp->type;

	if (erp->sdev)
		rec->u.run.rec_count =
			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
	else if (erp->port)
		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
	else
		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
	zfcp_dbf_rec_run_lvl(1, tag, erp);
}

/**
 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
 * @tag: identifier for event
 * @wka_port: well known address port
 * @req_id: request ID to correlate with potential HBA trace record
 */
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
			  u64 req_id)
{
	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->port_status = wka_port->status;
	rec->d_id = wka_port->d_id;
	rec->lun = ZFCP_DBF_INVALID_LUN;

	rec->u.run.fsf_req_id = req_id;
	rec->u.run.rec_status = ~0;
	rec->u.run.rec_step = ~0;
	rec->u.run.rec_action = ~0;
	rec->u.run.rec_count = ~0;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

#define ZFCP_DBF_SAN_LEVEL 1

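/*
 * Common helper for all SAN trace records: up to ZFCP_DBF_SAN_MAX_PAYLOAD
 * bytes of the first scatterlist entry go into the SAN record itself; if
 * the full payload is longer, it is additionally dumped into "pay"
 * records, capped at cap_len bytes.
 */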
static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
		  u64 req_id, u32 d_id, u16 cap_len)
{
	struct zfcp_dbf_san *rec = &dbf->san_buf;
	u16 rec_len;
	unsigned long flags;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	u16 pay_sum = 0;

	spin_lock_irqsave(&dbf->san_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = id;
	rec->fsf_req_id = req_id;
	rec->d_id = d_id;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->pl_len = len; /* full length even if we cap pay below */
	if (!sg)
		goto out;
	rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
	memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
	if (len <= rec_len)
		goto out; /* skip pay record if full content in rec->payload */

	/* if (len > rec_len):
	 * dump data up to cap_len ignoring small duplicate in rec->payload
	 */
	spin_lock(&dbf->pay_lock);
	memset(payload, 0, sizeof(*payload));
	memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
		u16 pay_len, offset = 0;

		while (offset < sg->length && pay_sum < cap_len) {
			pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
				      (u16)(sg->length - offset));
			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
			debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
				    zfcp_dbf_plen(pay_len));
			payload->counter++;
			offset += pay_len;
			pay_sum += pay_len;
		}
	}
	spin_unlock(&dbf->pay_lock);

out:
	debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->san_lock, flags);
}

/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT or ELS data
 * @d_id: N_Port_ID where SAN request is sent to
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
		     length, fsf->req_id, d_id, length);
}

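/*
 * GPN_FT responses from the fabric name server can be very large, so cap
 * the traced payload right after the entry flagged FC_NS_FID_LAST.
 * Responses that do not belong to a nameserver GPN_FT request pass
 * through uncapped; failed or rejected exchanges are capped as noted in
 * the comments below.
 */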
static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
					      struct zfcp_fsf_req *fsf,
					      u16 len)
{
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
	struct scatterlist *resp_entry = ct_els->resp;
	struct fc_ct_hdr *resph;
	struct fc_gpn_ft_resp *acc;
	int max_entries, x, last = 0;

	if (!(memcmp(tag, "fsscth2", 7) == 0
	      && ct_els->d_id == FC_FID_DIR_SERV
	      && reqh->ct_rev == FC_CT_REV
	      && reqh->ct_in_id[0] == 0
	      && reqh->ct_in_id[1] == 0
	      && reqh->ct_in_id[2] == 0
	      && reqh->ct_fs_type == FC_FST_DIR
	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
	      && reqh->ct_options == 0
	      && reqh->_ct_resvd1 == 0
	      && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
	      /* reqh->ct_mr_size can vary so do not match but read below */
	      && reqh->_ct_resvd2 == 0
	      && reqh->ct_reason == 0
	      && reqh->ct_explan == 0
	      && reqh->ct_vendor == 0
	      && reqn->fn_resvd == 0
	      && reqn->fn_domain_id_scope == 0
	      && reqn->fn_area_id_scope == 0
	      && reqn->fn_fc4_type == FC_TYPE_FCP))
		return len; /* not GPN_FT response so do not cap */

	acc = sg_virt(resp_entry);

	/* cap all but accept CT responses to at least the CT header */
	resph = (struct fc_ct_hdr *)acc;
	if ((ct_els->status) ||
	    (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
		return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);

	max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
		       sizeof(struct fc_gpn_ft_resp))
		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
		     * to account for header as 1st pseudo "entry" */;

	/* the basic CT_IU preamble is the same size as one entry in the GPN_FT
	 * response, allowing us to skip special handling for it - just skip it
	 */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++resp_entry);

		last = acc->fp_flags & FC_NS_FID_LAST;
	}
	len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
	return len; /* cap after last entry */
}

/**
 * zfcp_dbf_san_res - trace event for received SAN response
 * @tag: identifier for event
 * @fsf: request containing received CT or ELS data
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
		     length, fsf->req_id, ct_els->d_id,
		     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}

/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request containing received ELS data
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct fsf_status_read_buffer *srb =
		(struct fsf_status_read_buffer *) fsf->data;
	u16 length;
	struct scatterlist sg;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)(srb->length -
			offsetof(struct fsf_status_read_buffer, payload));
	sg_init_one(&sg, srb->payload.data, length);
	zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
		     fsf->req_id, ntoh24(srb->d_id), length);
}

/**
 * zfcp_dbf_scsi_common() - Common trace event helper for scsi.
 * @tag: Identifier for event.
 * @level: trace level of event.
 * @sdev: Pointer to SCSI device as context for this event.
 * @sc: Pointer to SCSI command, or NULL with task management function (TMF).
 * @fsf: Pointer to FSF request, or NULL.
 */
void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
			  struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sdev->host->hostdata[0];
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *fcp_rsp_info;
	unsigned long flags;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	if (sc) {
		rec->scsi_result = sc->result;
		rec->scsi_retries = sc->retries;
		rec->scsi_allowed = sc->allowed;
		rec->scsi_id = sc->device->id;
		rec->scsi_lun = (u32)sc->device->lun;
		rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
		rec->host_scribble = (u64)sc->host_scribble;

		memcpy(rec->scsi_opcode, sc->cmnd,
		       min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
	} else {
		rec->scsi_result = ~0;
		rec->scsi_retries = ~0;
		rec->scsi_allowed = ~0;
		rec->scsi_id = sdev->id;
		rec->scsi_lun = (u32)sdev->lun;
		rec->scsi_lun_64_hi = (u32)(sdev->lun >> 32);
		rec->host_scribble = ~0;

		memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
	}

	if (fsf) {
		rec->fsf_req_id = fsf->req_id;
		rec->pl_len = FCP_RESP_WITH_EXT;
		fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
		/* mandatory parts of FCP_RSP IU in this SCSI record */
		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
		}
		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL)
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
		/* complete FCP_RSP IU in associated PAYload record
		 * but only if there are optional parts
		 */
		if (fcp_rsp->resp.fr_flags != 0)
			zfcp_dbf_pl_write(
				dbf, fcp_rsp,
				/* at least one full PAY record
				 * but not beyond hardware response field
				 */
				min_t(u16, max_t(u16, rec->pl_len,
						 ZFCP_DBF_PAY_MAX_REC),
				      FSF_FCP_RSP_SIZE),
				"fcp_riu", fsf->req_id);
	}

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}

/**
 * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
 * @tag: Identifier for event.
 * @adapter: Pointer to zfcp adapter as context for this event.
 * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
 * @ret: Return value of calling function.
 *
 * This SCSI trace variant does not depend on any of:
 * scsi_cmnd, zfcp_fsf_req, scsi_device.
 */
void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
		      unsigned int scsi_id, int ret)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	unsigned long flags;
	static int const level = 1;

	if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
		return;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
	rec->scsi_retries = ~0;
	rec->scsi_allowed = ~0;
	rec->fcp_rsp_info = ~0;
	rec->scsi_id = scsi_id;
	rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
	rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
	rec->host_scribble = ~0;
	memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}

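/*
 * Wrapper around the s390 debug feature: registers one debug area of
 * "size" pages with fixed record size "rec_size", attaches the hex/ASCII
 * view for reading the records (typically visible under
 * /sys/kernel/debug/s390dbf/<name>/hex_ascii when debugfs is mounted),
 * and applies the level from the dbflevel module parameter.
 */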
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
	struct debug_info *d;

	d = debug_register(name, size, 1, rec_size);
	if (!d)
		return NULL;

	debug_register_view(d, &debug_hex_ascii_view);
	debug_set_level(d, dbflevel);

	return d;
}

static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
	if (!dbf)
		return;

	debug_unregister(dbf->scsi);
	debug_unregister(dbf->san);
	debug_unregister(dbf->hba);
	debug_unregister(dbf->pay);
	debug_unregister(dbf->rec);
	kfree(dbf);
}

/**
 * zfcp_dbf_adapter_register - registers debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 * Return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
	char name[DEBUG_MAX_NAME_LEN];
	struct zfcp_dbf *dbf;

	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
	if (!dbf)
		return -ENOMEM;

	spin_lock_init(&dbf->pay_lock);
	spin_lock_init(&dbf->hba_lock);
	spin_lock_init(&dbf->san_lock);
	spin_lock_init(&dbf->scsi_lock);
	spin_lock_init(&dbf->rec_lock);

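	/*
	 * Trace area names are derived from the FCP device's bus ID, for
	 * example "zfcp_0.0.1900_rec" (bus ID purely illustrative).
	 */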
	/* debug feature area which records recovery activity */
	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
	if (!dbf->rec)
		goto err_out;

	/* debug feature area which records HBA (FSF and QDIO) conditions */
	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
	if (!dbf->hba)
		goto err_out;

	/* debug feature area which records payload info */
	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
	if (!dbf->pay)
		goto err_out;

	/* debug feature area which records SAN command failures and recovery */
	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
	if (!dbf->san)
		goto err_out;

	/* debug feature area which records SCSI command failures and recovery */
	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
	if (!dbf->scsi)
		goto err_out;

	adapter->dbf = dbf;

	return 0;
err_out:
	zfcp_dbf_unregister(dbf);
	return -ENOMEM;
}

/**
 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;

	adapter->dbf = NULL;
	zfcp_dbf_unregister(dbf);
}