Release 4.7 drivers/s390/block/scm_blk.h
#ifndef SCM_BLK_H
#define SCM_BLK_H
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <asm/debug.h>
#include <asm/eadm.h>
#define SCM_NR_PARTS 8
#define SCM_QUEUE_DELAY 5
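/*
 * SCM_NR_PARTS bounds the minors (and hence partitions) per gendisk;
 * SCM_QUEUE_DELAY is the delay passed to blk_delay_queue() when the
 * request queue has to be restarted later (see scm_blk.c for both uses).
 */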
struct scm_blk_dev {
	struct tasklet_struct tasklet;
	struct request_queue *rq;
	struct gendisk *gendisk;
	struct scm_device *scmdev;
	spinlock_t rq_lock;	/* guard the request queue */
	spinlock_t lock;	/* guard the rest of the blockdev */
	atomic_t queued_reqs;
	enum {SCM_OPER, SCM_WR_PROHIBIT} state;
	struct list_head finished_requests;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct list_head cluster_list;
#endif
};
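/*
 * One scm_blk_dev exists per scm_device; scm_blk_dev_setup() below wires
 * up the request queue and gendisk that hang off of it.
 */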
struct scm_request {
	struct scm_blk_dev *bdev;
	struct aidaw *next_aidaw;
	struct request **request;
	struct aob *aob;
	struct list_head list;
	u8 retries;
	int error;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct {
		enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
		struct list_head list;
		void **buf;
	} cluster;
#endif
};
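/*
 * A scm_request tracks one AOB (asynchronous operation block) in flight:
 * request[] holds the block layer requests folded into the AOB,
 * next_aidaw points at the next free indirect data address word for its
 * scatter list, and retries limits how often a failed AOB is restarted.
 */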
#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
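/*
 * to_aobrq() maps a pointer to the payload of an aob_rq_header back to
 * the enclosing header via container_of(). A minimal sketch of the round
 * trip, assuming the allocation pattern of scm_alloc_request() in
 * scm_blk.c:
 *
 *	struct aob_rq_header *aobrq;
 *	struct scm_request *scmrq;
 *
 *	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
 *	scmrq = (void *) aobrq->data;
 *	...
 *	aobrq = to_aobrq(scmrq);
 */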
int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
void scm_blk_dev_cleanup(struct scm_blk_dev *);
void scm_blk_set_available(struct scm_blk_dev *);
void scm_blk_irq(struct scm_device *, void *, int);
void scm_request_finish(struct scm_request *);
void scm_request_requeue(struct scm_request *);
struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
int scm_drv_init(void);
void scm_drv_cleanup(void);
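/*
 * Rough module init order, condensed from scm_blk.c in this release
 * (error unwinding omitted): validate the configured cluster size with
 * scm_cluster_size_valid(), register_blkdev(0, "scm"), preallocate
 * requests, set up the scm_debug area, then hook into the EADM bus via
 * scm_drv_init(). scm_drv_cleanup() undoes the bus registration on exit.
 */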
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
void __scm_free_rq_cluster(struct scm_request *);
int __scm_alloc_rq_cluster(struct scm_request *);
void scm_request_cluster_init(struct scm_request *);
bool scm_reserve_cluster(struct scm_request *);
void scm_release_cluster(struct scm_request *);
void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
bool scm_need_cluster_request(struct scm_request *);
void scm_initiate_cluster_request(struct scm_request *);
void scm_cluster_request_irq(struct scm_request *);
bool scm_test_cluster_request(struct scm_request *);
bool scm_cluster_size_valid(void);
#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
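/*
 * Without CONFIG_SCM_BLOCK_CLUSTER_WRITE the cluster hooks collapse into
 * no-ops so that the request path can call them unconditionally:
 * reservation always succeeds, no request ever needs or matches a cluster
 * request, and any cluster size is accepted.
 */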
static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
	return 0;
}
static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
static inline bool scm_reserve_cluster(struct scm_request *scmrq)
{
	return true;
}
static inline void scm_release_cluster(struct scm_request *scmrq) {}
static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
static inline bool scm_need_cluster_request(struct scm_request *scmrq)
{
	return false;
}
static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
static inline bool scm_test_cluster_request(struct scm_request *scmrq)
{
	return false;
}
static inline bool scm_cluster_size_valid(void)
{
	return true;
}
#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
extern debug_info_t *scm_debug;
#define SCM_LOG(imp, txt) do {					\
		debug_text_event(scm_debug, imp, txt);		\
	} while (0)
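/*
 * SCM_LOG() stores a short text event in the scm_debug area at importance
 * level 'imp'. A hypothetical call site:
 *
 *	SCM_LOG(2, "queue restart");
 */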
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
	if (!debug_level_enabled(scm_debug, level))
		return;

	while (length > 0) {
		debug_event(scm_debug, level, data, length);
		length -= scm_debug->buf_size;
		data += scm_debug->buf_size;
	}
}
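/*
 * debug_event() copies at most scm_debug->buf_size bytes per trace entry,
 * so SCM_LOG_HEX() above loops, advancing data and shrinking length by
 * buf_size, until the whole buffer has been split across entries.
 */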
static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
{
	struct {
		u64 address;
		u8 oper_state;
		u8 rank;
	} __packed data = {
		.address = scmdev->address,
		.oper_state = scmdev->attrs.oper_state,
		.rank = scmdev->attrs.rank,
	};

	SCM_LOG_HEX(level, &data, sizeof(data));
}
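/*
 * SCM_LOG_STATE() packs the device address together with the oper_state
 * and rank attributes into a single record and traces it via
 * SCM_LOG_HEX(), so one entry in a hex/ascii debug view shows the whole
 * device state snapshot.
 */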
#endif /* SCM_BLK_H */