Release 4.11 net/ceph/osd_client.c
#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#define OSD_OPREPLY_FRONT_LEN 512
static struct kmem_cache *ceph_osd_request_cache;
static const struct ceph_connection_operations osd_con_ops;
/*
* Implement client access to distributed object storage cluster.
*
* All data objects are stored within a cluster/cloud of OSDs, or
* "object storage devices." (Note that Ceph OSDs have _nothing_ to
* do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply
* remote daemons serving up and coordinating consistent and safe
* access to storage.
*
* Cluster membership and the mapping of data objects onto storage devices
* are described by the osd map.
*
* We keep track of pending OSD requests (read, write), resubmit
* requests to different OSDs when the cluster topology/data layout
* changes, and retry the affected requests when the communications
* channel with an OSD is reset.
*/
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
struct ceph_osd_linger_request *lreq);
#if 1
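/*
 * Probe for "held for write": if down_read_trylock() succeeds, no one
 * (the caller included) holds @sem for write, so drop the read lock
 * again and report false.  Only used by the WARN_ON() sanity checks
 * below.
 */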
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
bool wrlocked = true;
if (unlikely(down_read_trylock(sem))) {
wrlocked = false;
up_read(sem);
}
return wrlocked;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 41 | 100.00% | 1 | 100.00% |
Total | 41 | 100.00% | 1 | 100.00% |
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
WARN_ON(!rwsem_is_locked(&osdc->lock));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 16 | 66.67% | 1 | 50.00% |
Yehuda Sadeh Weinraub | 8 | 33.33% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 22 | 91.67% | 2 | 66.67% |
Yehuda Sadeh Weinraub | 2 | 8.33% | 1 | 33.33% |
Total | 24 | 100.00% | 3 | 100.00% |
static inline void verify_osd_locked(struct ceph_osd *osd)
{
struct ceph_osd_client *osdc = osd->o_osdc;
WARN_ON(!(mutex_is_locked(&osd->lock) &&
rwsem_is_locked(&osdc->lock)) &&
!rwsem_is_wrlocked(&osdc->lock));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 52 | 100.00% | 2 | 100.00% |
Total | 52 | 100.00% | 2 | 100.00% |
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
WARN_ON(!mutex_is_locked(&lreq->lock));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 6 | 54.55% | 2 | 66.67% |
Yehuda Sadeh Weinraub | 5 | 45.45% | 1 | 33.33% |
Total | 11 | 100.00% | 3 | 100.00% |
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 8 | 72.73% | 1 | 50.00% |
Yehuda Sadeh Weinraub | 3 | 27.27% | 1 | 50.00% |
Total | 11 | 100.00% | 2 | 100.00% |
static inline void verify_osd_locked(struct ceph_osd *osd) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 9 | 81.82% | 2 | 50.00% |
Yehuda Sadeh Weinraub | 1 | 9.09% | 1 | 25.00% |
Sage Weil | 1 | 9.09% | 1 | 25.00% |
Total | 11 | 100.00% | 4 | 100.00% |
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
#endif
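A minimal sketch of how these assertions are meant to be used (the function below is hypothetical; only the verify_*() helpers and osdc->lock come from this file): any path that mutates shared osd-client state asserts its locking contract on entry.

static void example_close_osd(struct ceph_osd_client *osdc,
			      struct ceph_osd *osd)
{
	verify_osdc_wrlocked(osdc);	/* caller must hold osdc->lock for write */

	/* ... unlink in-flight requests, erase osd from the client, ... */
}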
/*
* Calculate the mapping of a file extent onto an object, shortening the
* extent as necessary if it crosses an object boundary.  The result is
* used by the caller to fill out the osd op in the request message.
*/
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
u64 *objnum, u64 *objoff, u64 *objlen)
{
u64 orig_len = *plen;
int r;
/* object extent? */
r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
objoff, objlen);
if (r < 0)
return r;
if (*objlen < orig_len) {
*plen = *objlen;
dout(" skipping last %llu, final file extent %llu~%llu\n",
orig_len - *plen, off, *plen);
}
dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sage Weil | 65 | 57.52% | 2 | 22.22% |
Alex Elder | 31 | 27.43% | 5 | 55.56% |
Yehuda Sadeh Weinraub | 17 | 15.04% | 2 | 22.22% |
Total | 113 | 100.00% | 9 | 100.00% |
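A worked example, assuming the simplest non-striped layout (stripe_count == 1, stripe_unit == object_size == 4 MB): a 1 MB request at file offset 3.5 MB maps to objnum 0 with objoff 3.5 MB, but only 512 KB remain before the object boundary, so *objlen comes back as 512 KB and *plen is shortened to match; the caller must issue a second request against objnum 1 for the rest.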
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
memset(osd_data, 0, sizeof (*osd_data));
osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 30 | 100.00% | 1 | 100.00% |
Total | 30 | 100.00% | 1 | 100.00% |
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
struct page **pages, u64 length, u32 alignment,
bool pages_from_pool, bool own_pages)
{
osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
osd_data->pages = pages;
osd_data->length = length;
osd_data->alignment = alignment;
osd_data->pages_from_pool = pages_from_pool;
osd_data->own_pages = own_pages;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 65 | 100.00% | 2 | 100.00% |
Total | 65 | 100.00% | 2 | 100.00% |
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
struct ceph_pagelist *pagelist)
{
osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
osd_data->pagelist = pagelist;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 28 | 100.00% | 2 | 100.00% |
Total | 28 | 100.00% | 2 | 100.00% |
#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
struct bio *bio, size_t bio_length)
{
osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
osd_data->bio = bio;
osd_data->bio_length = bio_length;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 37 | 100.00% | 1 | 100.00% |
Total | 37 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_BLOCK */
#define osd_req_op_data(oreq, whch, typ, fld) \
({ \
struct ceph_osd_request *__oreq = (oreq); \
unsigned int __whch = (whch); \
BUG_ON(__whch >= __oreq->r_num_ops); \
&__oreq->r_ops[__whch].typ.fld; \
})
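For reference, osd_req_op_data(req, 0, extent, osd_data) is a GNU statement expression; it bounds-checks the op index and yields a pointer to the named field, expanding to roughly:

({
	struct ceph_osd_request *__oreq = (req);
	unsigned int __whch = (0);
	BUG_ON(__whch >= __oreq->r_num_ops);
	&__oreq->r_ops[__whch].extent.osd_data;	/* value of the expression */
})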
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
BUG_ON(which >= osd_req->r_num_ops);
return &osd_req->r_ops[which].raw_data_in;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 37 | 100.00% | 1 | 100.00% |
Total | 37 | 100.00% | 1 | 100.00% |
struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
unsigned int which)
{
return osd_req_op_data(osd_req, which, extent, osd_data);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 28 | 100.00% | 3 | 100.00% |
Total | 28 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(osd_req_op_extent_osd_data);
void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
unsigned int which, struct page **pages,
u64 length, u32 alignment,
bool pages_from_pool, bool own_pages)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_raw_data_in(osd_req, which);
ceph_osd_data_pages_init(osd_data, pages, length, alignment,
pages_from_pool, own_pages);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 61 | 100.00% | 1 | 100.00% |
Total | 61 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
unsigned int which, struct page **pages,
u64 length, u32 alignment,
bool pages_from_pool, bool own_pages)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
ceph_osd_data_pages_init(osd_data, pages, length, alignment,
pages_from_pool, own_pages);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 65 | 100.00% | 2 | 100.00% |
Total | 65 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
unsigned int which, struct ceph_pagelist *pagelist)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
ceph_osd_data_pagelist_init(osd_data, pagelist);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 44 | 100.00% | 3 | 100.00% |
Total | 44 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
unsigned int which, struct bio *bio, size_t bio_length)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 49 | 100.00% | 3 | 100.00% |
Total | 49 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */
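Typical use of the pages variant, as a sketch (req, len and num_pages are assumed to come from the surrounding request setup):

	struct page **pages;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	/* own_pages == true: ceph_osd_data_release() frees the vector
	 * when the request is torn down */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, true);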
static void osd_req_op_cls_request_info_pagelist(
struct ceph_osd_request *osd_req,
unsigned int which, struct ceph_pagelist *pagelist)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, cls, request_info);
ceph_osd_data_pagelist_init(osd_data, pagelist);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 45 | 100.00% | 3 | 100.00% |
Total | 45 | 100.00% | 3 | 100.00% |
void osd_req_op_cls_request_data_pagelist(
struct ceph_osd_request *osd_req,
unsigned int which, struct ceph_pagelist *pagelist)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, cls, request_data);
ceph_osd_data_pagelist_init(osd_data, pagelist);
osd_req->r_ops[which].cls.indata_len += pagelist->length;
osd_req->r_ops[which].indata_len += pagelist->length;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 44 | 61.11% | 2 | 66.67% |
Ilya Dryomov | 28 | 38.89% | 1 | 33.33% |
Total | 72 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
unsigned int which, struct page **pages, u64 length,
u32 alignment, bool pages_from_pool, bool own_pages)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, cls, request_data);
ceph_osd_data_pages_init(osd_data, pages, length, alignment,
pages_from_pool, own_pages);
osd_req->r_ops[which].cls.indata_len += length;
osd_req->r_ops[which].indata_len += length;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 65 | 73.03% | 1 | 50.00% |
Ilya Dryomov | 24 | 26.97% | 1 | 50.00% |
Total | 89 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
unsigned int which, struct page **pages, u64 length,
u32 alignment, bool pages_from_pool, bool own_pages)
{
struct ceph_osd_data *osd_data;
osd_data = osd_req_op_data(osd_req, which, cls, response_data);
ceph_osd_data_pages_init(osd_data, pages, length, alignment,
pages_from_pool, own_pages);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 65 | 100.00% | 3 | 100.00% |
Total | 65 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
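For CEPH_OSD_OP_CALL the three data items split as follows: request_info carries the encoded class and method names, request_data the input payload, and response_data the buffer for the method's output.  Note that the request_data helpers above bump both cls.indata_len (the payload length encoded into the CALL op itself) and the op-wide indata_len (outgoing data accounting for the message).  A wiring sketch, with hypothetical class/method names and buffers, relying on osd_req_op_cls_init() as defined later in this file:

	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "myclass", "mymethod");
	osd_req_op_cls_request_data_pagelist(req, 0, in_pagelist);
	osd_req_op_cls_response_data_pages(req, 0, &reply_page, PAGE_SIZE, 0,
					   false, false);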
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
switch (osd_data->type) {
case CEPH_OSD_DATA_TYPE_NONE:
return 0;
case CEPH_OSD_DATA_TYPE_PAGES:
return osd_data->length;
case CEPH_OSD_DATA_TYPE_PAGELIST:
return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
case CEPH_OSD_DATA_TYPE_BIO:
return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
default:
WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
return 0;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 81 | 100.00% | 1 | 100.00% |
Total | 81 | 100.00% | 1 | 100.00% |
static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
int num_pages;
num_pages = calc_pages_for((u64)osd_data->alignment,
(u64)osd_data->length);
ceph_release_page_vector(osd_data->pages, num_pages);
}
ceph_osd_data_init(osd_data);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 61 | 100.00% | 2 | 100.00% |
Total | 61 | 100.00% | 2 | 100.00% |
static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
unsigned int which)
{
struct ceph_osd_req_op *op;
BUG_ON(which >= osd_req->r_num_ops);
op = &osd_req->r_ops[which];
switch (op->op) {
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_WRITEFULL:
ceph_osd_data_release(&op->extent.osd_data);
break;
case CEPH_OSD_OP_CALL:
ceph_osd_data_release(&op->cls.request_info);
ceph_osd_data_release(&op->cls.request_data);
ceph_osd_data_release(&op->cls.response_data);
break;
case CEPH_OSD_OP_SETXATTR:
case CEPH_OSD_OP_CMPXATTR:
ceph_osd_data_release(&op->xattr.osd_data);
break;
case CEPH_OSD_OP_STAT:
ceph_osd_data_release(&op->raw_data_in);
break;
case CEPH_OSD_OP_NOTIFY_ACK:
ceph_osd_data_release(&op->notify_ack.request_data);
break;
case CEPH_OSD_OP_NOTIFY:
ceph_osd_data_release(&op->notify.request_data);
ceph_osd_data_release(&op->notify.response_data);
break;
case CEPH_OSD_OP_LIST_WATCHERS:
ceph_osd_data_release(&op->list_watchers.response_data);
break;
default:
break;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 100 | 54.35% | 3 | 33.33% |
Ilya Dryomov | 41 | 22.28% | 3 | 33.33% |
Yan, Zheng | 29 | 15.76% | 2 | 22.22% |
Douglas Fuller | 14 | 7.61% | 1 | 11.11% |
Total | 184 | 100.00% | 9 | 100.00% |
/*
* Assumes @t is zero-initialized.
*/
static void target_init(struct ceph_osd_request_target *t)
{
ceph_oid_init(&t->base_oid);
ceph_oloc_init(&t->base_oloc);
ceph_oid_init(&t->target_oid);
ceph_oloc_init(&t->target_oloc);
ceph_osds_init(&t->acting);
ceph_osds_init(&t->up);
t->size = -1;
t->min_size = -1;
t->osd = CEPH_HOMELESS_OSD;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 79 | 100.00% | 1 | 100.00% |
Total | 79 | 100.00% | 1 | 100.00% |
static void target_copy(struct ceph_osd_request_target *dest,
const struct ceph_osd_request_target *src)
{
ceph_oid_copy(&dest->base_oid, &src->base_oid);
ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
ceph_oid_copy(&dest->target_oid, &src->target_oid);
ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);
dest->pgid = src->pgid; /* struct */
dest->pg_num = src->pg_num;
dest->pg_num_mask = src->pg_num_mask;
ceph_osds_copy(&dest->acting, &src->acting);
ceph_osds_copy(&dest->up, &src->up);
dest->size = src->size;
dest->min_size = src->min_size;
dest->sort_bitwise = src->sort_bitwise;
dest->flags = src->flags;
dest->paused = src->paused;
dest->osd = src->osd;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 168 | 100.00% | 1 | 100.00% |
Total | 168 | 100.00% | 1 | 100.00% |
static void target_destroy(struct ceph_osd_request_target *t)
{
ceph_oid_destroy(&t->base_oid);
ceph_oloc_destroy(&t->base_oloc);
ceph_oid_destroy(&t->target_oid);
ceph_oloc_destroy(&t->target_oloc);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 27 | 62.79% | 1 | 50.00% |
Yan, Zheng | 16 | 37.21% | 1 | 50.00% |
Total | 43 | 100.00% | 2 | 100.00% |
/*
* requests
*/
static void request_release_checks(struct ceph_osd_request *req)
{
WARN_ON(!RB_EMPTY_NODE(&req->r_node));
WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
WARN_ON(!list_empty(&req->r_unsafe_item));
WARN_ON(req->r_osd);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 46 | 85.19% | 4 | 57.14% |
Sage Weil | 7 | 12.96% | 2 | 28.57% |
Alex Elder | 1 | 1.85% | 1 | 14.29% |
Total | 54 | 100.00% | 7 | 100.00% |
static void ceph_osdc_release_request(struct kref *kref)
{
struct ceph_osd_request *req = container_of(kref,
struct ceph_osd_request, r_kref);
unsigned int which;
dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
req->r_request, req->r_reply);
request_release_checks(req);
if (req->r_request)
ceph_msg_put(req->r_request);
if (req->r_reply)
ceph_msg_put(req->r_reply);
for (which = 0; which < req->r_num_ops; which++)
osd_req_op_data_release(req, which);
target_destroy(&req->r_t);
ceph_put_snap_context(req->r_snapc);
if (req->r_mempool)
mempool_free(req, req->r_osdc->req_mempool);
else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
kmem_cache_free(ceph_osd_request_cache, req);
else
kfree(req);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 74 | 48.05% | 4 | 30.77% |
Sage Weil | 45 | 29.22% | 2 | 15.38% |
Alex Elder | 31 | 20.13% | 6 | 46.15% |
Yehuda Sadeh Weinraub | 4 | 2.60% | 1 | 7.69% |
Total | 154 | 100.00% | 13 | 100.00% |
void ceph_osdc_get_request(struct ceph_osd_request *req)
{
dout("%s %p (was %d)\n", __func__, req,
kref_read(&req->r_kref));
kref_get(&req->r_kref);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 34 | 97.14% | 1 | 50.00% |
Peter Zijlstra | 1 | 2.86% | 1 | 50.00% |
Total | 35 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(ceph_osdc_get_request);
void ceph_osdc_put_request(struct ceph_osd_request *req)
{
if (req) {
dout("%s %p (was %d)\n", __func__, req,
kref_read(&req->r_kref));
kref_put(&req->r_kref, ceph_osdc_release_request);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 39 | 90.70% | 2 | 50.00% |
Yehuda Sadeh Weinraub | 3 | 6.98% | 1 | 25.00% |
Peter Zijlstra | 1 | 2.33% | 1 | 25.00% |
Total | 43 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(ceph_osdc_put_request);
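The expected lifecycle, sketched (hypothetical caller; op setup and submission elided).  kref_init() in request_init() below gives the allocator one reference, which the final put drops:

	struct ceph_osd_request *req;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
	if (!req)
		return -ENOMEM;
	/* ... set up ops, ceph_osdc_alloc_messages(), submit, wait ... */
	ceph_osdc_put_request(req);	/* drop the allocation reference */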
static void request_init(struct ceph_osd_request *req)
{
/* req only, each op is zeroed in _osd_req_op_init() */
memset(req, 0, sizeof(*req));
kref_init(&req->r_kref);
init_completion(&req->r_completion);
RB_CLEAR_NODE(&req->r_node);
RB_CLEAR_NODE(&req->r_mc_node);
INIT_LIST_HEAD(&req->r_unsafe_item);
target_init(&req->r_t);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 73 | 100.00% | 3 | 100.00% |
Total | 73 | 100.00% | 3 | 100.00% |
/*
* This is ugly, but it allows us to reuse linger registration and ping
* requests, keeping the structure of the code around send_linger{_ping}()
* reasonable. Setting up a min_nr=2 mempool for each linger request
* and dealing with copying ops (this blasts req only, watch op remains
* intact) isn't any better.
*/
static void request_reinit(struct ceph_osd_request *req)
{
struct ceph_osd_client *osdc = req->r_osdc;
bool mempool = req->r_mempool;
unsigned int num_ops = req->r_num_ops;
u64 snapid = req->r_snapid;
struct ceph_snap_context *snapc = req->r_snapc;
bool linger = req->r_linger;
struct ceph_msg *request_msg = req->r_request;
struct ceph_msg *reply_msg = req->r_reply;
dout("%s req %p\n", __func__, req);
WARN_ON(kref_read(&req->r_kref) != 1);
request_release_checks(req);
WARN_ON(kref_read(&request_msg->kref) != 1);
WARN_ON(kref_read(&reply_msg->kref) != 1);
target_destroy(&req->r_t);
request_init(req);
req->r_osdc = osdc;
req->r_mempool = mempool;
req->r_num_ops = num_ops;
req->r_snapid = snapid;
req->r_snapc = snapc;
req->r_linger = linger;
req->r_request = request_msg;
req->r_reply = reply_msg;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 187 | 98.42% | 2 | 66.67% |
Peter Zijlstra | 3 | 1.58% | 1 | 33.33% |
Total | 190 | 100.00% | 3 | 100.00% |
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_snap_context *snapc,
unsigned int num_ops,
bool use_mempool,
gfp_t gfp_flags)
{
struct ceph_osd_request *req;
if (use_mempool) {
BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
req = mempool_alloc(osdc->req_mempool, gfp_flags);
} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
} else {
BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
gfp_flags);
}
if (unlikely(!req))
return NULL;
request_init(req);
req->r_osdc = osdc;
req->r_mempool = use_mempool;
req->r_num_ops = num_ops;
req->r_snapid = CEPH_NOSNAP;
req->r_snapc = ceph_get_snap_context(snapc);
dout("%s req %p\n", __func__, req);
return req;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 73 | 42.94% | 4 | 33.33% |
Yehuda Sadeh Weinraub | 45 | 26.47% | 2 | 16.67% |
Sage Weil | 38 | 22.35% | 4 | 33.33% |
Alex Elder | 14 | 8.24% | 2 | 16.67% |
Total | 170 | 100.00% | 12 | 100.00% |
EXPORT_SYMBOL(ceph_osdc_alloc_request);
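/*
 * On-wire size of an object locator: 64-bit pool id, 32-bit "preferred"
 * osd, 32-bit (always empty) key length, plus a length-prefixed optional
 * namespace string.  (Field breakdown inferred from the matching encode
 * path, which is outside this excerpt.)
 */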
static int ceph_oloc_encoding_size(struct ceph_object_locator *oloc)
{
return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yan, Zheng | 34 | 100.00% | 1 | 100.00% |
Total | 34 | 100.00% | 1 | 100.00% |
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
struct ceph_osd_client *osdc = req->r_osdc;
struct ceph_msg *msg;
int msg_size;
WARN_ON(ceph_oid_empty(&req->r_base_oid));
WARN_ON(ceph_oloc_empty(&req->r_base_oloc));
/* create request message */
msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
msg_size += CEPH_ENCODING_START_BLK_LEN +
ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
msg_size += 1 + 8 + 4 + 4; /* pgid */
msg_size += 4 + req->r_base_oid.name_len; /* oid */
msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
msg_size += 8; /* snapid */
msg_size += 8; /* snap_seq */
msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
msg_size += 4; /* retry_attempt */
if (req->r_mempool)
msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
else
msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
if (!msg)
return -ENOMEM;
memset(msg->front.iov_base, 0, msg->front.iov_len);
req->r_request = msg;
/* create reply message */
msg_size = OSD_OPREPLY_FRONT_LEN;
msg_size += req->r_base_oid.name_len;
msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);
if (req->r_mempool)
msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
else
msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
if (!msg)
return -ENOMEM;
req->r_reply = msg;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ilya Dryomov | 212 | 71.62% | 4 | 33.33% |
Sage Weil | 54 | 18.24% | 5 | 41.67% |
Yan, Zheng | 19 | 6.42% | 1 | 8.33% |
Yehuda Sadeh Weinraub | 11 | 3.72% | 2 | 16.67% |
Total | 296 | 100.00% | 12 | 100.00% |
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
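A worked size for the request front, assuming a single-op request on a 3-byte oid with no pool namespace and no snap context, and taking CEPH_ENCODING_START_BLK_LEN as the usual 6-byte version/compat/length header: 12 (client_inc, osdmap_epoch, flags) + 20 (mtime, reassert_version) + 6 + 20 (oloc) + 17 (pgid) + 7 (oid) + 2 + sizeof(struct ceph_osd_op) (ops) + 8 (snapid) + 8 (snap_seq) + 4 (num_snaps) + 4 (retry_attempt) = 108 + sizeof(struct ceph_osd_op) bytes.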
static bool osd_req_opcode_valid(u16 opcode)
{
switch (opcode) {
#define GENERATE_CASE(op, opcode, str) case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
default:
return false;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 19 | 51.35% | 1 | 50.00% |
Ilya Dryomov | 18 | 48.65% | 1 | 50.00% |
Total | 37 | 100.00% | 2 | 100.00% |
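__CEPH_FORALL_OSD_OPS is an X-macro list: it applies GENERATE_CASE to every (op, opcode, str) triple it defines, so after preprocessing the switch contains one case per known opcode, along the lines of:

	switch (opcode) {
	case CEPH_OSD_OP_READ: return true;
	case CEPH_OSD_OP_WRITE: return true;
	/* ... one case for each __CEPH_FORALL_OSD_OPS entry ... */
	default:
		return false;
	}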
/*
* This is an osd op init function for opcodes that have no data or
* other information associated with them. It also serves as a
* common init routine for all the other init functions, below.
*/
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req,