Contributors: 22

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| David Sterba | 209 | 23.48% | 21 | 23.60% |
| Chris Mason | 204 | 22.92% | 12 | 13.48% |
| Dennis Zhou | 118 | 13.26% | 6 | 6.74% |
| Li Zefan | 104 | 11.69% | 3 | 3.37% |
| Qu Wenruo | 92 | 10.34% | 11 | 12.36% |
| Christoph Hellwig | 51 | 5.73% | 12 | 13.48% |
| Anand Jain | 29 | 3.26% | 4 | 4.49% |
| Nick Terrell | 21 | 2.36% | 1 | 1.12% |
| Li Zetao | 8 | 0.90% | 4 | 4.49% |
| Filipe David Borba Manana | 8 | 0.90% | 2 | 2.25% |
| Timofey Titovets | 8 | 0.90% | 1 | 1.12% |
| Johannes Thumshirn | 7 | 0.79% | 1 | 1.12% |
| Omar Sandoval | 6 | 0.67% | 1 | 1.12% |
| Mingming Cao | 5 | 0.56% | 1 | 1.12% |
| Nikolay Borisov | 4 | 0.45% | 1 | 1.12% |
| Sage Weil | 4 | 0.45% | 1 | 1.12% |
| Liu Bo | 3 | 0.34% | 2 | 2.25% |
| Chengguang Xu | 3 | 0.34% | 1 | 1.12% |
| Byongho Lee | 2 | 0.22% | 1 | 1.12% |
| Josef Whiter | 2 | 0.22% | 1 | 1.12% |
| Jan Engelhardt | 1 | 0.11% | 1 | 1.12% |
| Bart Van Assche | 1 | 0.11% | 1 | 1.12% |
| Total | 890 | | 89 | |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2008 Oracle. All rights reserved.
*/
#ifndef BTRFS_COMPRESSION_H
#define BTRFS_COMPRESSION_H
#include <linux/sizes.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include "bio.h"
struct address_space;
struct page;
struct inode;
struct btrfs_inode;
struct btrfs_ordered_extent;
struct btrfs_bio;
/*
 * We want to make sure that the amount of RAM required to uncompress an
 * extent is reasonable, so we limit the total size in RAM of a compressed
 * extent to 128k. This is a crucial number because it also controls how
 * easily we can spread reads across CPUs for decompression.
 *
 * We also want to make sure the amount of IO required to do a random read
 * is reasonably small, so we limit the size of a compressed extent to 128k.
 */
/* Maximum length of compressed data stored on disk */
#define BTRFS_MAX_COMPRESSED (SZ_128K)
#define BTRFS_MAX_COMPRESSED_PAGES (BTRFS_MAX_COMPRESSED / PAGE_SIZE)
static_assert((BTRFS_MAX_COMPRESSED % PAGE_SIZE) == 0);
/* Maximum size of data before compression */
#define BTRFS_MAX_UNCOMPRESSED (SZ_128K)
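
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * with 4K pages, BTRFS_MAX_COMPRESSED_PAGES is 128K / 4K = 32. Because both
 * limits above are 128K, a fully compressible 1MiB write ends up as eight
 * independent 128K compressed extents, so a random read never has to
 * decompress more than 128K to reach any single byte.
 */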
#define BTRFS_ZLIB_DEFAULT_LEVEL 3
struct compressed_bio {
	/* Number of compressed folios in the array. */
	unsigned int nr_folios;

	/* The folios with the compressed data on them. */
	struct folio **compressed_folios;

	/* Starting offset in the inode for our pages. */
	u64 start;

	/* Number of bytes in the inode we're working on */
	unsigned int len;

	/* Number of bytes on disk */
	unsigned int compressed_len;

	/* The compression algorithm for this bio */
	u8 compress_type;

	/* Whether this is a write for writeback. */
	bool writeback;

	union {
		/* For reads, this is the bio we are copying the data into */
		struct btrfs_bio *orig_bbio;
		struct work_struct write_end_work;
	};

	/* Must be last. */
	struct btrfs_bio bbio;
};
static inline unsigned int btrfs_compress_type(unsigned int type_level)
{
	return (type_level & 0xF);
}

static inline unsigned int btrfs_compress_level(unsigned int type_level)
{
	return ((type_level & 0xF0) >> 4);
}
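
/*
 * Packing example (editor's illustration, derived from the two helpers
 * above): the low nibble encodes the algorithm and the next nibble the
 * level, so a zlib level-3 request would be built and decoded as:
 *
 *	unsigned int type_level = (3 << 4) | BTRFS_COMPRESS_ZLIB;
 *
 *	btrfs_compress_type(type_level);   returns BTRFS_COMPRESS_ZLIB (1)
 *	btrfs_compress_level(type_level);  returns 3
 */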
/* @range_end must be exclusive. */
static inline u32 btrfs_calc_input_length(u64 range_end, u64 cur)
{
	u64 page_end = round_down(cur, PAGE_SIZE) + PAGE_SIZE;

	return min(range_end, page_end) - cur;
}
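
/*
 * Worked example (editor's sketch): with 4K pages, cur = 0x1800 and
 * range_end = 0x3000 give page_end = 0x2000, so the function returns
 * 0x800. The returned length is always clamped at the next page
 * boundary, so one input chunk never crosses a page.
 */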
int __init btrfs_init_compress(void);
void __cold btrfs_exit_compress(void);
int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
			  u64 start, struct folio **folios, unsigned long *out_folios,
			  unsigned long *total_in, unsigned long *total_out);
int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
		     unsigned long start_byte, size_t srclen, size_t destlen);
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
			      struct compressed_bio *cb, u32 decompressed);
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
				   struct folio **compressed_folios,
				   unsigned int nr_folios, blk_opf_t write_flags,
				   bool writeback);
void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
unsigned int btrfs_compress_str2level(unsigned int type, const char *str);
struct folio *btrfs_alloc_compr_folio(void);
void btrfs_free_compr_folio(struct folio *folio);
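
/*
 * Illustrative call pattern (editor's sketch; everything other than the
 * declarations above is an assumption, and error/cleanup paths are elided):
 *
 *	struct folio *folios[BTRFS_MAX_COMPRESSED_PAGES] = { 0 };
 *	unsigned long nr_folios = 0, total_in = 0, total_out = 0;
 *	unsigned int type_level = (BTRFS_ZLIB_DEFAULT_LEVEL << 4) | BTRFS_COMPRESS_ZLIB;
 *	int ret;
 *
 *	ret = btrfs_compress_folios(type_level, mapping, start, folios,
 *				    &nr_folios, &total_in, &total_out);
 *	if (ret == 0)
 *		(hand the nr_folios compressed folios to
 *		 btrfs_submit_compressed_write() through an ordered extent)
 */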
enum btrfs_compression_type {
	BTRFS_COMPRESS_NONE  = 0,
	BTRFS_COMPRESS_ZLIB  = 1,
	BTRFS_COMPRESS_LZO   = 2,
	BTRFS_COMPRESS_ZSTD  = 3,
	BTRFS_NR_COMPRESS_TYPES = 4,
};
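
/*
 * Note (editor's addition): these numeric values double as the on-disk
 * encoding of a file extent's compression field, so BTRFS_COMPRESS_NONE must
 * stay 0 and existing values must never be renumbered; new algorithms can
 * only be appended before BTRFS_NR_COMPRESS_TYPES.
 */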
struct workspace_manager {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};
struct list_head *btrfs_get_workspace(int type, unsigned int level);
void btrfs_put_workspace(int type, struct list_head *ws);
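
/*
 * Typical usage (editor's sketch, assuming the get/put pairing implied by
 * the declarations above; the list_head is embedded in an algorithm-specific
 * workspace that the caller recovers with container_of()):
 *
 *	struct list_head *ws;
 *
 *	ws = btrfs_get_workspace(BTRFS_COMPRESS_ZSTD, 3);  (may block until
 *	                                                    one is free)
 *	(... compress or decompress using the workspace ...)
 *	btrfs_put_workspace(BTRFS_COMPRESS_ZSTD, ws);
 */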
struct btrfs_compress_op {
	struct workspace_manager *workspace_manager;
	/* Maximum level supported by the compression algorithm */
	unsigned int max_level;
	unsigned int default_level;
};
/* The heuristic workspaces are managed via the 0th workspace manager */
#define BTRFS_NR_WORKSPACE_MANAGERS BTRFS_NR_COMPRESS_TYPES
extern const struct btrfs_compress_op btrfs_heuristic_compress;
extern const struct btrfs_compress_op btrfs_zlib_compress;
extern const struct btrfs_compress_op btrfs_lzo_compress;
extern const struct btrfs_compress_op btrfs_zstd_compress;
const char *btrfs_compress_type2str(enum btrfs_compression_type type);
bool btrfs_compress_is_valid_type(const char *str, size_t len);
int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end);
int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
				     struct folio **in_folio_ret);
int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
			 u64 start, struct folio **folios, unsigned long *out_folios,
			 unsigned long *total_in, unsigned long *total_out);
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zlib_decompress(struct list_head *ws, const u8 *data_in,
		    struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
		    size_t destlen);
struct list_head *zlib_alloc_workspace(unsigned int level);
void zlib_free_workspace(struct list_head *ws);
struct list_head *zlib_get_workspace(unsigned int level);
int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
			u64 start, struct folio **folios, unsigned long *out_folios,
			unsigned long *total_in, unsigned long *total_out);
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress(struct list_head *ws, const u8 *data_in,
		   struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
		   size_t destlen);
struct list_head *lzo_alloc_workspace(void);
void lzo_free_workspace(struct list_head *ws);
int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
			 u64 start, struct folio **folios, unsigned long *out_folios,
			 unsigned long *total_in, unsigned long *total_out);
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zstd_decompress(struct list_head *ws, const u8 *data_in,
		    struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
		    size_t destlen);
void zstd_init_workspace_manager(void);
void zstd_cleanup_workspace_manager(void);
struct list_head *zstd_alloc_workspace(unsigned int level);
void zstd_free_workspace(struct list_head *ws);
struct list_head *zstd_get_workspace(unsigned int level);
void zstd_put_workspace(struct list_head *ws);
#endif