// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <linux/shrinker.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "fs.h"
#include "btrfs_inode.h"
#include "bio.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
#include "subpage.h"
#include "messages.h"
#include "super.h"

static struct bio_set btrfs_compressed_bioset;

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	default:
		break;
	}

	return NULL;
}

static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
{
	return container_of(bbio, struct compressed_bio, bbio);
}

static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
						   u64 start, blk_opf_t op,
						   btrfs_bio_end_io_t end_io)
{
	struct btrfs_bio *bbio;

	bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
					  GFP_NOFS, &btrfs_compressed_bioset));
	btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL);
	bbio->inode = inode;
	bbio->file_offset = start;
	return to_compressed_bio(bbio);
}

bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}
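
/*
 * Illustrative note (editor's sketch, not part of the kernel code): the
 * prefix match above means a mount option string with a level suffix is
 * accepted as-is, e.g.:
 *
 *	btrfs_compress_is_valid_type("zlib:9", 6) -> true ("zlib" prefix)
 *	btrfs_compress_is_valid_type("zst", 3)    -> false (shorter than "zstd")
 */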

static int compression_compress_pages(int type, struct list_head *ws,
				      struct address_space *mapping, u64 start,
				      struct folio **folios, unsigned long *out_folios,
				      unsigned long *total_in, unsigned long *total_out)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
		return zlib_compress_folios(ws, mapping, start, folios,
					    out_folios, total_in, total_out);
	case BTRFS_COMPRESS_LZO:
		return lzo_compress_folios(ws, mapping, start, folios,
					   out_folios, total_in, total_out);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_compress_folios(ws, mapping, start, folios,
					    out_folios, total_in, total_out);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can happen when compression races with a remount that
		 * sets it to 'no compress' while the caller hasn't called
		 * inode_need_compress() to check whether we really need to
		 * compress.
		 *
		 * Not a big deal, we just need to inform the caller that we
		 * haven't allocated any pages yet.
		 */
		*out_folios = 0;
		return -E2BIG;
	}
}

static int compression_decompress_bio(struct list_head *ws,
				      struct compressed_bio *cb)
{
	switch (cb->compress_type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int compression_decompress(int type, struct list_head *ws,
		const u8 *data_in, struct folio *dest_folio,
		unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_folio,
						dest_pgoff, srclen, destlen);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_folio,
						dest_pgoff, srclen, destlen);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_folio,
						dest_pgoff, srclen, destlen);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void btrfs_free_compressed_folios(struct compressed_bio *cb)
{
	for (unsigned int i = 0; i < cb->nr_folios; i++)
		btrfs_free_compr_folio(cb->compressed_folios[i]);
	kfree(cb->compressed_folios);
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

/*
 * Global cache of last unused pages for compression/decompression.
 */
static struct btrfs_compr_pool {
	struct shrinker *shrinker;
	spinlock_t lock;
	struct list_head list;
	int count;
	int thresh;
} compr_pool;

static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_control *sc)
{
	int ret;

	/*
	 * Read each value only once: if the subtraction happened inside the
	 * return statement, the values could be re-read and we could
	 * accidentally return a negative number even though the comparison
	 * found the difference positive.
	 */
	ret = READ_ONCE(compr_pool.count) - READ_ONCE(compr_pool.thresh);

	return ret > 0 ? ret : 0;
}

static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
{
	struct list_head remove;
	struct list_head *tmp, *next;
	int freed;

	if (compr_pool.count == 0)
		return SHRINK_STOP;

	INIT_LIST_HEAD(&remove);

	/* For now, just simply drain the whole list. */
	spin_lock(&compr_pool.lock);
	list_splice_init(&compr_pool.list, &remove);
	freed = compr_pool.count;
	compr_pool.count = 0;
	spin_unlock(&compr_pool.lock);

	list_for_each_safe(tmp, next, &remove) {
		struct page *page = list_entry(tmp, struct page, lru);

		ASSERT(page_ref_count(page) == 1);
		put_page(page);
	}

	return freed;
}

/*
 * Common wrappers for page allocation used by the compression code.
 */
struct folio *btrfs_alloc_compr_folio(void)
{
	struct folio *folio = NULL;

	spin_lock(&compr_pool.lock);
	if (compr_pool.count > 0) {
		folio = list_first_entry(&compr_pool.list, struct folio, lru);
		list_del_init(&folio->lru);
		compr_pool.count--;
	}
	spin_unlock(&compr_pool.lock);

	if (folio)
		return folio;

	return folio_alloc(GFP_NOFS, 0);
}

void btrfs_free_compr_folio(struct folio *folio)
{
	bool do_free = false;

	spin_lock(&compr_pool.lock);
	if (compr_pool.count > compr_pool.thresh) {
		do_free = true;
	} else {
		list_add(&folio->lru, &compr_pool.list);
		compr_pool.count++;
	}
	spin_unlock(&compr_pool.lock);

	if (!do_free)
		return;

	ASSERT(folio_ref_count(folio) == 1);
	folio_put(folio);
}

static void end_bbio_compressed_read(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);
	blk_status_t status = bbio->bio.bi_status;

	if (!status)
		status = errno_to_blk_status(btrfs_decompress_bio(cb));

	btrfs_free_compressed_folios(cb);
	btrfs_bio_end_io(cb->orig_bbio, status);
	bio_put(&bbio->bio);
}

/*
 * Clear the writeback bits on all of the file pages for a compressed write.
 */
static noinline void end_compressed_writeback(const struct compressed_bio *cb)
{
	struct inode *inode = &cb->bbio.inode->vfs_inode;
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	const int error = blk_status_to_errno(cb->bbio.bio.bi_status);
	int i;
	int ret;

	if (error)
		mapping_set_error(inode->i_mapping, error);

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		ret = filemap_get_folios(inode->i_mapping, &index, end_index,
				&fbatch);

		if (ret == 0)
			return;

		for (i = 0; i < ret; i++) {
			struct folio *folio = fbatch.folios[i];

			btrfs_folio_clamp_clear_writeback(fs_info, folio,
							  cb->start, cb->len);
		}
		folio_batch_release(&fbatch);
	}
	/* The inode may be gone now. */
}

static void btrfs_finish_compressed_write_work(struct work_struct *work)
{
	struct compressed_bio *cb =
		container_of(work, struct compressed_bio, write_end_work);

	btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
				    cb->bbio.bio.bi_status == BLK_STS_OK);

	if (cb->writeback)
		end_compressed_writeback(cb);
	/* Note, our inode could be gone now. */

	btrfs_free_compressed_folios(cb);
	bio_put(&cb->bbio.bio);
}

/*
 * Do the cleanup once all the compressed pages hit the disk.  This will clear
 * writeback on the file pages and free the compressed pages.
 *
 * This also calls the writeback end hooks for the file pages so that metadata
 * and checksums can be updated in the file.
 */
static void end_bbio_compressed_write(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);
	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;

	queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
}

static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
{
	struct bio *bio = &cb->bbio.bio;
	u32 offset = 0;

	while (offset < cb->compressed_len) {
		int ret;
		u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);

		/* Maximum compressed extent is smaller than bio size limit. */
		ret = bio_add_folio(bio, cb->compressed_folios[offset >> PAGE_SHIFT],
				    len, 0);
		ASSERT(ret);
		offset += len;
	}
}

/*
 * Worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback and the
 * compressed pages should have a reference on them for dropping when the IO
 * is complete.
 *
 * This also checksums the file bytes and gets things ready for the end io
 * hooks.
 */
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
				   struct folio **compressed_folios,
				   unsigned int nr_folios,
				   blk_opf_t write_flags,
				   bool writeback)
{
	struct btrfs_inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct compressed_bio *cb;

	ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
	ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));

	cb = alloc_compressed_bio(inode, ordered->file_offset,
				  REQ_OP_WRITE | write_flags,
				  end_bbio_compressed_write);
	cb->start = ordered->file_offset;
	cb->len = ordered->num_bytes;
	cb->compressed_folios = compressed_folios;
	cb->compressed_len = ordered->disk_num_bytes;
	cb->writeback = writeback;
	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
	cb->nr_folios = nr_folios;
	cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
	cb->bbio.ordered = ordered;
	btrfs_add_compressed_bio_folios(cb);

	btrfs_submit_bbio(&cb->bbio, 0);
}

/*
 * Add extra pages in the same compressed file extent so that we don't need to
 * re-read the same extent again and again.
 *
 * NOTE: this won't work well for subpage, as for subpage reads we lock the
 * full page and then submit a bio for each compressed/regular extent.
 *
 * This means that if several sectors in the same page point to the same
 * on-disk compressed data, we will re-read the same extent many times and
 * this function can only help for the next page.
 */
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb,
				     int *memstall, unsigned long *pflags)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	unsigned long end_index;
	struct bio *orig_bio = &cb->orig_bbio->bio;
	u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
	u64 isize = i_size_read(inode);
	int ret;
	struct folio *folio;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	int sectors_missed = 0;

	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	/*
	 * The current subpage support only covers a 64K page size, which means
	 * the maximum compressed extent size (128K) is just 2x the page size.
	 * This makes readahead less effective, so disable readahead for
	 * subpage for now, until full compressed write support lands.
	 */
	if (fs_info->sectorsize < PAGE_SIZE)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (cur < compressed_end) {
		u64 page_end;
		u64 pg_index = cur >> PAGE_SHIFT;
		u32 add_size;

		if (pg_index > end_index)
			break;

		folio = __filemap_get_folio(mapping, pg_index, 0, 0);
		if (!IS_ERR(folio)) {
			u64 folio_sz = folio_size(folio);
			u64 offset = offset_in_folio(folio, cur);

			folio_put(folio);
			sectors_missed += (folio_sz - offset) >>
					  fs_info->sectorsize_bits;

			/* Beyond threshold, no need to continue */
			if (sectors_missed > 4)
				break;

			/*
			 * Jump to the next page start as we already have a
			 * page for the current offset.
			 */
			cur += (folio_sz - offset);
			continue;
		}

		folio = filemap_alloc_folio(mapping_gfp_constraint(mapping,
								   ~__GFP_FS), 0);
		if (!folio)
			break;

		if (filemap_add_folio(mapping, folio, pg_index, GFP_NOFS)) {
			/* There is already a page, skip to page end */
			cur += folio_size(folio);
			folio_put(folio);
			continue;
		}

		if (!*memstall && folio_test_workingset(folio)) {
			psi_memstall_enter(pflags);
			*memstall = 1;
		}

		ret = set_folio_extent_mapped(folio);
		if (ret < 0) {
			folio_unlock(folio);
			folio_put(folio);
			break;
		}

		page_end = (pg_index << PAGE_SHIFT) + folio_size(folio) - 1;
		lock_extent(tree, cur, page_end, NULL);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
		read_unlock(&em_tree->lock);

		/*
		 * At this point, we have a locked page in the page cache for
		 * these bytes in the file.  But, we have to make sure they map
		 * to this compressed extent on disk.
		 */
		if (!em || cur < em->start ||
		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
		    (extent_map_block_start(em) >> SECTOR_SHIFT) !=
		    orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, cur, page_end, NULL);
			folio_unlock(folio);
			folio_put(folio);
			break;
		}
		add_size = min(em->start + em->len, page_end + 1) - cur;
		free_extent_map(em);
		unlock_extent(tree, cur, page_end, NULL);

		if (folio->index == end_index) {
			size_t zero_offset = offset_in_folio(folio, isize);

			if (zero_offset) {
				int zeros;
				zeros = folio_size(folio) - zero_offset;
				folio_zero_range(folio, zero_offset, zeros);
			}
		}

		if (!bio_add_folio(orig_bio, folio, add_size,
				   offset_in_folio(folio, cur))) {
			folio_unlock(folio);
			folio_put(folio);
			break;
		}
		/*
		 * If it's subpage, we also need to increase its
		 * subpage::readers number, as at endio time we will decrease
		 * subpage::readers and unlock the page accordingly.
		 */
		if (fs_info->sectorsize < PAGE_SIZE)
			btrfs_subpage_start_reader(fs_info, folio, cur,
						   add_size);
		folio_put(folio);
		cur += add_size;
	}
	return 0;
}

/*
 * For a compressed read, the bio we get passed has all the inode pages in it.
 * We don't actually do IO on those pages but allocate new ones to hold the
 * compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the bio we were
 * passed and then call the bio end_io hooks.
 */
void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct compressed_bio *cb;
	unsigned int compressed_len;
	u64 file_offset = bbio->file_offset;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	unsigned long pflags;
	int memstall = 0;
	blk_status_t ret;
	int ret2;

	/* We need the actual starting offset of this extent in the file. */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
	read_unlock(&em_tree->lock);
	if (!em) {
		ret = BLK_STS_IOERR;
		goto out;
	}

	ASSERT(extent_map_is_compressed(em));
	compressed_len = em->disk_num_bytes;

	cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
				  end_bbio_compressed_read);

	cb->start = em->start - em->offset;
	em_len = em->len;
	em_start = em->start;

	cb->len = bbio->bio.bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_map_compression(em);
	cb->orig_bbio = bbio;

	free_extent_map(em);

	cb->nr_folios = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct page *), GFP_NOFS);
	if (!cb->compressed_folios) {
		ret = BLK_STS_RESOURCE;
		goto out_free_bio;
	}

	ret2 = btrfs_alloc_folio_array(cb->nr_folios, cb->compressed_folios);
	if (ret2) {
		ret = BLK_STS_RESOURCE;
		goto out_free_compressed_pages;
	}

	add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
			 &pflags);

	/* Include any pages we added in add_ra_bio_pages(). */
	cb->len = bbio->bio.bi_iter.bi_size;
	cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
	btrfs_add_compressed_bio_folios(cb);

	if (memstall)
		psi_memstall_leave(&pflags);

	btrfs_submit_bbio(&cb->bbio, 0);
	return;

out_free_compressed_pages:
	kfree(cb->compressed_folios);
out_free_bio:
	bio_put(&cb->bbio.bio);
out:
	btrfs_bio_end_io(bbio, ret);
}

/*
 * The heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - the distance between the starts of two samples
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, i.e. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
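
/*
 * Worked example (editorial note): BTRFS_MAX_UNCOMPRESSED is 128KiB, so the
 * bound above evaluates to 131072 * 16 / 256 = 8192 bytes, i.e. 16 bytes
 * sampled at each of up to 512 locations.
 */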

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
	.workspace_manager = &heuristic_wsm,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
	&btrfs_heuristic_compress,
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

static struct list_head *alloc_workspace(int type, unsigned int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void free_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void btrfs_init_workspace_manager(int type)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;

	wsm = btrfs_compress_op[type]->workspace_manager;
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	init_waitqueue_head(&wsm->ws_wait);

	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case.
	 */
	workspace = alloc_workspace(type, 0);
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate compression workspace, will try later\n");
	} else {
		atomic_set(&wsm->total_ws, 1);
		wsm->free_ws = 1;
		list_add(workspace, &wsm->idle_ws);
	}
}

static void btrfs_cleanup_workspace_manager(int type)
{
	struct workspace_manager *wsman;
	struct list_head *ws;

	wsman = btrfs_compress_op[type]->workspace_manager;
	while (!list_empty(&wsman->idle_ws)) {
		ws = wsman->idle_ws.next;
		list_del(ws);
		free_workspace(type, ws);
		atomic_dec(&wsman->total_ws);
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, it waits until one is freed.
 * Preallocation guarantees forward progress, so we do not return errors.
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;
	int cpus = num_online_cpus();
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc, which can't use GFP_NOFS, so we
	 * have to turn it off here because we might get called from the
	 * restricted context of btrfs_compress_bio/btrfs_compress_pages.
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = alloc_workspace(type, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *get_workspace(int type, int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Put a workspace struct back on the list or free it if we have enough idle
 * ones sitting around.
 */
void btrfs_put_workspace(int type, struct list_head *ws)
{
	struct workspace_manager *wsm;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(ws, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	free_workspace(type, ws);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}

static void put_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fall back to the default.
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
	const struct btrfs_compress_op *ops = btrfs_compress_op[type];

	if (level == 0)
		level = ops->default_level;
	else
		level = min(level, ops->max_level);

	return level;
}
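
/*
 * Example (editorial sketch; assumes zlib's limits, where the default level
 * is 3 and the maximum is 9 -- those values live in the per-algorithm
 * btrfs_compress_op definitions, not here):
 *
 *	btrfs_compress_set_level(BTRFS_COMPRESS_ZLIB, 0)  -> 3 (default)
 *	btrfs_compress_set_level(BTRFS_COMPRESS_ZLIB, 42) -> 9 (clamped to max)
 */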

/* Wrapper around filemap_get_folio(), with an extra error message. */
int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
				     struct folio **in_folio_ret)
{
	struct folio *in_folio;

	/*
	 * The compressed write path should have the folio locked already, thus
	 * we only need to grab one reference.
	 */
	in_folio = filemap_get_folio(mapping, start >> PAGE_SHIFT);
	if (IS_ERR(in_folio)) {
		struct btrfs_inode *inode = BTRFS_I(mapping->host);

		btrfs_crit(inode->root->fs_info,
		"failed to get page cache, root %lld ino %llu file offset %llu",
			   btrfs_root_id(inode->root), btrfs_ino(inode), start);
		return -ENOENT;
	}
	*in_folio_ret = in_folio;
	return 0;
}

/*
 * Given an address space and start and length, compress the bytes into @folios
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is stored in bits 0-3
 * - the level is stored in bits 4-7
 *
 * @out_folios is an in/out parameter, holds the maximum number of folios to
 * allocate and returns the number of folios actually allocated
 *
 * @total_in is used to return the number of bytes actually read.  It may be
 * smaller than the input length if we had to exit early because we ran out of
 * room in the folios array or because we crossed the max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct folio **folios, unsigned long *out_folios,
			 unsigned long *total_in, unsigned long *total_out)
{
	int type = btrfs_compress_type(type_level);
	int level = btrfs_compress_level(type_level);
	struct list_head *workspace;
	int ret;

	level = btrfs_compress_set_level(type, level);
	workspace = get_workspace(type, level);
	ret = compression_compress_pages(type, workspace, mapping, start, folios,
					 out_folios, total_in, total_out);
	put_workspace(type, workspace);
	return ret;
}
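
/*
 * Worked example for the @type_level encoding (editorial sketch, assuming
 * the btrfs_compress_type()/btrfs_compress_level() helpers split the low and
 * high nibble respectively): "zstd:5" is encoded as
 * (5 << 4) | BTRFS_COMPRESS_ZSTD = 0x53, which decodes back to type 3 (zstd)
 * and level 5.
 */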

static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = get_workspace(type, 0);
	ret = compression_decompress_bio(workspace, cb);
	put_workspace(type, workspace);

	if (!ret)
		zero_fill_bio(&cb->orig_bbio->bio);
	return ret;
}

/*
 * A less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single sector out of it.  @dest_pgoff
 * tells us the offset into the destination folio we're interested in.
 */
int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
		     unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
	struct btrfs_fs_info *fs_info = folio_to_fs_info(dest_folio);
	struct list_head *workspace;
	const u32 sectorsize = fs_info->sectorsize;
	int ret;

	/*
	 * The full destination page range should not exceed the page size.
	 * And the @destlen should not exceed sectorsize, as this is only called for
	 * inline file extents, which should not exceed sectorsize.
	 */
	ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);

	workspace = get_workspace(type, 0);
	ret = compression_decompress(type, workspace, data_in, dest_folio,
				     dest_pgoff, srclen, destlen);
	put_workspace(type, workspace);

	return ret;
}

int __init btrfs_init_compress(void)
{
	if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE,
			offsetof(struct compressed_bio, bbio.bio),
			BIOSET_NEED_BVECS))
		return -ENOMEM;

	compr_pool.shrinker = shrinker_alloc(SHRINKER_NONSLAB, "btrfs-compr-pages");
	if (!compr_pool.shrinker)
		return -ENOMEM;

	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_init_workspace_manager();

	spin_lock_init(&compr_pool.lock);
	INIT_LIST_HEAD(&compr_pool.list);
	compr_pool.count = 0;
	/* 128K / 4K = 32 pages per max compressed extent, times 8 threads = 256 pages. */
	compr_pool.thresh = BTRFS_MAX_COMPRESSED / PAGE_SIZE * 8;
	compr_pool.shrinker->count_objects = btrfs_compr_pool_count;
	compr_pool.shrinker->scan_objects = btrfs_compr_pool_scan;
	compr_pool.shrinker->batch = 32;
	compr_pool.shrinker->seeks = DEFAULT_SEEKS;
	shrinker_register(compr_pool.shrinker);

	return 0;
}

void __cold btrfs_exit_compress(void)
{
	/* For now scan drains all pages and does not touch the parameters. */
	btrfs_compr_pool_scan(NULL, NULL);
	shrinker_free(compr_pool.shrinker);

	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_cleanup_workspace_manager();
	bioset_exit(&btrfs_compressed_bioset);
}

/*
 * Copy decompressed data from the working buffer to pages.
 *
 * @buf:		The decompressed data buffer
 * @buf_len:		The decompressed data length
 * @decompressed:	Number of bytes that are already decompressed inside the
 * 			compressed extent
 * @cb:			The compressed extent descriptor
 * @orig_bio:		The original bio that the caller wants to read data for
 *
 * An easier to understand graph is like below:
 *
 * 		|<- orig_bio ->|     |<- orig_bio->|
 * 	|<-------      full decompressed extent      ----->|
 * 	|<-----------    @cb range   ---->|
 * 	|			|<-- @buf_len -->|
 * 	|<--- @decompressed --->|
 *
 * Note that @cb can be a subpage of the full decompressed extent, but
 * @cb->start always has the same value as the file offset of the full
 * decompressed extent.
 *
 * When reading a compressed extent, we have to read the full compressed
 * extent, while @orig_bio may want only part of the range.
 * Thus this function will ensure that only data covered by @orig_bio gets
 * copied to it.
 *
 * Return 0 if we have copied all needed contents for @orig_bio.
 * Return >0 if we need to continue decompressing.
 */
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
			      struct compressed_bio *cb, u32 decompressed)
{
	struct bio *orig_bio = &cb->orig_bbio->bio;
	/* Offset inside the full decompressed extent */
	u32 cur_offset;

	cur_offset = decompressed;
	/* The main loop to do the copy */
	while (cur_offset < decompressed + buf_len) {
		struct bio_vec bvec;
		size_t copy_len;
		u32 copy_start;
		/* Offset inside the full decompressed extent */
		u32 bvec_offset;

		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
		/*
		 * cb->start may underflow, but subtracting that value can still
		 * give us the correct offset inside the full decompressed extent.
		 */
		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;

		/* Haven't reached the bvec range, exit */
		if (decompressed + buf_len <= bvec_offset)
			return 1;

		copy_start = max(cur_offset, bvec_offset);
		copy_len = min(bvec_offset + bvec.bv_len,
			       decompressed + buf_len) - copy_start;
		ASSERT(copy_len);

		/*
		 * Extra range check to ensure we didn't go beyond
		 * @buf + @buf_len.
		 */
		ASSERT(copy_start - decompressed < buf_len);
		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
			       buf + copy_start - decompressed, copy_len);
		cur_offset += copy_len;

		bio_advance(orig_bio, copy_len);
		/* Finished the bio */
		if (!orig_bio->bi_iter.bi_size)
			return 0;
	}
	return 1;
}
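
/*
 * Worked example for btrfs_decompress_buf2page() (editorial sketch with
 * made-up numbers): suppose the only bvec left in @orig_bio sits at extent
 * offset 16K with a length of 4K, and the decompressor hands us a buffer
 * with @decompressed = 12K and @buf_len = 8K, i.e. covering [12K, 20K).
 * Then bvec_offset = 16K, copy_start = max(12K, 16K) = 16K and
 * copy_len = min(20K, 20K) - 16K = 4K; after the copy the bio is fully
 * advanced and we return 0.
 */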

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of the
 * amount of bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, the sample has low byte
 *			    entropy and is likely compressible
 *
 * @ENTROPY_LVL_HIGH - data is not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation, let's do pow(n, M)
 * to save more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}
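
/*
 * Example (editorial note): ilog2_w(8192) = ilog2(8192^4) = ilog2(2^52) = 52,
 * i.e. the base-2 logarithm scaled by a factor of 4 to keep extra precision
 * in integer math.
 */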

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}
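
/*
 * Worked example (editorial note): for an 8192 byte sample of uniformly
 * random data every bucket holds p = 32, so p_base = ilog2_w(32) = 20 and
 * sz_base = ilog2_w(8192) = 52.  Then entropy_sum =
 * 256 * 32 * (52 - 20) / 8192 = 32 = entropy_max, giving 100%: maximum
 * entropy, i.e. not compressible.
 */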

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift)
{
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}
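
/*
 * Example (editorial note): the "reverse order" trick means
 * get4bits(0x5, 0) returns 15 - 5 = 10, so an ordinary ascending radix pass
 * on these reversed digits produces a descending sort by count, which is
 * what byte_core_set_size() expects.
 */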

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try to avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in a 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix sort moves data from a temporary array back to
		 * the main one, but that costs some CPU time.  Avoid it by
		 * doing another sort iteration into the original array instead
		 * of a memcpy().
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}
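
/*
 * Usage sketch (editorial note): with the reversed digits from get4bits(),
 * counts like {3, 9, 1} leave radix_sort() as {9, 3, 1}.  Two passes per
 * iteration (main -> buffer -> main) keep the result in @array without a
 * final memcpy().
 */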

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (e.g. encrypted data). Unlikely to be compressible.
 *
 * The other possibility is a normal (Gaussian) distribution, where the data
 * could be potentially compressible, but we have to take a few more steps to
 * decide how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algorithms can easily handle that
 * @BYTE_CORE_SET_HIGH - data has a uniform distribution and with high
 *                       probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)
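
/*
 * Worked example (editorial note): for a full 8192 byte sample the 90%
 * threshold is 8192 * 90 / 100 = 7372 bytes.  If the 64 most frequent byte
 * values already cover more than that, the data is considered easily
 * compressible; if it takes 200 or more values, the distribution is close to
 * uniform.
 */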

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data the byte set is restricted to a limited
 * number of possible characters, and that restriction in most cases makes
 * the data easy to compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}
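
/*
 * Example (editorial note): a sample whose first half is byte-for-byte equal
 * to its second half, as with all-zero data, short-circuits the heuristic
 * with "compressible" before any bucket counting is done.
 */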

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!PAGE_ALIGNED(end))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap_local_page(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
					SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap_local(in_data);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = get_workspace(0, 0);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(&inode->vfs_inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, e.g. 150 byte
	 * values, every bucket has a counter at level ~54. The heuristic would
	 * be confused. This can happen when the data has some internal repeated
	 * patterns like "abbacbbc...". This can be detected by analyzing
	 * pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	put_workspace(0, ws_list);
	return ret;
}

/*
 * Convert the compression suffix (e.g. after "zlib", starting with ":") to
 * the level; an unrecognized string will set the default level.
 */
unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
{
	unsigned int level = 0;
	int ret;

	if (!type)
		return 0;

	if (str[0] == ':') {
		ret = kstrtouint(str + 1, 10, &level);
		if (ret)
			level = 0;
	}

	level = btrfs_compress_set_level(type, level);

	return level;
}
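
/*
 * Examples (editorial note): for the mount option "compress=zstd:5", @str
 * points at ":5" and this returns 5; "compress=zlib" has no suffix, so level
 * stays 0 and the algorithm's default is returned; an unparsable suffix such
 * as ":foo" also falls back to the default.
 */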