1/*
2 * Copyright (c) 2017 Grzegorz Kostka (kostka.grzegorz@gmail.com)
3 * Copyright (c) 2017 Kaho Ng (ngkaho1234@gmail.com)
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 */
10
11#include <ext4_config.h>
12#include <ext4_types.h>
13#include <ext4_misc.h>
14#include <ext4_errno.h>
15#include <ext4_debug.h>
16
17#include <ext4_blockdev.h>
18#include <ext4_trans.h>
19#include <ext4_fs.h>
20#include <ext4_super.h>
21#include <ext4_crc32.h>
22#include <ext4_balloc.h>
23#include <ext4_extent.h>
24
25#include <stdlib.h>
26#include <string.h>
27#include <inttypes.h>
28#include <stddef.h>
29
30#if CONFIG_EXTENTS_ENABLE
31/*
32 * used by extent splitting.
33 */
34#define EXT4_EXT_MARK_UNWRIT1 0x02 /* mark first half unwritten */
35#define EXT4_EXT_MARK_UNWRIT2 0x04 /* mark second half unwritten */
36#define EXT4_EXT_DATA_VALID1 0x08 /* first half contains valid data */
37#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
38#define EXT4_EXT_NO_COMBINE 0x20 /* do not combine two extents */
39
40#define EXT4_EXT_UNWRITTEN_MASK (1L << 15)
41
42#define EXT4_EXT_MAX_LEN_WRITTEN (1L << 15)
43#define EXT4_EXT_MAX_LEN_UNWRITTEN \
44 (EXT4_EXT_MAX_LEN_WRITTEN - 1)
45
46#define EXT4_EXT_GET_LEN(ex) to_le16((ex)->block_count)
47#define EXT4_EXT_GET_LEN_UNWRITTEN(ex) \
48 (EXT4_EXT_GET_LEN(ex) & ~(EXT4_EXT_UNWRITTEN_MASK))
49#define EXT4_EXT_SET_LEN(ex, count) \
50 ((ex)->block_count = to_le16(count))
51
52#define EXT4_EXT_IS_UNWRITTEN(ex) \
53 (EXT4_EXT_GET_LEN(ex) > EXT4_EXT_MAX_LEN_WRITTEN)
54#define EXT4_EXT_SET_UNWRITTEN(ex) \
55 ((ex)->block_count |= to_le16(EXT4_EXT_UNWRITTEN_MASK))
56#define EXT4_EXT_SET_WRITTEN(ex) \
57 ((ex)->block_count &= ~(to_le16(EXT4_EXT_UNWRITTEN_MASK)))
58
59/*
60 * Array of ext4_ext_path contains path to some extent.
61 * Creation/lookup routines use it for traversal/splitting/etc.
62 * Truncate uses it to simulate recursive walking.
63 */
64struct ext4_extent_path {
65 ext4_fsblk_t p_block;
66 struct ext4_block block;
67 int32_t depth;
68 int32_t maxdepth;
69 struct ext4_extent_header *header;
70 struct ext4_extent_index *index;
71 struct ext4_extent *extent;
72};
73
74
75#pragma pack(push, 1)
76
77/*
78 * This is the extent tail on-disk structure.
79 * All other extent structures are 12 bytes long. It turns out that
80 * block_size % 12 >= 4 for at least all powers of 2 greater than 512, which
81 * covers all valid ext4 block sizes. Therefore, this tail structure can be
82 * crammed into the end of the block without having to rebalance the tree.
83 */
struct ext4_extent_tail
{
	/* crc32c(uuid+inum+extent_block), stored little-endian */
	uint32_t et_checksum;
};
88
89/*
90 * This is the extent on-disk structure.
91 * It's used at the bottom of the tree.
92 */
struct ext4_extent {
	uint32_t first_block; /* First logical block extent covers */
	uint16_t block_count; /* Number of blocks covered by extent;
			       * the MSB marks the extent unwritten */
	uint16_t start_hi;    /* High 16 bits of physical block */
	uint32_t start_lo;    /* Low 32 bits of physical block */
};
99
100/*
101 * This is index on-disk structure.
102 * It's used at all the levels except the bottom.
103 */
struct ext4_extent_index {
	uint32_t first_block; /* Index covers logical blocks from 'block' */

	/**
	 * Pointer to the physical block of the next
	 * level: a leaf or the next index node is there.
	 */
	uint32_t leaf_lo; /* Low 32 bits of physical block */
	uint16_t leaf_hi; /* High 16 bits of physical block */
	uint16_t padding;
};
116
117/*
118 * Each block (leaves and indexes), even inode-stored has header.
119 */
struct ext4_extent_header {
	uint16_t magic;             /* Must equal EXT4_EXTENT_MAGIC (0xF30A) */
	uint16_t entries_count;     /* Number of valid entries */
	uint16_t max_entries_count; /* Capacity of store in entries */
	uint16_t depth;             /* 0 for a leaf node, >0 for index nodes */
	uint32_t generation;        /* generation of the tree */
};
127
128#pragma pack(pop)
129
130
131#define EXT4_EXTENT_MAGIC 0xF30A
132
133#define EXT4_EXTENT_FIRST(header) \
134 ((struct ext4_extent *)(((char *)(header)) + \
135 sizeof(struct ext4_extent_header)))
136
137#define EXT4_EXTENT_FIRST_INDEX(header) \
138 ((struct ext4_extent_index *)(((char *)(header)) + \
139 sizeof(struct ext4_extent_header)))
140
141/*
142 * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
143 * initialized extent. This is 2^15 and not (2^16 - 1), since we use the
144 * MSB of ee_len field in the extent datastructure to signify if this
145 * particular extent is an initialized extent or an uninitialized (i.e.
146 * preallocated).
147 * EXT_UNINIT_MAX_LEN is the maximum number of blocks we can have in an
148 * uninitialized extent.
149 * If ee_len is <= 0x8000, it is an initialized extent. Otherwise, it is an
150 * uninitialized one. In other words, if MSB of ee_len is set, it is an
151 * uninitialized extent with only one special scenario when ee_len = 0x8000.
152 * In this case we can not have an uninitialized extent of zero length and
153 * thus we make it as a special case of initialized extent with 0x8000 length.
154 * This way we get better extent-to-group alignment for initialized extents.
155 * Hence, the maximum number of blocks we can have in an *initialized*
156 * extent is 2^15 (32768) and in an *uninitialized* extent is 2^15-1 (32767).
157 */
158#define EXT_INIT_MAX_LEN (1L << 15)
159#define EXT_UNWRITTEN_MAX_LEN (EXT_INIT_MAX_LEN - 1)
160
#define EXT_EXTENT_SIZE sizeof(struct ext4_extent)
/* Fixed: the index struct in this file is ext4_extent_index, not
 * ext4_extent_idx; the old expansion would fail to compile if used. */
#define EXT_INDEX_SIZE sizeof(struct ext4_extent_index)
163
164#define EXT_FIRST_EXTENT(__hdr__) \
165 ((struct ext4_extent *)(((char *)(__hdr__)) + \
166 sizeof(struct ext4_extent_header)))
167#define EXT_FIRST_INDEX(__hdr__) \
168 ((struct ext4_extent_index *)(((char *)(__hdr__)) + \
169 sizeof(struct ext4_extent_header)))
170#define EXT_HAS_FREE_INDEX(__path__) \
171 (to_le16((__path__)->header->entries_count) < \
172 to_le16((__path__)->header->max_entries_count))
173#define EXT_LAST_EXTENT(__hdr__) \
174 (EXT_FIRST_EXTENT((__hdr__)) + to_le16((__hdr__)->entries_count) - 1)
175#define EXT_LAST_INDEX(__hdr__) \
176 (EXT_FIRST_INDEX((__hdr__)) + to_le16((__hdr__)->entries_count) - 1)
177#define EXT_MAX_EXTENT(__hdr__) \
178 (EXT_FIRST_EXTENT((__hdr__)) + to_le16((__hdr__)->max_entries_count) - 1)
179#define EXT_MAX_INDEX(__hdr__) \
180 (EXT_FIRST_INDEX((__hdr__)) + to_le16((__hdr__)->max_entries_count) - 1)
181
182#define EXT4_EXTENT_TAIL_OFFSET(hdr) \
183 (sizeof(struct ext4_extent_header) + \
184 (sizeof(struct ext4_extent) * to_le16((hdr)->max_entries_count)))
185
186
187/**@brief Get logical number of the block covered by extent.
188 * @param extent Extent to load number from
189 * @return Logical number of the first block covered by extent */
190static inline uint32_t ext4_extent_get_first_block(struct ext4_extent *extent)
191{
192 return to_le32(extent->first_block);
193}
194
195/**@brief Set logical number of the first block covered by extent.
196 * @param extent Extent to set number to
197 * @param iblock Logical number of the first block covered by extent */
198static inline void ext4_extent_set_first_block(struct ext4_extent *extent,
199 uint32_t iblock)
200{
201 extent->first_block = to_le32(iblock);
202}
203
/**@brief Get number of blocks covered by extent.
 * @param extent Extent to load count from
 * @return Number of blocks covered by extent (unwritten flag stripped) */
static inline uint16_t ext4_extent_get_block_count(struct ext4_extent *extent)
{
	/* For unwritten extents the MSB of the length is a flag, not part
	 * of the count, so mask it away. */
	return EXT4_EXT_IS_UNWRITTEN(extent)
		   ? EXT4_EXT_GET_LEN_UNWRITTEN(extent)
		   : EXT4_EXT_GET_LEN(extent);
}
/**@brief Set number of blocks covered by extent.
 * @param extent Extent to store the count to
 * @param count Number of blocks covered by extent
 * @param unwritten Whether the extent is unwritten or not */
static inline void ext4_extent_set_block_count(struct ext4_extent *extent,
					       uint16_t count, bool unwritten)
{
	EXT4_EXT_SET_LEN(extent, count);
	if (!unwritten)
		return;

	/* Raise the MSB flag to mark the extent unwritten. */
	EXT4_EXT_SET_UNWRITTEN(extent);
}
225
226/**@brief Get physical number of the first block covered by extent.
227 * @param extent Extent to load number
228 * @return Physical number of the first block covered by extent */
229static inline uint64_t ext4_extent_get_start(struct ext4_extent *extent)
230{
231 return ((uint64_t)to_le16(extent->start_hi)) << 32 |
232 ((uint64_t)to_le32(extent->start_lo));
233}
234
235
236/**@brief Set physical number of the first block covered by extent.
237 * @param extent Extent to load number
238 * @param fblock Physical number of the first block covered by extent */
239static inline void ext4_extent_set_start(struct ext4_extent *extent, uint64_t fblock)
240{
241 extent->start_lo = to_le32((fblock << 32) >> 32);
242 extent->start_hi = to_le16((uint16_t)(fblock >> 32));
243}
244
245
246/**@brief Get logical number of the block covered by extent index.
247 * @param index Extent index to load number from
248 * @return Logical number of the first block covered by extent index */
249static inline uint32_t
250ext4_extent_index_get_first_block(struct ext4_extent_index *index)
251{
252 return to_le32(index->first_block);
253}
254
255/**@brief Set logical number of the block covered by extent index.
256 * @param index Extent index to set number to
257 * @param iblock Logical number of the first block covered by extent index */
258static inline void
259ext4_extent_index_set_first_block(struct ext4_extent_index *index,
260 uint32_t iblock)
261{
262 index->first_block = to_le32(iblock);
263}
264
265/**@brief Get physical number of block where the child node is located.
266 * @param index Extent index to load number from
267 * @return Physical number of the block with child node */
268static inline uint64_t
269ext4_extent_index_get_leaf(struct ext4_extent_index *index)
270{
271 return ((uint64_t)to_le16(index->leaf_hi)) << 32 |
272 ((uint64_t)to_le32(index->leaf_lo));
273}
274
275/**@brief Set physical number of block where the child node is located.
276 * @param index Extent index to set number to
277 * @param fblock Ohysical number of the block with child node */
278static inline void ext4_extent_index_set_leaf(struct ext4_extent_index *index,
279 uint64_t fblock)
280{
281 index->leaf_lo = to_le32((fblock << 32) >> 32);
282 index->leaf_hi = to_le16((uint16_t)(fblock >> 32));
283}
284
285/**@brief Get magic value from extent header.
286 * @param header Extent header to load value from
287 * @return Magic value of extent header */
288static inline uint16_t
289ext4_extent_header_get_magic(struct ext4_extent_header *header)
290{
291 return to_le16(header->magic);
292}
293
294/**@brief Set magic value to extent header.
295 * @param header Extent header to set value to
296 * @param magic Magic value of extent header */
297static inline void ext4_extent_header_set_magic(struct ext4_extent_header *header,
298 uint16_t magic)
299{
300 header->magic = to_le16(magic);
301}
302
303/**@brief Get number of entries from extent header
304 * @param header Extent header to get value from
305 * @return Number of entries covered by extent header */
306static inline uint16_t
307ext4_extent_header_get_entries_count(struct ext4_extent_header *header)
308{
309 return to_le16(header->entries_count);
310}
311
312/**@brief Set number of entries to extent header
313 * @param header Extent header to set value to
314 * @param count Number of entries covered by extent header */
315static inline void
316ext4_extent_header_set_entries_count(struct ext4_extent_header *header,
317 uint16_t count)
318{
319 header->entries_count = to_le16(count);
320}
321
322/**@brief Get maximum number of entries from extent header
323 * @param header Extent header to get value from
324 * @return Maximum number of entries covered by extent header */
325static inline uint16_t
326ext4_extent_header_get_max_entries_count(struct ext4_extent_header *header)
327{
328 return to_le16(header->max_entries_count);
329}
330
331/**@brief Set maximum number of entries to extent header
332 * @param header Extent header to set value to
333 * @param max_count Maximum number of entries covered by extent header */
334static inline void
335ext4_extent_header_set_max_entries_count(struct ext4_extent_header *header,
336 uint16_t max_count)
337{
338 header->max_entries_count = to_le16(max_count);
339}
340
341/**@brief Get depth of extent subtree.
342 * @param header Extent header to get value from
343 * @return Depth of extent subtree */
344static inline uint16_t
345ext4_extent_header_get_depth(struct ext4_extent_header *header)
346{
347 return to_le16(header->depth);
348}
349
350/**@brief Set depth of extent subtree.
351 * @param header Extent header to set value to
352 * @param depth Depth of extent subtree */
353static inline void
354ext4_extent_header_set_depth(struct ext4_extent_header *header, uint16_t depth)
355{
356 header->depth = to_le16(depth);
357}
358
359/**@brief Get generation from extent header
360 * @param header Extent header to get value from
361 * @return Generation */
362static inline uint32_t
363ext4_extent_header_get_generation(struct ext4_extent_header *header)
364{
365 return to_le32(header->generation);
366}
367
368/**@brief Set generation to extent header
369 * @param header Extent header to set value to
370 * @param generation Generation */
371static inline void
372ext4_extent_header_set_generation(struct ext4_extent_header *header,
373 uint32_t generation)
374{
375 header->generation = to_le32(generation);
376}
377
378void ext4_extent_tree_init(struct ext4_inode_ref *inode_ref)
379{
380 /* Initialize extent root header */
381 struct ext4_extent_header *header =
382 ext4_inode_get_extent_header(inode: inode_ref->inode);
383 ext4_extent_header_set_depth(header, depth: 0);
384 ext4_extent_header_set_entries_count(header, count: 0);
385 ext4_extent_header_set_generation(header, generation: 0);
386 ext4_extent_header_set_magic(header, EXT4_EXTENT_MAGIC);
387
388 uint16_t max_entries = (EXT4_INODE_BLOCKS * sizeof(uint32_t) -
389 sizeof(struct ext4_extent_header)) /
390 sizeof(struct ext4_extent);
391
392 ext4_extent_header_set_max_entries_count(header, max_count: max_entries);
393 inode_ref->dirty = true;
394}
395
396
/* Locate the checksum tail that sits right after the last possible
 * extent record in a node. */
static struct ext4_extent_tail *
find_ext4_extent_tail(struct ext4_extent_header *eh)
{
	char *base = (char *)eh;

	return (struct ext4_extent_tail *)(base + EXT4_EXTENT_TAIL_OFFSET(eh));
}
403
404static struct ext4_extent_header *ext_inode_hdr(struct ext4_inode *inode)
405{
406 return (struct ext4_extent_header *)inode->blocks;
407}
408
409static struct ext4_extent_header *ext_block_hdr(struct ext4_block *block)
410{
411 return (struct ext4_extent_header *)block->data;
412}
413
414static uint16_t ext_depth(struct ext4_inode *inode)
415{
416 return to_le16(ext_inode_hdr(inode)->depth);
417}
418
419static uint16_t ext4_ext_get_actual_len(struct ext4_extent *ext)
420{
421 return (to_le16(ext->block_count) <= EXT_INIT_MAX_LEN
422 ? to_le16(ext->block_count)
423 : (to_le16(ext->block_count) - EXT_INIT_MAX_LEN));
424}
425
426static void ext4_ext_mark_initialized(struct ext4_extent *ext)
427{
428 ext->block_count = to_le16(ext4_ext_get_actual_len(ext));
429}
430
431static void ext4_ext_mark_unwritten(struct ext4_extent *ext)
432{
433 ext->block_count |= to_le16(EXT_INIT_MAX_LEN);
434}
435
436static int ext4_ext_is_unwritten(struct ext4_extent *ext)
437{
438 /* Extent with ee_len of 0x8000 is treated as an initialized extent */
439 return (to_le16(ext->block_count) > EXT_INIT_MAX_LEN);
440}
441
442/*
443 * ext4_ext_pblock:
444 * combine low and high parts of physical block number into ext4_fsblk_t
445 */
446static ext4_fsblk_t ext4_ext_pblock(struct ext4_extent *ex)
447{
448 ext4_fsblk_t block;
449
450 block = to_le32(ex->start_lo);
451 block |= ((ext4_fsblk_t)to_le16(ex->start_hi) << 31) << 1;
452 return block;
453}
454
455/*
456 * ext4_idx_pblock:
457 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
458 */
459static ext4_fsblk_t ext4_idx_pblock(struct ext4_extent_index *ix)
460{
461 ext4_fsblk_t block;
462
463 block = to_le32(ix->leaf_lo);
464 block |= ((ext4_fsblk_t)to_le16(ix->leaf_hi) << 31) << 1;
465 return block;
466}
467
468/*
469 * ext4_ext_store_pblock:
470 * stores a large physical block number into an extent struct,
471 * breaking it into parts
472 */
473static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
474{
475 ex->start_lo = to_le32((uint32_t)(pb & 0xffffffff));
476 ex->start_hi = to_le16((uint16_t)((pb >> 32)) & 0xffff);
477}
478
479/*
480 * ext4_idx_store_pblock:
481 * stores a large physical block number into an index struct,
482 * breaking it into parts
483 */
484static void ext4_idx_store_pblock(struct ext4_extent_index *ix, ext4_fsblk_t pb)
485{
486 ix->leaf_lo = to_le32((uint32_t)(pb & 0xffffffff));
487 ix->leaf_hi = to_le16((uint16_t)((pb >> 32)) & 0xffff);
488}
489
490static int ext4_allocate_single_block(struct ext4_inode_ref *inode_ref,
491 ext4_fsblk_t goal, ext4_fsblk_t *blockp)
492{
493 return ext4_balloc_alloc_block(inode_ref, goal, baddr: blockp);
494}
495
496static ext4_fsblk_t ext4_new_meta_blocks(struct ext4_inode_ref *inode_ref,
497 ext4_fsblk_t goal,
498 uint32_t flags __unused,
499 uint32_t *count, int *errp)
500{
501 ext4_fsblk_t block = 0;
502
503 *errp = ext4_allocate_single_block(inode_ref, goal, blockp: &block);
504 if (count)
505 *count = 1;
506 return block;
507}
508
509static void ext4_ext_free_blocks(struct ext4_inode_ref *inode_ref,
510 ext4_fsblk_t block, uint32_t count,
511 uint32_t flags __unused)
512{
513 ext4_balloc_free_blocks(inode_ref, first: block, count);
514}
515
516static uint16_t ext4_ext_space_block(struct ext4_inode_ref *inode_ref)
517{
518 uint16_t size;
519 uint32_t block_size = ext4_sb_get_block_size(s: &inode_ref->fs->sb);
520
521 size = (block_size - sizeof(struct ext4_extent_header)) /
522 sizeof(struct ext4_extent);
523#ifdef AGGRESSIVE_TEST
524 if (size > 6)
525 size = 6;
526#endif
527 return size;
528}
529
530static uint16_t ext4_ext_space_block_idx(struct ext4_inode_ref *inode_ref)
531{
532 uint16_t size;
533 uint32_t block_size = ext4_sb_get_block_size(s: &inode_ref->fs->sb);
534
535 size = (block_size - sizeof(struct ext4_extent_header)) /
536 sizeof(struct ext4_extent_index);
537#ifdef AGGRESSIVE_TEST
538 if (size > 5)
539 size = 5;
540#endif
541 return size;
542}
543
544static uint16_t ext4_ext_space_root(struct ext4_inode_ref *inode_ref)
545{
546 uint16_t size;
547
548 size = sizeof(inode_ref->inode->blocks);
549 size -= sizeof(struct ext4_extent_header);
550 size /= sizeof(struct ext4_extent);
551#ifdef AGGRESSIVE_TEST
552 if (size > 3)
553 size = 3;
554#endif
555 return size;
556}
557
558static uint16_t ext4_ext_space_root_idx(struct ext4_inode_ref *inode_ref)
559{
560 uint16_t size;
561
562 size = sizeof(inode_ref->inode->blocks);
563 size -= sizeof(struct ext4_extent_header);
564 size /= sizeof(struct ext4_extent_index);
565#ifdef AGGRESSIVE_TEST
566 if (size > 4)
567 size = 4;
568#endif
569 return size;
570}
571
572static uint16_t ext4_ext_max_entries(struct ext4_inode_ref *inode_ref,
573 uint32_t depth)
574{
575 uint16_t max;
576
577 if (depth == ext_depth(inode: inode_ref->inode)) {
578 if (depth == 0)
579 max = ext4_ext_space_root(inode_ref);
580 else
581 max = ext4_ext_space_root_idx(inode_ref);
582 } else {
583 if (depth == 0)
584 max = ext4_ext_space_block(inode_ref);
585 else
586 max = ext4_ext_space_block_idx(inode_ref);
587 }
588
589 return max;
590}
591
/**@brief Pick a physical-block allocation goal for logical block 'block'.
 * @param path Lookup path for 'block', or NULL when none is available
 * @param block Logical block we want to place
 * @return Suggested physical block to allocate near */
static ext4_fsblk_t ext4_ext_find_goal(struct ext4_inode_ref *inode_ref,
				       struct ext4_extent_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		uint32_t depth = path->depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file. However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space. Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].extent;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = to_le32(ex->first_block);

			/* Extrapolate from the nearest known extent:
			 * keep the same logical->physical offset. */
			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].block.lb_id)
			return path[depth].block.lb_id;
	}

	/* OK. use inode's group */
	return ext4_fs_inode_to_goal_block(inode_ref);
}
637
638/*
639 * Allocation for a meta data block
640 */
641static ext4_fsblk_t ext4_ext_new_meta_block(struct ext4_inode_ref *inode_ref,
642 struct ext4_extent_path *path,
643 struct ext4_extent *ex, int *err,
644 uint32_t flags)
645{
646 ext4_fsblk_t goal, newblock;
647
648 goal = ext4_ext_find_goal(inode_ref, path, to_le32(ex->first_block));
649 newblock = ext4_new_meta_blocks(inode_ref, goal, flags, NULL, errp: err);
650 return newblock;
651}
652
653#if CONFIG_META_CSUM_ENABLE
654static uint32_t ext4_ext_block_csum(struct ext4_inode_ref *inode_ref,
655 struct ext4_extent_header *eh)
656{
657 uint32_t checksum = 0;
658 struct ext4_sblock *sb = &inode_ref->fs->sb;
659
660 if (ext4_sb_feature_ro_com(s: sb, EXT4_FRO_COM_METADATA_CSUM)) {
661 uint32_t ino_index = to_le32(inode_ref->index);
662 uint32_t ino_gen =
663 to_le32(ext4_inode_get_generation(inode_ref->inode));
664 /* First calculate crc32 checksum against fs uuid */
665 checksum =
666 ext4_crc32c(EXT4_CRC32_INIT, buf: sb->uuid, size: sizeof(sb->uuid));
667 /* Then calculate crc32 checksum against inode number
668 * and inode generation */
669 checksum = ext4_crc32c(crc: checksum, buf: &ino_index, size: sizeof(ino_index));
670 checksum = ext4_crc32c(crc: checksum, buf: &ino_gen, size: sizeof(ino_gen));
671 /* Finally calculate crc32 checksum against
672 * the entire extent block up to the checksum field */
673 checksum =
674 ext4_crc32c(crc: checksum, buf: eh, EXT4_EXTENT_TAIL_OFFSET(eh));
675 }
676 return checksum;
677}
678#else
679#define ext4_ext_block_csum(...) 0
680#endif
681
682static void
683ext4_extent_block_csum_set(struct ext4_inode_ref *inode_ref __unused,
684 struct ext4_extent_header *eh)
685{
686 struct ext4_extent_tail *tail;
687
688 tail = find_ext4_extent_tail(eh);
689 tail->et_checksum = to_le32(ext4_ext_block_csum(inode_ref, eh));
690}
691
692static int ext4_ext_dirty(struct ext4_inode_ref *inode_ref,
693 struct ext4_extent_path *path)
694{
695 if (path->block.lb_id)
696 ext4_trans_set_block_dirty(buf: path->block.buf);
697 else
698 inode_ref->dirty = true;
699
700 return EOK;
701}
702
703static void ext4_ext_drop_refs(struct ext4_inode_ref *inode_ref,
704 struct ext4_extent_path *path, bool keep_other)
705{
706 int32_t depth, i;
707
708 if (!path)
709 return;
710 if (keep_other)
711 depth = 0;
712 else
713 depth = path->depth;
714
715 for (i = 0; i <= depth; i++, path++) {
716 if (path->block.lb_id) {
717 if (ext4_bcache_test_flag(path->block.buf, BC_DIRTY))
718 ext4_extent_block_csum_set(inode_ref,
719 eh: path->header);
720
721 ext4_block_set(bdev: inode_ref->fs->bdev, b: &path->block);
722 }
723 }
724}
725
726/*
727 * Check that whether the basic information inside the extent header
728 * is correct or not.
729 */
730static int ext4_ext_check(struct ext4_inode_ref *inode_ref,
731 struct ext4_extent_header *eh, uint16_t depth,
732 ext4_fsblk_t pblk __unused)
733{
734 struct ext4_extent_tail *tail;
735 struct ext4_sblock *sb = &inode_ref->fs->sb;
736 const char *error_msg;
737 (void)error_msg;
738
739 if (to_le16(eh->magic) != EXT4_EXTENT_MAGIC) {
740 error_msg = "invalid magic";
741 goto corrupted;
742 }
743 if (to_le16(eh->depth) != depth) {
744 error_msg = "unexpected eh_depth";
745 goto corrupted;
746 }
747 if (eh->max_entries_count == 0) {
748 error_msg = "invalid eh_max";
749 goto corrupted;
750 }
751 if (to_le16(eh->entries_count) > to_le16(eh->max_entries_count)) {
752 error_msg = "invalid eh_entries";
753 goto corrupted;
754 }
755
756 tail = find_ext4_extent_tail(eh);
757 if (ext4_sb_feature_ro_com(s: sb, EXT4_FRO_COM_METADATA_CSUM)) {
758 if (tail->et_checksum !=
759 to_le32(ext4_ext_block_csum(inode_ref, eh))) {
760 ext4_dbg(DEBUG_EXTENT,
761 DBG_WARN "Extent block checksum failed."
762 "Blocknr: %" PRIu64 "\n",
763 pblk);
764 }
765 }
766
767 return EOK;
768
769corrupted:
770 ext4_dbg(DEBUG_EXTENT, "Bad extents B+ tree block: %s. "
771 "Blocknr: %" PRId64 "\n",
772 error_msg, pblk);
773 return EIO;
774}
775
776static int read_extent_tree_block(struct ext4_inode_ref *inode_ref,
777 ext4_fsblk_t pblk, int32_t depth,
778 struct ext4_block *bh,
779 uint32_t flags __unused)
780{
781 int err;
782
783 err = ext4_trans_block_get(bdev: inode_ref->fs->bdev, b: bh, lba: pblk);
784 if (err != EOK)
785 goto errout;
786
787 err = ext4_ext_check(inode_ref, eh: ext_block_hdr(block: bh), depth, pblk);
788 if (err != EOK)
789 goto errout;
790
791 return EOK;
792errout:
793 if (bh->lb_id)
794 ext4_block_set(bdev: inode_ref->fs->bdev, b: bh);
795
796 return err;
797}
798
799/*
800 * ext4_ext_binsearch_idx:
801 * binary search for the closest index of the given block
802 * the header must be checked before calling this
803 */
804static void ext4_ext_binsearch_idx(struct ext4_extent_path *path,
805 ext4_lblk_t block)
806{
807 struct ext4_extent_header *eh = path->header;
808 struct ext4_extent_index *r, *l, *m;
809
810 l = EXT_FIRST_INDEX(eh) + 1;
811 r = EXT_LAST_INDEX(eh);
812 while (l <= r) {
813 m = l + (r - l) / 2;
814 if (block < to_le32(m->first_block))
815 r = m - 1;
816 else
817 l = m + 1;
818 }
819
820 path->index = l - 1;
821}
822
823/*
824 * ext4_ext_binsearch:
825 * binary search for closest extent of the given block
826 * the header must be checked before calling this
827 */
828static void ext4_ext_binsearch(struct ext4_extent_path *path, ext4_lblk_t block)
829{
830 struct ext4_extent_header *eh = path->header;
831 struct ext4_extent *r, *l, *m;
832
833 if (eh->entries_count == 0) {
834 /*
835 * this leaf is empty:
836 * we get such a leaf in split/add case
837 */
838 return;
839 }
840
841 l = EXT_FIRST_EXTENT(eh) + 1;
842 r = EXT_LAST_EXTENT(eh);
843
844 while (l <= r) {
845 m = l + (r - l) / 2;
846 if (block < to_le32(m->first_block))
847 r = m - 1;
848 else
849 l = m + 1;
850 }
851
852 path->extent = l - 1;
853}
854
/**@brief Walk the extent tree from the root down to the leaf covering
 * 'block', filling one path entry per level.
 * @param orig_path In: may hold a previous path to reuse; out: the new
 *        path (caller must free it via ext4_ext_drop_refs + ext4_free).
 * @return EOK on success, ENOMEM/EIO on failure ( *orig_path is NULL then). */
static int ext4_find_extent(struct ext4_inode_ref *inode_ref, ext4_lblk_t block,
			    struct ext4_extent_path **orig_path, uint32_t flags)
{
	struct ext4_extent_header *eh;
	struct ext4_block bh = EXT4_BLOCK_ZERO();
	ext4_fsblk_t buf_block = 0;
	struct ext4_extent_path *path = *orig_path;
	int32_t depth, ppos = 0;
	int32_t i;
	int ret;

	eh = ext_inode_hdr(inode: inode_ref->inode);
	depth = ext_depth(inode: inode_ref->inode);

	/* Reuse a previously allocated path array if it is deep enough;
	 * otherwise drop it and allocate a fresh one. */
	if (path) {
		ext4_ext_drop_refs(inode_ref, path, keep_other: 0);
		if (depth > path[0].maxdepth) {
			ext4_free(pointer: path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		int32_t path_depth = depth + 1;
		/* account possible depth increase */
		path = ext4_calloc(count: 1, size: sizeof(struct ext4_extent_path) *
				   (path_depth + 1));
		if (!path)
			return ENOMEM;
		path[0].maxdepth = path_depth;
	}
	path[0].header = eh;
	path[0].block = bh;

	i = depth;
	/* walk through the tree; 'i' counts levels left, 'ppos' indexes
	 * the path entry being filled */
	while (i) {
		ext4_ext_binsearch_idx(path: path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(ix: path[ppos].index);
		path[ppos].depth = i;
		path[ppos].extent = NULL;
		buf_block = path[ppos].p_block;

		i--;
		ppos++;
		/* Only read the child block if the (possibly reused) path
		 * entry does not already hold that very block. */
		if (!path[ppos].block.lb_id ||
		    path[ppos].block.lb_id != buf_block) {
			ret = read_extent_tree_block(inode_ref, pblk: buf_block, depth: i,
						     bh: &bh, flags);
			if (ret != EOK) {
				goto err;
			}
			/* Deeper than the root header promised: corrupt tree. */
			if (ppos > depth) {
				ext4_block_set(bdev: inode_ref->fs->bdev, b: &bh);
				ret = EIO;
				goto err;
			}

			eh = ext_block_hdr(block: &bh);
			path[ppos].block = bh;
			path[ppos].header = eh;
		}
	}

	path[ppos].depth = i;
	path[ppos].extent = NULL;
	path[ppos].index = NULL;

	/* find extent */
	ext4_ext_binsearch(path: path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].extent)
		path[ppos].p_block = ext4_ext_pblock(ex: path[ppos].extent);

	*orig_path = path;

	ret = EOK;
	return ret;

err:
	ext4_ext_drop_refs(inode_ref, path, keep_other: 0);
	ext4_free(pointer: path);
	if (orig_path)
		*orig_path = NULL;
	return ret;
}
940
941static void ext4_ext_init_header(struct ext4_inode_ref *inode_ref,
942 struct ext4_extent_header *eh, int32_t depth)
943{
944 eh->entries_count = 0;
945 eh->max_entries_count = to_le16(ext4_ext_max_entries(inode_ref, depth));
946 eh->magic = to_le16(EXT4_EXTENT_MAGIC);
947 eh->depth = depth;
948}
949
/**@brief Insert a new index entry (insert_index -> insert_block) into the
 * node at path[at], keeping entries sorted by first_block.
 * @param set_to_ix When true and insertion succeeds, point curp at the
 *        new entry and update its p_block.
 * @return EOK, or EIO on duplicate key / full node / bounds violation. */
static int ext4_ext_insert_index(struct ext4_inode_ref *inode_ref,
				 struct ext4_extent_path *path, int at,
				 ext4_lblk_t insert_index,
				 ext4_fsblk_t insert_block, bool set_to_ix)
{
	struct ext4_extent_index *ix;
	struct ext4_extent_path *curp = path + at;
	int len, err;
	struct ext4_extent_header *eh;

	/* Duplicate keys are not allowed. */
	if (curp->index && insert_index == to_le32(curp->index->first_block))
		return EIO;

	/* Caller must split the node first if it is full. */
	if (to_le16(curp->header->entries_count) ==
	    to_le16(curp->header->max_entries_count))
		return EIO;

	eh = curp->header;
	if (curp->index == NULL) {
		/* Empty node: new entry becomes the first one. */
		ix = EXT_FIRST_INDEX(eh);
		curp->index = ix;
	} else if (insert_index > to_le32(curp->index->first_block)) {
		/* insert after */
		ix = curp->index + 1;
	} else {
		/* insert before */
		ix = curp->index;
	}

	if (ix > EXT_MAX_INDEX(eh))
		return EIO;

	/* Shift the tail of the array right by one slot to open a gap. */
	len = EXT_LAST_INDEX(eh) - ix + 1;
	ext4_assert(len >= 0);
	if (len > 0)
		memmove(dest: ix + 1, src: ix, size: len * sizeof(struct ext4_extent_index));

	ix->first_block = to_le32(insert_index);
	ext4_idx_store_pblock(ix, pb: insert_block);
	eh->entries_count = to_le16(to_le16(eh->entries_count) + 1);

	/* Sanity: the slot we wrote must lie inside the (grown) array. */
	if (ix > EXT_LAST_INDEX(eh)) {
		err = EIO;
		goto out;
	}

	err = ext4_ext_dirty(inode_ref, path: curp);

out:
	if (err == EOK && set_to_ix) {
		curp->index = ix;
		curp->p_block = ext4_idx_pblock(ix);
	}
	return err;
}
1005
1006static int ext4_ext_split_node(struct ext4_inode_ref *inode_ref,
1007 struct ext4_extent_path *path, int at,
1008 struct ext4_extent *newext,
1009 struct ext4_extent_path *npath,
1010 bool *ins_right_leaf)
1011{
1012 int i, npath_at, ret;
1013 ext4_lblk_t insert_index;
1014 ext4_fsblk_t newblock = 0;
1015 int depth = ext_depth(inode: inode_ref->inode);
1016 npath_at = depth - at;
1017
1018 ext4_assert(at > 0);
1019
1020 if (path[depth].extent != EXT_MAX_EXTENT(path[depth].header))
1021 insert_index = path[depth].extent[1].first_block;
1022 else
1023 insert_index = newext->first_block;
1024
1025 for (i = depth; i >= at; i--, npath_at--) {
1026 struct ext4_block bh = EXT4_BLOCK_ZERO();
1027
1028 /* FIXME: currently we split at the point after the current
1029 * extent. */
1030 newblock =
1031 ext4_ext_new_meta_block(inode_ref, path, ex: newext, err: &ret, flags: 0);
1032 if (ret != EOK)
1033 goto cleanup;
1034
1035 /* For write access.*/
1036 ret = ext4_trans_block_get_noread(bdev: inode_ref->fs->bdev, b: &bh,
1037 lba: newblock);
1038 if (ret != EOK)
1039 goto cleanup;
1040
1041 if (i == depth) {
1042 /* start copy from next extent */
1043 int m = EXT_MAX_EXTENT(path[i].header) - path[i].extent;
1044 struct ext4_extent_header *neh;
1045 struct ext4_extent *ex;
1046 neh = ext_block_hdr(block: &bh);
1047 ex = EXT_FIRST_EXTENT(neh);
1048 ext4_ext_init_header(inode_ref, eh: neh, depth: 0);
1049 if (m) {
1050 memmove(dest: ex, src: path[i].extent + 1,
1051 size: sizeof(struct ext4_extent) * m);
1052 neh->entries_count =
1053 to_le16(to_le16(neh->entries_count) + m);
1054 path[i].header->entries_count = to_le16(
1055 to_le16(path[i].header->entries_count) - m);
1056 ret = ext4_ext_dirty(inode_ref, path: path + i);
1057 if (ret != EOK)
1058 goto cleanup;
1059
1060 npath[npath_at].p_block = ext4_ext_pblock(ex);
1061 npath[npath_at].extent = ex;
1062 } else {
1063 npath[npath_at].p_block = 0;
1064 npath[npath_at].extent = NULL;
1065 }
1066
1067 npath[npath_at].depth = to_le16(neh->depth);
1068 npath[npath_at].maxdepth = 0;
1069 npath[npath_at].index = NULL;
1070 npath[npath_at].header = neh;
1071 npath[npath_at].block = bh;
1072
1073 ext4_trans_set_block_dirty(buf: bh.buf);
1074 } else {
1075 int m = EXT_MAX_INDEX(path[i].header) - path[i].index;
1076 struct ext4_extent_header *neh;
1077 struct ext4_extent_index *ix;
1078 neh = ext_block_hdr(block: &bh);
1079 ix = EXT_FIRST_INDEX(neh);
1080 ext4_ext_init_header(inode_ref, eh: neh, depth: depth - i);
1081 ix->first_block = to_le32(insert_index);
1082 ext4_idx_store_pblock(ix,
1083 pb: npath[npath_at + 1].block.lb_id);
1084 neh->entries_count = to_le16(1);
1085 if (m) {
1086 memmove(dest: ix + 1, src: path[i].index + 1,
1087 size: sizeof(struct ext4_extent) * m);
1088 neh->entries_count =
1089 to_le16(to_le16(neh->entries_count) + m);
1090 path[i].header->entries_count = to_le16(
1091 to_le16(path[i].header->entries_count) - m);
1092 ret = ext4_ext_dirty(inode_ref, path: path + i);
1093 if (ret != EOK)
1094 goto cleanup;
1095 }
1096
1097 npath[npath_at].p_block = ext4_idx_pblock(ix);
1098 npath[npath_at].depth = to_le16(neh->depth);
1099 npath[npath_at].maxdepth = 0;
1100 npath[npath_at].extent = NULL;
1101 npath[npath_at].index = ix;
1102 npath[npath_at].header = neh;
1103 npath[npath_at].block = bh;
1104
1105 ext4_trans_set_block_dirty(buf: bh.buf);
1106 }
1107 }
1108 newblock = 0;
1109
1110 /*
1111 * If newext->first_block can be included into the
1112 * right sub-tree.
1113 */
1114 if (to_le32(newext->first_block) < insert_index)
1115 *ins_right_leaf = false;
1116 else
1117 *ins_right_leaf = true;
1118
1119 ret = ext4_ext_insert_index(inode_ref, path, at: at - 1, insert_index,
1120 insert_block: npath[0].block.lb_id, set_to_ix: *ins_right_leaf);
1121
1122cleanup:
1123 if (ret != EOK) {
1124 if (newblock)
1125 ext4_ext_free_blocks(inode_ref, block: newblock, count: 1, flags: 0);
1126
1127 npath_at = depth - at;
1128 while (npath_at >= 0) {
1129 if (npath[npath_at].block.lb_id) {
1130 newblock = npath[npath_at].block.lb_id;
1131 ext4_block_set(bdev: inode_ref->fs->bdev,
1132 b: &npath[npath_at].block);
1133 ext4_ext_free_blocks(inode_ref, block: newblock, count: 1, flags: 0);
1134 memset(dest: &npath[npath_at].block, c: 0,
1135 size: sizeof(struct ext4_block));
1136 }
1137 npath_at--;
1138 }
1139 }
1140 return ret;
1141}
1142
1143/*
1144 * ext4_ext_correct_indexes:
1145 * if leaf gets modified and modified extent is first in the leaf,
1146 * then we have to correct all indexes above.
1147 */
1148static int ext4_ext_correct_indexes(struct ext4_inode_ref *inode_ref,
1149 struct ext4_extent_path *path)
1150{
1151 struct ext4_extent_header *eh;
1152 int32_t depth = ext_depth(inode: inode_ref->inode);
1153 struct ext4_extent *ex;
1154 uint32_t border;
1155 int32_t k;
1156 int err = EOK;
1157
1158 eh = path[depth].header;
1159 ex = path[depth].extent;
1160
1161 if (ex == NULL || eh == NULL)
1162 return EIO;
1163
1164 if (depth == 0) {
1165 /* there is no tree at all */
1166 return EOK;
1167 }
1168
1169 if (ex != EXT_FIRST_EXTENT(eh)) {
1170 /* we correct tree if first leaf got modified only */
1171 return EOK;
1172 }
1173
1174 k = depth - 1;
1175 border = path[depth].extent->first_block;
1176 path[k].index->first_block = border;
1177 err = ext4_ext_dirty(inode_ref, path: path + k);
1178 if (err != EOK)
1179 return err;
1180
1181 while (k--) {
1182 /* change all left-side indexes */
1183 if (path[k + 1].index != EXT_FIRST_INDEX(path[k + 1].header))
1184 break;
1185 path[k].index->first_block = border;
1186 err = ext4_ext_dirty(inode_ref, path: path + k);
1187 if (err != EOK)
1188 break;
1189 }
1190
1191 return err;
1192}
1193
1194static inline bool ext4_ext_can_prepend(struct ext4_extent *ex1,
1195 struct ext4_extent *ex2)
1196{
1197 if (ext4_ext_pblock(ex: ex2) + ext4_ext_get_actual_len(ext: ex2) !=
1198 ext4_ext_pblock(ex: ex1))
1199 return 0;
1200
1201#ifdef AGGRESSIVE_TEST
1202 if (ext4_ext_get_actual_len(ex1) + ext4_ext_get_actual_len(ex2) > 4)
1203 return 0;
1204#else
1205 if (ext4_ext_is_unwritten(ext: ex1)) {
1206 if (ext4_ext_get_actual_len(ext: ex1) +
1207 ext4_ext_get_actual_len(ext: ex2) >
1208 EXT_UNWRITTEN_MAX_LEN)
1209 return 0;
1210 } else if (ext4_ext_get_actual_len(ext: ex1) + ext4_ext_get_actual_len(ext: ex2) >
1211 EXT_INIT_MAX_LEN)
1212 return 0;
1213#endif
1214
1215 if (to_le32(ex2->first_block) + ext4_ext_get_actual_len(ext: ex2) !=
1216 to_le32(ex1->first_block))
1217 return 0;
1218
1219 return 1;
1220}
1221
1222static inline bool ext4_ext_can_append(struct ext4_extent *ex1,
1223 struct ext4_extent *ex2)
1224{
1225 if (ext4_ext_pblock(ex: ex1) + ext4_ext_get_actual_len(ext: ex1) !=
1226 ext4_ext_pblock(ex: ex2))
1227 return 0;
1228
1229#ifdef AGGRESSIVE_TEST
1230 if (ext4_ext_get_actual_len(ex1) + ext4_ext_get_actual_len(ex2) > 4)
1231 return 0;
1232#else
1233 if (ext4_ext_is_unwritten(ext: ex1)) {
1234 if (ext4_ext_get_actual_len(ext: ex1) +
1235 ext4_ext_get_actual_len(ext: ex2) >
1236 EXT_UNWRITTEN_MAX_LEN)
1237 return 0;
1238 } else if (ext4_ext_get_actual_len(ext: ex1) + ext4_ext_get_actual_len(ext: ex2) >
1239 EXT_INIT_MAX_LEN)
1240 return 0;
1241#endif
1242
1243 if (to_le32(ex1->first_block) + ext4_ext_get_actual_len(ext: ex1) !=
1244 to_le32(ex2->first_block))
1245 return 0;
1246
1247 return 1;
1248}
1249
/*
 * ext4_ext_insert_leaf: insert @newext into the leaf node at @path[@at].
 *
 * Unless EXT4_EXT_NO_COMBINE is set, first try to merge @newext into
 * the extent the path currently points at (append, then prepend).
 * If the node is full, *need_split is set to true and EIO is returned
 * so the caller can split the tree and retry.  On success the path's
 * extent/p_block are updated to point at the inserted (or merged)
 * extent.
 */
static int ext4_ext_insert_leaf(struct ext4_inode_ref *inode_ref,
				struct ext4_extent_path *path, int at,
				struct ext4_extent *newext, int flags,
				bool *need_split)
{
	struct ext4_extent_path *curp = path + at;
	struct ext4_extent *ex = curp->extent;
	int len, err, unwritten;
	struct ext4_extent_header *eh;

	*need_split = false;

	/* Refuse a duplicate: an extent with the same logical start
	 * must not already exist at the insertion point. */
	if (curp->extent &&
	    to_le32(newext->first_block) == to_le32(curp->extent->first_block))
		return EIO;

	if (!(flags & EXT4_EXT_NO_COMBINE)) {
		/* Try to extend the current extent forward over @newext,
		 * preserving its unwritten state (the length store
		 * clears the unwritten bit, so re-mark afterwards). */
		if (curp->extent && ext4_ext_can_append(ex1: curp->extent, ex2: newext)) {
			unwritten = ext4_ext_is_unwritten(ext: curp->extent);
			curp->extent->block_count =
			    to_le16(ext4_ext_get_actual_len(curp->extent) +
				    ext4_ext_get_actual_len(newext));
			if (unwritten)
				ext4_ext_mark_unwritten(ext: curp->extent);

			err = ext4_ext_dirty(inode_ref, path: curp);
			goto out;
		}

		/* Otherwise try to grow the current extent backward,
		 * moving its logical start to @newext's. */
		if (curp->extent &&
		    ext4_ext_can_prepend(ex1: curp->extent, ex2: newext)) {
			unwritten = ext4_ext_is_unwritten(ext: curp->extent);
			curp->extent->first_block = newext->first_block;
			curp->extent->block_count =
			    to_le16(ext4_ext_get_actual_len(curp->extent) +
				    ext4_ext_get_actual_len(newext));
			if (unwritten)
				ext4_ext_mark_unwritten(ext: curp->extent);

			err = ext4_ext_dirty(inode_ref, path: curp);
			goto out;
		}
	}

	/* No merge possible: a real slot is needed.  A full node means
	 * the caller must split first. */
	if (to_le16(curp->header->entries_count) ==
	    to_le16(curp->header->max_entries_count)) {
		err = EIO;
		*need_split = true;
		goto out;
	} else {
		eh = curp->header;
		if (curp->extent == NULL) {
			ex = EXT_FIRST_EXTENT(eh);
			curp->extent = ex;
		} else if (to_le32(newext->first_block) >
			   to_le32(curp->extent->first_block)) {
			/* insert after */
			ex = curp->extent + 1;
		} else {
			/* insert before */
			ex = curp->extent;
		}
	}

	/* Shift the tail of the node one slot right to open a gap. */
	len = EXT_LAST_EXTENT(eh) - ex + 1;
	ext4_assert(len >= 0);
	if (len > 0)
		memmove(dest: ex + 1, src: ex, size: len * sizeof(struct ext4_extent));

	if (ex > EXT_MAX_EXTENT(eh)) {
		err = EIO;
		goto out;
	}

	ex->first_block = newext->first_block;
	ex->block_count = newext->block_count;
	ext4_ext_store_pblock(ex, pb: ext4_ext_pblock(ex: newext));
	eh->entries_count = to_le16(to_le16(eh->entries_count) + 1);

	if (ex > EXT_LAST_EXTENT(eh)) {
		err = EIO;
		goto out;
	}

	/* If the node's first extent changed, fix parent indexes too. */
	err = ext4_ext_correct_indexes(inode_ref, path);
	if (err != EOK)
		goto out;
	err = ext4_ext_dirty(inode_ref, path: curp);

out:
	if (err == EOK) {
		curp->extent = ex;
		curp->p_block = ext4_ext_pblock(ex);
	}

	return err;
}
1347
1348/*
1349 * ext4_ext_grow_indepth:
1350 * implements tree growing procedure:
1351 * - allocates new block
1352 * - moves top-level data (index block or leaf) into the new block
1353 * - initializes new top-level, creating index that points to the
1354 * just created block
1355 */
1356static int ext4_ext_grow_indepth(struct ext4_inode_ref *inode_ref,
1357 uint32_t flags)
1358{
1359 struct ext4_extent_header *neh;
1360 struct ext4_block bh = EXT4_BLOCK_ZERO();
1361 ext4_fsblk_t newblock, goal = 0;
1362 int err = EOK;
1363
1364 /* Try to prepend new index to old one */
1365 if (ext_depth(inode: inode_ref->inode))
1366 goal = ext4_idx_pblock(
1367 EXT_FIRST_INDEX(ext_inode_hdr(inode_ref->inode)));
1368 else
1369 goal = ext4_fs_inode_to_goal_block(inode_ref);
1370
1371 newblock = ext4_new_meta_blocks(inode_ref, goal, flags, NULL, errp: &err);
1372 if (newblock == 0)
1373 return err;
1374
1375 /* # */
1376 err = ext4_trans_block_get_noread(bdev: inode_ref->fs->bdev, b: &bh, lba: newblock);
1377 if (err != EOK) {
1378 ext4_ext_free_blocks(inode_ref, block: newblock, count: 1, flags: 0);
1379 return err;
1380 }
1381
1382 /* move top-level index/leaf into new block */
1383 memmove(dest: bh.data, src: inode_ref->inode->blocks,
1384 size: sizeof(inode_ref->inode->blocks));
1385
1386 /* set size of new block */
1387 neh = ext_block_hdr(block: &bh);
1388 /* old root could have indexes or leaves
1389 * so calculate e_max right way */
1390 if (ext_depth(inode: inode_ref->inode))
1391 neh->max_entries_count =
1392 to_le16(ext4_ext_space_block_idx(inode_ref));
1393 else
1394 neh->max_entries_count =
1395 to_le16(ext4_ext_space_block(inode_ref));
1396
1397 neh->magic = to_le16(EXT4_EXTENT_MAGIC);
1398 ext4_extent_block_csum_set(inode_ref, eh: neh);
1399
1400 /* Update top-level index: num,max,pointer */
1401 neh = ext_inode_hdr(inode: inode_ref->inode);
1402 neh->entries_count = to_le16(1);
1403 ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), pb: newblock);
1404 if (neh->depth == 0) {
1405 /* Root extent block becomes index block */
1406 neh->max_entries_count =
1407 to_le16(ext4_ext_space_root_idx(inode_ref));
1408 EXT_FIRST_INDEX(neh)
1409 ->first_block = EXT_FIRST_EXTENT(neh)->first_block;
1410 }
1411 neh->depth = to_le16(to_le16(neh->depth) + 1);
1412
1413 ext4_trans_set_block_dirty(buf: bh.buf);
1414 inode_ref->dirty = true;
1415 ext4_block_set(bdev: inode_ref->fs->bdev, b: &bh);
1416
1417 return err;
1418}
1419
1420static inline void ext4_ext_replace_path(struct ext4_inode_ref *inode_ref,
1421 struct ext4_extent_path *path,
1422 struct ext4_extent_path *newpath,
1423 int at)
1424{
1425 ext4_ext_drop_refs(inode_ref, path: path + at, keep_other: 1);
1426 path[at] = *newpath;
1427 memset(dest: newpath, c: 0, size: sizeof(struct ext4_extent_path));
1428}
1429
/*
 * ext4_ext_insert_extent: insert @newext into the tree rooted at
 * *ppath.
 *
 * First try a plain leaf insert.  When the leaf is full, find the
 * lowest ancestor with a free index slot (growing the tree by one
 * level if none exists), split every node from the leaf up to that
 * ancestor, splice the new sub-tree into the path, and retry.
 */
int ext4_ext_insert_extent(struct ext4_inode_ref *inode_ref,
			   struct ext4_extent_path **ppath,
			   struct ext4_extent *newext, int flags)
{
	int depth, level = 0, ret = 0;
	struct ext4_extent_path *path = *ppath;
	struct ext4_extent_path *npath = NULL;
	bool ins_right_leaf = false;
	bool need_split;

again:
	depth = ext_depth(inode: inode_ref->inode);
	ret = ext4_ext_insert_leaf(inode_ref, path, at: depth, newext, flags,
				   need_split: &need_split);
	if (ret == EIO && need_split == true) {
		int i;
		/* Walk up from the leaf to the first node that still
		 * has a free index slot; level counts how many nodes
		 * below it must be split. */
		for (i = depth, level = 0; i >= 0; i--, level++)
			if (EXT_HAS_FREE_INDEX(path + i))
				break;

		/* Do we need to grow the tree? */
		if (i < 0) {
			ret = ext4_ext_grow_indepth(inode_ref, flags: 0);
			if (ret != EOK)
				goto out;

			/* The old path is stale after growing; re-find. */
			ret = ext4_find_extent(
			    inode_ref, to_le32(newext->first_block), orig_path: ppath, flags: 0);
			if (ret != EOK)
				goto out;

			path = *ppath;
			/*
			 * After growing the tree, there should be free space in
			 * the only child node of the root.
			 */
			level--;
			depth++;
		}

		i = depth - (level - 1);
		/* We split from leaf to the i-th node */
		if (level > 0) {
			npath = ext4_calloc(count: 1, size: sizeof(struct ext4_extent_path) *
					       (level));
			if (!npath) {
				ret = ENOMEM;
				goto out;
			}
			ret = ext4_ext_split_node(inode_ref, path, at: i, newext,
						  npath, ins_right_leaf: &ins_right_leaf);
			if (ret != EOK)
				goto out;

			/* Adopt the new right sub-tree into the path when
			 * @newext belongs there; otherwise just drop the
			 * temporary references. */
			while (--level >= 0) {
				if (ins_right_leaf)
					ext4_ext_replace_path(inode_ref, path,
							      newpath: &npath[level],
							      at: i + level);
				else if (npath[level].block.lb_id)
					ext4_ext_drop_refs(inode_ref,
							   path: npath + level, keep_other: 1);
			}
		}
		/* Retry the insert now that there is room. */
		goto again;
	}

out:
	if (ret != EOK) {
		if (path)
			ext4_ext_drop_refs(inode_ref, path, keep_other: 0);

		/* Free any split blocks that were never adopted. */
		while (--level >= 0 && npath) {
			if (npath[level].block.lb_id) {
				ext4_fsblk_t block = npath[level].block.lb_id;
				ext4_ext_free_blocks(inode_ref, block, count: 1, flags: 0);
				ext4_ext_drop_refs(inode_ref, path: npath + level, keep_other: 1);
			}
		}
	}
	if (npath)
		ext4_free(pointer: npath);

	return ret;
}
1515
1516static void ext4_ext_remove_blocks(struct ext4_inode_ref *inode_ref,
1517 struct ext4_extent *ex, ext4_lblk_t from,
1518 ext4_lblk_t to)
1519{
1520 ext4_lblk_t len = to - from + 1;
1521 ext4_lblk_t num;
1522 ext4_fsblk_t start;
1523 num = from - to_le32(ex->first_block);
1524 start = ext4_ext_pblock(ex) + num;
1525 ext4_dbg(DEBUG_EXTENT,
1526 "Freeing %" PRIu32 " at %" PRIu64 ", %" PRIu32 "\n", from,
1527 start, len);
1528
1529 ext4_ext_free_blocks(inode_ref, block: start, count: len, flags: 0);
1530}
1531
/*
 * ext4_ext_remove_idx: remove the index entry at @path[@depth] and
 * free the child block it points to, then propagate a changed first
 * entry up through the ancestors.
 */
static int ext4_ext_remove_idx(struct ext4_inode_ref *inode_ref,
			       struct ext4_extent_path *path, int32_t depth)
{
	int err = EOK;
	int32_t i = depth;
	ext4_fsblk_t leaf;

	/* free index block */
	leaf = ext4_idx_pblock(ix: path[i].index);

	/* Close the gap by shifting the following entries left. */
	if (path[i].index != EXT_LAST_INDEX(path[i].header)) {
		ptrdiff_t len = EXT_LAST_INDEX(path[i].header) - path[i].index;
		memmove(dest: path[i].index, src: path[i].index + 1,
			size: len * sizeof(struct ext4_extent_index));
	}

	path[i].header->entries_count =
	    to_le16(to_le16(path[i].header->entries_count) - 1);
	err = ext4_ext_dirty(inode_ref, path: path + i);
	if (err != EOK)
		return err;

	ext4_dbg(DEBUG_EXTENT, "IDX: Freeing %" PRIu32 " at %" PRIu64 ", %d\n",
		 to_le32(path[i].index->first_block), leaf, 1);
	ext4_ext_free_blocks(inode_ref, block: leaf, count: 1, flags: 0);

	/*
	 * We may need to correct the paths after the first extents/indexes in
	 * a node being modified.
	 *
	 * We do not need to consider whether there's any extents presenting or
	 * not, as garbage will be cleared soon.
	 */
	while (i > 0) {
		if (path[i].index != EXT_FIRST_INDEX(path[i].header))
			break;

		/* The node's first entry changed: mirror it in the parent. */
		path[i - 1].index->first_block = path[i].index->first_block;
		err = ext4_ext_dirty(inode_ref, path: path + i - 1);
		if (err != EOK)
			break;

		i--;
	}
	return err;
}
1578
/*
 * ext4_ext_remove_leaf: remove logical blocks [@from, @to] from the
 * leaf node at @path[depth].
 *
 * Extents fully inside the range are freed and dropped; an extent that
 * only partially overlaps the range is trimmed (its first or last part
 * survives).  Surviving tail extents are compacted to the front of the
 * node, parent indexes are corrected if the leaf's first extent
 * changed, and an emptied non-root leaf is unlinked from its parent.
 */
static int ext4_ext_remove_leaf(struct ext4_inode_ref *inode_ref,
				struct ext4_extent_path *path, ext4_lblk_t from,
				ext4_lblk_t to)
{

	int32_t depth = ext_depth(inode: inode_ref->inode);
	struct ext4_extent *ex = path[depth].extent;
	struct ext4_extent *start_ex, *ex2 = NULL;
	struct ext4_extent_header *eh = path[depth].header;
	int32_t len;
	int err = EOK;
	uint16_t new_entries;

	/* start_ex: first slot kept for compaction; ex2: first extent
	 * that survives past the removed range. */
	start_ex = ex;
	new_entries = to_le16(eh->entries_count);
	while (ex <= EXT_LAST_EXTENT(path[depth].header) &&
	       to_le32(ex->first_block) <= to) {
		int32_t new_len = 0;
		int unwritten;
		ext4_lblk_t start, new_start;
		ext4_fsblk_t newblock;
		new_start = start = to_le32(ex->first_block);
		len = ext4_ext_get_actual_len(ext: ex);
		newblock = ext4_ext_pblock(ex);
		/*
		 * The 1st case:
		 *   The position that we start truncation is inside the range of an
		 *   extent. Here we should calculate the new length of that extent and
		 *   may start the removal from the next extent.
		 */
		if (start < from) {
			len -= from - start;
			new_len = from - start;
			start = from;
			start_ex++;
		} else {
			/*
			 * The second case:
			 *   The last block to be truncated is inside the range of an
			 *   extent. We need to calculate the new length and the new
			 *   start of the extent.
			 */
			if (start + len - 1 > to) {
				new_len = start + len - 1 - to;
				len -= new_len;
				new_start = to + 1;
				newblock += to + 1 - start;
				ex2 = ex;
			}
		}

		/* Free the physical blocks of the removed middle part. */
		ext4_ext_remove_blocks(inode_ref, ex, from: start, to: start + len - 1);
		/*
		 * Set the first block of the extent if it is presented.
		 */
		ex->first_block = to_le32(new_start);

		/*
		 * If the new length of the current extent we are working on is
		 * zero, remove it.
		 */
		if (!new_len)
			new_entries--;
		else {
			/* Trim the extent, preserving its unwritten state
			 * (the length store clears the unwritten bit). */
			unwritten = ext4_ext_is_unwritten(ext: ex);
			ex->block_count = to_le16(new_len);
			ext4_ext_store_pblock(ex, pb: newblock);
			if (unwritten)
				ext4_ext_mark_unwritten(ext: ex);
		}

		ex += 1;
	}

	if (ex2 == NULL)
		ex2 = ex;

	/*
	 * Move any remaining extents to the starting position of the node.
	 */
	if (ex2 <= EXT_LAST_EXTENT(eh))
		memmove(dest: start_ex, src: ex2, size: (EXT_LAST_EXTENT(eh) - ex2 + 1) *
					     sizeof(struct ext4_extent));

	eh->entries_count = to_le16(new_entries);
	ext4_ext_dirty(inode_ref, path: path + depth);

	/*
	 * If the extent pointer is pointed to the first extent of the node, and
	 * there's still extents presenting, we may need to correct the indexes
	 * of the paths.
	 */
	if (path[depth].extent == EXT_FIRST_EXTENT(eh) && eh->entries_count) {
		err = ext4_ext_correct_indexes(inode_ref, path);
		if (err != EOK)
			return err;
	}

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (eh->entries_count == 0 && path[depth].block.lb_id)
		err = ext4_ext_remove_idx(inode_ref, path, depth: depth - 1);
	else if (depth > 0)
		path[depth - 1].index++;

	return err;
}
1686
1687/*
1688 * Check if there's more to remove at a specific level.
1689 */
1690static bool ext4_ext_more_to_rm(struct ext4_extent_path *path, ext4_lblk_t to)
1691{
1692 if (!to_le16(path->header->entries_count))
1693 return false;
1694
1695 if (path->index > EXT_LAST_INDEX(path->header))
1696 return false;
1697
1698 if (to_le32(path->index->first_block) > to)
1699 return false;
1700
1701 return true;
1702}
1703
/*
 * ext4_extent_remove_space: remove logical blocks [@from, @to] from
 * the inode's extent tree.
 *
 * If the range falls strictly inside one extent, that extent is split
 * into two around the hole.  Otherwise the tree is walked depth-first:
 * leaves are trimmed with ext4_ext_remove_leaf(), emptied index nodes
 * are unlinked, and if the whole tree becomes empty the root is reset
 * to depth 0.
 */
int ext4_extent_remove_space(struct ext4_inode_ref *inode_ref, ext4_lblk_t from,
			     ext4_lblk_t to)
{
	struct ext4_extent_path *path = NULL;
	int ret = EOK;
	int32_t depth = ext_depth(inode: inode_ref->inode);
	int32_t i;

	ret = ext4_find_extent(inode_ref, block: from, orig_path: &path, flags: 0);
	if (ret != EOK)
		goto out;

	/* No extent at @from: nothing to remove. */
	if (!path[depth].extent) {
		ret = EOK;
		goto out;
	}

	bool in_range = IN_RANGE(from, to_le32(path[depth].extent->first_block),
				 ext4_ext_get_actual_len(path[depth].extent));

	if (!in_range) {
		ret = EOK;
		goto out;
	}

	/* If we do remove_space inside the range of an extent */
	if ((to_le32(path[depth].extent->first_block) < from) &&
	    (to < to_le32(path[depth].extent->first_block) +
		      ext4_ext_get_actual_len(ext: path[depth].extent) - 1)) {

		/* Punch a hole inside one extent: shrink it to the part
		 * before @from and insert a new extent for the part
		 * after @to, keeping the unwritten state of both. */
		struct ext4_extent *ex = path[depth].extent, newex;
		int unwritten = ext4_ext_is_unwritten(ext: ex);
		ext4_lblk_t ee_block = to_le32(ex->first_block);
		int32_t len = ext4_ext_get_actual_len(ext: ex);
		ext4_fsblk_t newblock = to + 1 - ee_block + ext4_ext_pblock(ex);

		ex->block_count = to_le16(from - ee_block);
		if (unwritten)
			ext4_ext_mark_unwritten(ext: ex);

		ext4_ext_dirty(inode_ref, path: path + depth);

		newex.first_block = to_le32(to + 1);
		newex.block_count = to_le16(ee_block + len - 1 - to);
		ext4_ext_store_pblock(ex: &newex, pb: newblock);
		if (unwritten)
			ext4_ext_mark_unwritten(ext: &newex);

		ret = ext4_ext_insert_extent(inode_ref, ppath: &path, newext: &newex, flags: 0);
		goto out;
	}

	/* Depth-first walk: i is the current level (depth == leaf). */
	i = depth;
	while (i >= 0) {
		if (i == depth) {
			/* Leaf level: clamp the removal range to what this
			 * leaf actually covers and trim it. */
			struct ext4_extent_header *eh;
			struct ext4_extent *first_ex, *last_ex;
			ext4_lblk_t leaf_from, leaf_to;
			eh = path[i].header;
			ext4_assert(to_le16(eh->entries_count) > 0);
			first_ex = EXT_FIRST_EXTENT(eh);
			last_ex = EXT_LAST_EXTENT(eh);
			leaf_from = to_le32(first_ex->first_block);
			leaf_to = to_le32(last_ex->first_block) +
				  ext4_ext_get_actual_len(ext: last_ex) - 1;
			if (leaf_from < from)
				leaf_from = from;

			if (leaf_to > to)
				leaf_to = to;

			ext4_ext_remove_leaf(inode_ref, path, from: leaf_from,
					     to: leaf_to);
			ext4_ext_drop_refs(inode_ref, path: path + i, keep_other: 0);
			i--;
			continue;
		}

		struct ext4_extent_header *eh;
		eh = path[i].header;
		if (ext4_ext_more_to_rm(path: path + i, to)) {
			/* Descend into the next child of this index node. */
			struct ext4_block bh = EXT4_BLOCK_ZERO();
			if (path[i + 1].block.lb_id)
				ext4_ext_drop_refs(inode_ref, path: path + i + 1, keep_other: 0);

			ret = read_extent_tree_block(
			    inode_ref, pblk: ext4_idx_pblock(ix: path[i].index),
			    depth: depth - i - 1, bh: &bh, flags: 0);
			if (ret != EOK)
				goto out;

			path[i].p_block = ext4_idx_pblock(ix: path[i].index);
			path[i + 1].block = bh;
			path[i + 1].header = ext_block_hdr(block: &bh);
			path[i + 1].depth = depth - i - 1;
			if (i + 1 == depth)
				path[i + 1].extent =
				    EXT_FIRST_EXTENT(path[i + 1].header);
			else
				path[i + 1].index =
				    EXT_FIRST_INDEX(path[i + 1].header);

			i++;
		} else {
			/* This node is finished: unlink it if emptied,
			 * otherwise advance the parent to its next child. */
			if (i > 0) {
				/*
				 * Garbage entries will finally be cleared here.
				 */
				if (!eh->entries_count)
					ret = ext4_ext_remove_idx(inode_ref,
								  path, depth: i - 1);
				else
					path[i - 1].index++;
			}

			if (i)
				ext4_block_set(bdev: inode_ref->fs->bdev,
					       b: &path[i].block);

			i--;
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->header->entries_count == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		ext_inode_hdr(inode: inode_ref->inode)->depth = 0;
		ext_inode_hdr(inode: inode_ref->inode)->max_entries_count =
		    to_le16(ext4_ext_space_root(inode_ref));
		ret = ext4_ext_dirty(inode_ref, path);
	}

out:
	ext4_ext_drop_refs(inode_ref, path, keep_other: 0);
	ext4_free(pointer: path);
	path = NULL;
	return ret;
}
1845
/*
 * ext4_ext_split_extent_at: split the extent at (*ppath)[depth] at
 * logical block @split.
 *
 * @split_flag bits EXT4_EXT_MARK_UNWRIT1/2 select which half ends up
 * marked unwritten.  If @split equals the extent's first block, no
 * split is needed and only the extent's written state is updated.
 * On a failed insert of the right half, the original length is
 * restored.
 */
static int ext4_ext_split_extent_at(struct ext4_inode_ref *inode_ref,
				    struct ext4_extent_path **ppath,
				    ext4_lblk_t split, uint32_t split_flag)
{
	struct ext4_extent *ex, newex;
	ext4_fsblk_t newblock;
	ext4_lblk_t ee_block;
	int32_t ee_len;
	int32_t depth = ext_depth(inode: inode_ref->inode);
	int err = EOK;

	ex = (*ppath)[depth].extent;
	ee_block = to_le32(ex->first_block);
	ee_len = ext4_ext_get_actual_len(ext: ex);
	/* Physical block where the right half will start. */
	newblock = split - ee_block + ext4_ext_pblock(ex);

	if (split == ee_block) {
		/*
		 * case b: block @split is the block that the extent begins with
		 * then we just change the state of the extent, and splitting
		 * is not needed.
		 */
		if (split_flag & EXT4_EXT_MARK_UNWRIT2)
			ext4_ext_mark_unwritten(ext: ex);
		else
			ext4_ext_mark_initialized(ext: ex);

		err = ext4_ext_dirty(inode_ref, path: *ppath + depth);
		goto out;
	}

	/* Shrink the original extent to the left half. */
	ex->block_count = to_le16(split - ee_block);
	if (split_flag & EXT4_EXT_MARK_UNWRIT1)
		ext4_ext_mark_unwritten(ext: ex);

	err = ext4_ext_dirty(inode_ref, path: *ppath + depth);
	if (err != EOK)
		goto out;

	/* Build and insert the right half as a separate extent;
	 * NO_COMBINE keeps it from being merged straight back. */
	newex.first_block = to_le32(split);
	newex.block_count = to_le16(ee_len - (split - ee_block));
	ext4_ext_store_pblock(ex: &newex, pb: newblock);
	if (split_flag & EXT4_EXT_MARK_UNWRIT2)
		ext4_ext_mark_unwritten(ext: &newex);
	err = ext4_ext_insert_extent(inode_ref, ppath, newext: &newex,
				     EXT4_EXT_NO_COMBINE);
	if (err != EOK)
		goto restore_extent_len;

out:
	return err;
restore_extent_len:
	/* Insert failed: undo the shrink of the original extent. */
	ex->block_count = to_le16(ee_len);
	err = ext4_ext_dirty(inode_ref, path: *ppath + depth);
	return err;
}
1902
1903static int ext4_ext_convert_to_initialized(struct ext4_inode_ref *inode_ref,
1904 struct ext4_extent_path **ppath,
1905 ext4_lblk_t split, uint32_t blocks)
1906{
1907 int32_t depth = ext_depth(inode: inode_ref->inode), err = EOK;
1908 struct ext4_extent *ex = (*ppath)[depth].extent;
1909
1910 ext4_assert(to_le32(ex->first_block) <= split);
1911
1912 if (split + blocks ==
1913 to_le32(ex->first_block) + ext4_ext_get_actual_len(ext: ex)) {
1914 /* split and initialize right part */
1915 err = ext4_ext_split_extent_at(inode_ref, ppath, split,
1916 EXT4_EXT_MARK_UNWRIT1);
1917 } else if (to_le32(ex->first_block) == split) {
1918 /* split and initialize left part */
1919 err = ext4_ext_split_extent_at(inode_ref, ppath, split: split + blocks,
1920 EXT4_EXT_MARK_UNWRIT2);
1921 } else {
1922 /* split 1 extent to 3 and initialize the 2nd */
1923 err = ext4_ext_split_extent_at(inode_ref, ppath, split: split + blocks,
1924 EXT4_EXT_MARK_UNWRIT1 |
1925 EXT4_EXT_MARK_UNWRIT2);
1926 if (err == EOK) {
1927 err = ext4_ext_split_extent_at(inode_ref, ppath, split,
1928 EXT4_EXT_MARK_UNWRIT1);
1929 }
1930 }
1931
1932 return err;
1933}
1934
1935static ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_extent_path *path)
1936{
1937 int32_t depth;
1938
1939 depth = path->depth;
1940
1941 if (depth == 0 && path->extent == NULL)
1942 return EXT_MAX_BLOCKS;
1943
1944 while (depth >= 0) {
1945 if (depth == path->depth) {
1946 /* leaf */
1947 if (path[depth].extent &&
1948 path[depth].extent !=
1949 EXT_LAST_EXTENT(path[depth].header))
1950 return to_le32(
1951 path[depth].extent[1].first_block);
1952 } else {
1953 /* index */
1954 if (path[depth].index !=
1955 EXT_LAST_INDEX(path[depth].header))
1956 return to_le32(
1957 path[depth].index[1].first_block);
1958 }
1959 depth--;
1960 }
1961
1962 return EXT_MAX_BLOCKS;
1963}
1964
1965static int ext4_ext_zero_unwritten_range(struct ext4_inode_ref *inode_ref,
1966 ext4_fsblk_t block,
1967 uint32_t blocks_count)
1968{
1969 int err = EOK;
1970 uint32_t i;
1971 uint32_t block_size = ext4_sb_get_block_size(s: &inode_ref->fs->sb);
1972 for (i = 0; i < blocks_count; i++) {
1973 struct ext4_block bh = EXT4_BLOCK_ZERO();
1974 err = ext4_trans_block_get_noread(bdev: inode_ref->fs->bdev, b: &bh,
1975 lba: block + i);
1976 if (err != EOK)
1977 break;
1978
1979 memset(dest: bh.data, c: 0, size: block_size);
1980 ext4_trans_set_block_dirty(buf: bh.buf);
1981 err = ext4_block_set(bdev: inode_ref->fs->bdev, b: &bh);
1982 if (err != EOK)
1983 break;
1984 }
1985 return err;
1986}
1987
1988__unused static void print_path(struct ext4_extent_path *path)
1989{
1990 int32_t i = path->depth;
1991 while (i >= 0) {
1992
1993 ptrdiff_t a =
1994 (path->extent)
1995 ? (path->extent - EXT_FIRST_EXTENT(path->header))
1996 : 0;
1997 ptrdiff_t b =
1998 (path->index)
1999 ? (path->index - EXT_FIRST_INDEX(path->header))
2000 : 0;
2001
2002 (void)a;
2003 (void)b;
2004 ext4_dbg(DEBUG_EXTENT,
2005 "depth %" PRId32 ", p_block: %" PRIu64 ","
2006 "p_ext offset: %td, p_idx offset: %td\n",
2007 i, path->p_block, a, b);
2008 i--;
2009 path++;
2010 }
2011}
2012
/*
 * ext4_extent_get_blocks: map logical block @iblock (and up to
 * @max_blocks following blocks) to physical blocks.
 *
 * If an extent already covers @iblock, its mapping is returned; a
 * covering unwritten extent is zero-filled and converted to written
 * first (only when @create is set).  If no extent covers @iblock and
 * @create is set, new blocks are allocated and inserted, bounded so
 * they do not overlap the next allocated extent.  On success *result
 * holds the first physical block and *blocks_count the mapped length.
 */
int ext4_extent_get_blocks(struct ext4_inode_ref *inode_ref, ext4_lblk_t iblock,
			   uint32_t max_blocks, ext4_fsblk_t *result,
			   bool create, uint32_t *blocks_count)
{
	struct ext4_extent_path *path = NULL;
	struct ext4_extent newex, *ex;
	ext4_fsblk_t goal;
	int err = EOK;
	int32_t depth;
	uint32_t allocated = 0;
	ext4_lblk_t next;
	ext4_fsblk_t newblock;

	if (result)
		*result = 0;

	if (blocks_count)
		*blocks_count = 0;

	/* find extent for this block */
	err = ext4_find_extent(inode_ref, block: iblock, orig_path: &path, flags: 0);
	if (err != EOK) {
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode: inode_ref->inode);

	/*
	 * consistent leaf must not be empty
	 * this situations is possible, though, _during_ tree modification
	 * this is why assert can't be put in ext4_ext_find_extent()
	 */
	ex = path[depth].extent;
	if (ex) {
		ext4_lblk_t ee_block = to_le32(ex->first_block);
		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
		uint16_t ee_len = ext4_ext_get_actual_len(ext: ex);
		/* if found exent covers block, simple return it */
		if (IN_RANGE(iblock, ee_block, ee_len)) {
			/* number of remain blocks in the extent */
			allocated = ee_len - (iblock - ee_block);

			/* A written extent: the mapping is direct. */
			if (!ext4_ext_is_unwritten(ext: ex)) {
				newblock = iblock - ee_block + ee_start;
				goto out;
			}

			/* Unwritten extent and read-only request:
			 * report "not mapped". */
			if (!create) {
				newblock = 0;
				goto out;
			}

			/* Unwritten extent with @create set: zero the
			 * requested part on disk, then convert it to
			 * a written extent. */
			uint32_t zero_range;
			zero_range = allocated;
			if (zero_range > max_blocks)
				zero_range = max_blocks;

			newblock = iblock - ee_block + ee_start;
			err = ext4_ext_zero_unwritten_range(inode_ref, block: newblock,
							    blocks_count: zero_range);
			if (err != EOK)
				goto out2;

			err = ext4_ext_convert_to_initialized(
			    inode_ref, ppath: &path, split: iblock, blocks: zero_range);
			if (err != EOK)
				goto out2;

			goto out;
		}
	}

	/*
	 * requested block isn't allocated yet
	 * we couldn't try to create block if create flag is zero
	 */
	if (!create) {
		goto out2;
	}

	/* find next allocated block so that we know how many
	 * blocks we can allocate without ovelapping next extent */
	next = ext4_ext_next_allocated_block(path);
	allocated = next - iblock;
	if (allocated > max_blocks)
		allocated = max_blocks;

	/* allocate new block */
	goal = ext4_ext_find_goal(inode_ref, path, block: iblock);
	newblock = ext4_new_meta_blocks(inode_ref, goal, flags: 0, count: &allocated, errp: &err);
	if (!newblock)
		goto out2;

	/* try to insert new extent into found leaf and return */
	newex.first_block = to_le32(iblock);
	ext4_ext_store_pblock(ex: &newex, pb: newblock);
	newex.block_count = to_le16(allocated);
	err = ext4_ext_insert_extent(inode_ref, ppath: &path, newext: &newex, flags: 0);
	if (err != EOK) {
		/* free data blocks we just allocated */
		ext4_ext_free_blocks(inode_ref, block: ext4_ext_pblock(ex: &newex),
				     to_le16(newex.block_count), flags: 0);
		goto out2;
	}

	/* previous routine could use block we allocated */
	newblock = ext4_ext_pblock(ex: &newex);

out:
	if (allocated > max_blocks)
		allocated = max_blocks;

	if (result)
		*result = newblock;

	if (blocks_count)
		*blocks_count = allocated;

out2:
	if (path) {
		ext4_ext_drop_refs(inode_ref, path, keep_other: 0);
		ext4_free(pointer: path);
	}

	return err;
}
2140#endif
2141