/*
 * Copyright (c) 2013 Grzegorz Kostka (kostka.grzegorz@gmail.com)
 *
 *
 * HelenOS:
 * Copyright (c) 2012 Martin Sucha
 * Copyright (c) 2012 Frantisek Princ
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup lwext4
 * @{
 */
/**
 * @file  ext4_fs.c
 * @brief More complex filesystem functions.
 */

#include <ext4_config.h>
#include <ext4_types.h>
#include <ext4_misc.h>
#include <ext4_errno.h>
#include <ext4_debug.h>

#include <ext4_trans.h>
#include <ext4_fs.h>
#include <ext4_blockdev.h>
#include <ext4_super.h>
#include <ext4_crc32.h>
#include <ext4_block_group.h>
#include <ext4_balloc.h>
#include <ext4_bitmap.h>
#include <ext4_inode.h>
#include <ext4_ialloc.h>
#include <ext4_extent.h>

#include <string.h>

int ext4_fs_init(struct ext4_fs *fs, struct ext4_blockdev *bdev,
		 bool read_only)
{
	int r, i;
	uint16_t tmp;
	uint32_t bsize;

	ext4_assert(fs && bdev);

	fs->bdev = bdev;

	fs->read_only = read_only;

	r = ext4_sb_read(fs->bdev, &fs->sb);
	if (r != EOK)
		return r;

	if (!ext4_sb_check(&fs->sb))
		return ENOTSUP;

	bsize = ext4_sb_get_block_size(&fs->sb);
	if (bsize > EXT4_MAX_BLOCK_SIZE)
		return ENXIO;

	r = ext4_fs_check_features(fs, &read_only);
	if (r != EOK)
		return r;

	if (read_only)
		fs->read_only = read_only;

	/* Compute limits for indirect block levels */
	uint32_t blocks_id = bsize / sizeof(uint32_t);

	fs->inode_block_limits[0] = EXT4_INODE_DIRECT_BLOCK_COUNT;
	fs->inode_blocks_per_level[0] = 1;

	for (i = 1; i < 4; i++) {
		fs->inode_blocks_per_level[i] =
		    fs->inode_blocks_per_level[i - 1] * blocks_id;
		fs->inode_block_limits[i] = fs->inode_block_limits[i - 1] +
					    fs->inode_blocks_per_level[i];
	}
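	/*
	 * Illustrative example: with a 4 KiB block size, blocks_id is
	 * 1024 block references per indirect block, so the limits become
	 * the usual 12 direct blocks, then 12 + 1024 logical blocks through
	 * the single indirect level, 12 + 1024 + 1024^2 through the double
	 * indirect level, and 12 + 1024 + 1024^2 + 1024^3 through the
	 * triple indirect level.
	 */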

	/* Validate FS */
	tmp = ext4_get16(&fs->sb, state);
	if (tmp & EXT4_SUPERBLOCK_STATE_ERROR_FS)
		ext4_dbg(DEBUG_FS, DBG_WARN
				"last umount error: superblock fs_error flag\n");


	if (!fs->read_only) {
		/* Mark system as mounted: the state stays flagged as error
		 * until a clean unmount sets it back to valid */
		ext4_set16(&fs->sb, state, EXT4_SUPERBLOCK_STATE_ERROR_FS);
		r = ext4_sb_write(fs->bdev, &fs->sb);
		if (r != EOK)
			return r;

		/* Update mount count */
		ext4_set16(&fs->sb, mount_count, ext4_get16(&fs->sb, mount_count) + 1);
	}

	return r;
}

int ext4_fs_fini(struct ext4_fs *fs)
{
	ext4_assert(fs);

	/* Set superblock state */
	ext4_set16(&fs->sb, state, EXT4_SUPERBLOCK_STATE_VALID_FS);

	if (!fs->read_only)
		return ext4_sb_write(fs->bdev, &fs->sb);

	return EOK;
}

static void ext4_fs_debug_features_inc(uint32_t features_incompatible)
{
	if (features_incompatible & EXT4_FINCOM_COMPRESSION)
		ext4_dbg(DEBUG_FS, DBG_NONE "compression\n");
	if (features_incompatible & EXT4_FINCOM_FILETYPE)
		ext4_dbg(DEBUG_FS, DBG_NONE "filetype\n");
	if (features_incompatible & EXT4_FINCOM_RECOVER)
		ext4_dbg(DEBUG_FS, DBG_NONE "recover\n");
	if (features_incompatible & EXT4_FINCOM_JOURNAL_DEV)
		ext4_dbg(DEBUG_FS, DBG_NONE "journal_dev\n");
	if (features_incompatible & EXT4_FINCOM_META_BG)
		ext4_dbg(DEBUG_FS, DBG_NONE "meta_bg\n");
	if (features_incompatible & EXT4_FINCOM_EXTENTS)
		ext4_dbg(DEBUG_FS, DBG_NONE "extents\n");
	if (features_incompatible & EXT4_FINCOM_64BIT)
		ext4_dbg(DEBUG_FS, DBG_NONE "64bit\n");
	if (features_incompatible & EXT4_FINCOM_MMP)
		ext4_dbg(DEBUG_FS, DBG_NONE "mmp\n");
	if (features_incompatible & EXT4_FINCOM_FLEX_BG)
		ext4_dbg(DEBUG_FS, DBG_NONE "flex_bg\n");
	if (features_incompatible & EXT4_FINCOM_EA_INODE)
		ext4_dbg(DEBUG_FS, DBG_NONE "ea_inode\n");
	if (features_incompatible & EXT4_FINCOM_DIRDATA)
		ext4_dbg(DEBUG_FS, DBG_NONE "dirdata\n");
	if (features_incompatible & EXT4_FINCOM_BG_USE_META_CSUM)
		ext4_dbg(DEBUG_FS, DBG_NONE "meta_csum\n");
	if (features_incompatible & EXT4_FINCOM_LARGEDIR)
		ext4_dbg(DEBUG_FS, DBG_NONE "largedir\n");
	if (features_incompatible & EXT4_FINCOM_INLINE_DATA)
		ext4_dbg(DEBUG_FS, DBG_NONE "inline_data\n");
}
static void ext4_fs_debug_features_comp(uint32_t features_compatible)
{
	if (features_compatible & EXT4_FCOM_DIR_PREALLOC)
		ext4_dbg(DEBUG_FS, DBG_NONE "dir_prealloc\n");
	if (features_compatible & EXT4_FCOM_IMAGIC_INODES)
		ext4_dbg(DEBUG_FS, DBG_NONE "imagic_inodes\n");
	if (features_compatible & EXT4_FCOM_HAS_JOURNAL)
		ext4_dbg(DEBUG_FS, DBG_NONE "has_journal\n");
	if (features_compatible & EXT4_FCOM_EXT_ATTR)
		ext4_dbg(DEBUG_FS, DBG_NONE "ext_attr\n");
	if (features_compatible & EXT4_FCOM_RESIZE_INODE)
		ext4_dbg(DEBUG_FS, DBG_NONE "resize_inode\n");
	if (features_compatible & EXT4_FCOM_DIR_INDEX)
		ext4_dbg(DEBUG_FS, DBG_NONE "dir_index\n");
}

static void ext4_fs_debug_features_ro(uint32_t features_ro)
{
	if (features_ro & EXT4_FRO_COM_SPARSE_SUPER)
		ext4_dbg(DEBUG_FS, DBG_NONE "sparse_super\n");
	if (features_ro & EXT4_FRO_COM_LARGE_FILE)
		ext4_dbg(DEBUG_FS, DBG_NONE "large_file\n");
	if (features_ro & EXT4_FRO_COM_BTREE_DIR)
		ext4_dbg(DEBUG_FS, DBG_NONE "btree_dir\n");
	if (features_ro & EXT4_FRO_COM_HUGE_FILE)
		ext4_dbg(DEBUG_FS, DBG_NONE "huge_file\n");
	if (features_ro & EXT4_FRO_COM_GDT_CSUM)
		ext4_dbg(DEBUG_FS, DBG_NONE "gdt_csum\n");
	if (features_ro & EXT4_FRO_COM_DIR_NLINK)
		ext4_dbg(DEBUG_FS, DBG_NONE "dir_nlink\n");
	if (features_ro & EXT4_FRO_COM_EXTRA_ISIZE)
		ext4_dbg(DEBUG_FS, DBG_NONE "extra_isize\n");
	if (features_ro & EXT4_FRO_COM_QUOTA)
		ext4_dbg(DEBUG_FS, DBG_NONE "quota\n");
	if (features_ro & EXT4_FRO_COM_BIGALLOC)
		ext4_dbg(DEBUG_FS, DBG_NONE "bigalloc\n");
	if (features_ro & EXT4_FRO_COM_METADATA_CSUM)
		ext4_dbg(DEBUG_FS, DBG_NONE "metadata_csum\n");
	if (features_ro & EXT4_FRO_COM_ORPHAN_PRESENT)
		ext4_dbg(DEBUG_FS, DBG_NONE "orphan_present\n");
}

int ext4_fs_check_features(struct ext4_fs *fs, bool *read_only)
{
	ext4_assert(fs && read_only);
	uint32_t v;
	if (ext4_get32(&fs->sb, rev_level) == 0) {
		*read_only = false;
		return EOK;
	}

	ext4_dbg(DEBUG_FS, DBG_INFO "sblock features_incompatible:\n");
	ext4_fs_debug_features_inc(ext4_get32(&fs->sb, features_incompatible));

	ext4_dbg(DEBUG_FS, DBG_INFO "sblock features_compatible:\n");
	ext4_fs_debug_features_comp(ext4_get32(&fs->sb, features_compatible));

	ext4_dbg(DEBUG_FS, DBG_INFO "sblock features_read_only:\n");
	ext4_fs_debug_features_ro(ext4_get32(&fs->sb, features_read_only));

	/* Check features_incompatible */
	v = (ext4_get32(&fs->sb, features_incompatible) &
	     (~CONFIG_SUPPORTED_FINCOM));
	if (v) {
		ext4_dbg(DEBUG_FS, DBG_ERROR
			 "sblock has unsupported features incompatible:\n");
		ext4_fs_debug_features_inc(v);
		return ENOTSUP;
	}

	/* Check features_read_only */
	v = ext4_get32(&fs->sb, features_read_only);
	v &= ~CONFIG_SUPPORTED_FRO_COM;
	if (v) {
		ext4_dbg(DEBUG_FS, DBG_WARN
			 "sblock has unsupported features read only:\n");
		ext4_fs_debug_features_ro(v);
		*read_only = true;
		return EOK;
	}
	*read_only = false;

	return EOK;
}

/**@brief Determine whether the block is inside the group.
 * @param s     Superblock
 * @param baddr Block address
 * @param bgid  Block group id
 * @return true if the block belongs to the given block group
 */
static bool ext4_block_in_group(struct ext4_sblock *s, ext4_fsblk_t baddr,
				uint32_t bgid)
{
	uint32_t actual_bgid;
	actual_bgid = ext4_balloc_get_bgid_of_block(s, baddr);
	if (actual_bgid == bgid)
		return true;
	return false;
}

/**@brief To avoid calling the atomic setbit hundreds or thousands of times,
 * we only use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
static void ext4_fs_mark_bitmap_end(int start_bit, int end_bit, void *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	for (i = start_bit; (unsigned)i < ((start_bit + 7) & ~7UL); i++)
		ext4_bmap_bit_set(bitmap, i);

	if (i < end_bit)
		memset((char *)bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

/**@brief Initialize block bitmap in block group.
 * @param bg_ref Reference to block group
 * @return Error code
 */
static int ext4_fs_init_block_bitmap(struct ext4_block_group_ref *bg_ref)
{
	struct ext4_sblock *sb = &bg_ref->fs->sb;
	struct ext4_bgroup *bg = bg_ref->block_group;
	int rc;

	uint32_t bit, bit_max;
	uint32_t group_blocks;
	uint16_t inode_size = ext4_get16(sb, inode_size);
	uint32_t block_size = ext4_sb_get_block_size(sb);
	uint32_t inodes_per_group = ext4_get32(sb, inodes_per_group);

	ext4_fsblk_t i;
	ext4_fsblk_t bmp_blk = ext4_bg_get_block_bitmap(bg, sb);
	ext4_fsblk_t bmp_inode = ext4_bg_get_inode_bitmap(bg, sb);
	ext4_fsblk_t inode_table = ext4_bg_get_inode_table_first_block(bg, sb);
	ext4_fsblk_t first_bg = ext4_balloc_get_block_of_bgid(sb, bg_ref->index);

	uint32_t dsc_per_block = block_size / ext4_sb_get_desc_size(sb);

	bool flex_bg = ext4_sb_feature_incom(sb, EXT4_FINCOM_FLEX_BG);
	bool meta_bg = ext4_sb_feature_incom(sb, EXT4_FINCOM_META_BG);

	uint32_t inode_table_bcnt = inodes_per_group * inode_size / block_size;

	struct ext4_block block_bitmap;
	rc = ext4_trans_block_get_noread(bg_ref->fs->bdev, &block_bitmap, bmp_blk);
	if (rc != EOK)
		return rc;

	memset(block_bitmap.data, 0, block_size);
	bit_max = ext4_sb_is_super_in_bg(sb, bg_ref->index);

	uint32_t count = ext4_sb_first_meta_bg(sb) * dsc_per_block;
	if (!meta_bg || bg_ref->index < count) {
		if (bit_max) {
			bit_max += ext4_bg_num_gdb(sb, bg_ref->index);
			bit_max += ext4_get16(sb, s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		bit_max += ext4_bg_num_gdb(sb, bg_ref->index);
	}
	for (bit = 0; bit < bit_max; bit++)
		ext4_bmap_bit_set(block_bitmap.data, bit);

	if (bg_ref->index == ext4_block_group_cnt(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and last
		 * group, if some other tool enabled EXT4_BG_BLOCK_UNINIT we
		 * need to make sure we calculate the right number of free
		 * blocks.
		 */

		group_blocks = (uint32_t)(ext4_sb_get_blocks_cnt(sb) -
					  ext4_get32(sb, first_data_block) -
					  ext4_get32(sb, blocks_per_group) *
					  (ext4_block_group_cnt(sb) - 1));
	} else {
		group_blocks = ext4_get32(sb, blocks_per_group);
	}

	bool in_bg;
	in_bg = ext4_block_in_group(sb, bmp_blk, bg_ref->index);
	if (!flex_bg || in_bg)
		ext4_bmap_bit_set(block_bitmap.data,
				  (uint32_t)(bmp_blk - first_bg));

	in_bg = ext4_block_in_group(sb, bmp_inode, bg_ref->index);
	if (!flex_bg || in_bg)
		ext4_bmap_bit_set(block_bitmap.data,
				  (uint32_t)(bmp_inode - first_bg));

	for (i = inode_table; i < inode_table + inode_table_bcnt; i++) {
		in_bg = ext4_block_in_group(sb, i, bg_ref->index);
		if (!flex_bg || in_bg)
			ext4_bmap_bit_set(block_bitmap.data,
					  (uint32_t)(i - first_bg));
	}
	/*
	 * Also, if the number of blocks within the group is less than
	 * block_size * 8 (which is the size of the bitmap), set the rest
	 * of the block bitmap to 1.
	 */
	ext4_fs_mark_bitmap_end(group_blocks, block_size * 8, block_bitmap.data);
	ext4_trans_set_block_dirty(block_bitmap.buf);

	ext4_balloc_set_bitmap_csum(sb, bg_ref->block_group, block_bitmap.data);
	bg_ref->dirty = true;

	/* Save bitmap */
	return ext4_block_set(bg_ref->fs->bdev, &block_bitmap);
}

/**@brief Initialize i-node bitmap in block group.
 * @param bg_ref Reference to block group
 * @return Error code
 */
static int ext4_fs_init_inode_bitmap(struct ext4_block_group_ref *bg_ref)
{
	int rc;
	struct ext4_sblock *sb = &bg_ref->fs->sb;
	struct ext4_bgroup *bg = bg_ref->block_group;

	/* Load bitmap */
	ext4_fsblk_t bitmap_block_addr = ext4_bg_get_inode_bitmap(bg, sb);

	struct ext4_block b;
	rc = ext4_trans_block_get_noread(bg_ref->fs->bdev, &b, bitmap_block_addr);
	if (rc != EOK)
		return rc;

	/* Initialize all bitmap bits to zero */
	uint32_t block_size = ext4_sb_get_block_size(sb);
	uint32_t inodes_per_group = ext4_get32(sb, inodes_per_group);

	memset(b.data, 0, (inodes_per_group + 7) / 8);

	uint32_t start_bit = inodes_per_group;
	uint32_t end_bit = block_size * 8;

	uint32_t i;
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_bmap_bit_set(b.data, i);

	if (i < end_bit)
		memset(b.data + (i >> 3), 0xff, (end_bit - i) >> 3);

	ext4_trans_set_block_dirty(b.buf);

	ext4_ialloc_set_bitmap_csum(sb, bg, b.data);
	bg_ref->dirty = true;

	/* Save bitmap */
	return ext4_block_set(bg_ref->fs->bdev, &b);
}

/**@brief Initialize i-node table in block group.
 * @param bg_ref Reference to block group
 * @return Error code
 */
static int ext4_fs_init_inode_table(struct ext4_block_group_ref *bg_ref)
{
	struct ext4_sblock *sb = &bg_ref->fs->sb;
	struct ext4_bgroup *bg = bg_ref->block_group;

	uint32_t inode_size = ext4_get16(sb, inode_size);
	uint32_t block_size = ext4_sb_get_block_size(sb);
	uint32_t inodes_per_block = block_size / inode_size;
	uint32_t inodes_in_group = ext4_inodes_in_group_cnt(sb, bg_ref->index);
	uint32_t table_blocks = inodes_in_group / inodes_per_block;
	ext4_fsblk_t fblock;

	if (inodes_in_group % inodes_per_block)
		table_blocks++;

	/* Compute initialization bounds */
	ext4_fsblk_t first_block = ext4_bg_get_inode_table_first_block(bg, sb);

	ext4_fsblk_t last_block = first_block + table_blocks - 1;

	/* Initialization of all itable blocks */
	for (fblock = first_block; fblock <= last_block; ++fblock) {
		struct ext4_block b;
		int rc = ext4_trans_block_get_noread(bg_ref->fs->bdev, &b, fblock);
		if (rc != EOK)
			return rc;

		memset(b.data, 0, block_size);
		ext4_trans_set_block_dirty(b.buf);

		rc = ext4_block_set(bg_ref->fs->bdev, &b);
		if (rc != EOK)
			return rc;
	}

	return EOK;
}

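/**@brief Get the disk block that holds the descriptor of a block group.
 * @param s             Superblock
 * @param bgid          Block group index
 * @param dsc_per_block Number of group descriptors per block
 * @return Absolute block address of the descriptor block
 */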
static ext4_fsblk_t ext4_fs_get_descriptor_block(struct ext4_sblock *s,
						 uint32_t bgid,
						 uint32_t dsc_per_block)
{
	uint32_t first_meta_bg, dsc_id;
	int has_super = 0;
	dsc_id = bgid / dsc_per_block;
	first_meta_bg = ext4_sb_first_meta_bg(s);

	bool meta_bg = ext4_sb_feature_incom(s, EXT4_FINCOM_META_BG);

	if (!meta_bg || dsc_id < first_meta_bg)
		return ext4_get32(s, first_data_block) + dsc_id + 1;

	if (ext4_sb_is_super_in_bg(s, bgid))
		has_super = 1;

	return (has_super + ext4_fs_first_bg_block_no(s, bgid));
}

/**@brief Compute checksum of block group descriptor.
 * @param sb   Superblock
 * @param bgid Index of block group in the filesystem
 * @param bg   Block group to compute checksum for
 * @return Checksum value
 */
static uint16_t ext4_fs_bg_checksum(struct ext4_sblock *sb, uint32_t bgid,
				    struct ext4_bgroup *bg)
{
	/* If checksum not supported, 0 will be returned */
	uint16_t crc = 0;
#if CONFIG_META_CSUM_ENABLE
	/* Compute the checksum only if the filesystem supports it */
	if (ext4_sb_feature_ro_com(sb, EXT4_FRO_COM_METADATA_CSUM)) {
		/* Use metadata_csum algorithm instead */
		uint32_t le32_bgid = to_le32(bgid);
		uint32_t orig_checksum, checksum;

		/* Preparation: temporarily set bg checksum to 0 */
		orig_checksum = bg->checksum;
		bg->checksum = 0;

		/* First calculate crc32 checksum against fs uuid */
		checksum = ext4_crc32c(EXT4_CRC32_INIT, sb->uuid,
				       sizeof(sb->uuid));
		/* Then calculate crc32 checksum against bgid */
		checksum = ext4_crc32c(checksum, &le32_bgid, sizeof(bgid));
		/* Finally calculate crc32 checksum against block_group_desc */
		checksum = ext4_crc32c(checksum, bg, ext4_sb_get_desc_size(sb));
		bg->checksum = orig_checksum;

		crc = checksum & 0xFFFF;
		return crc;
	}
#endif
	if (ext4_sb_feature_ro_com(sb, EXT4_FRO_COM_GDT_CSUM)) {
		uint8_t *base = (uint8_t *)bg;
		uint8_t *checksum = (uint8_t *)&bg->checksum;

		uint32_t offset = (uint32_t)(checksum - base);

		/* Convert block group index to little endian */
		uint32_t group = to_le32(bgid);

		/* Initialization */
		crc = ext4_bg_crc16(~0, sb->uuid, sizeof(sb->uuid));

		/* Include index of block group */
		crc = ext4_bg_crc16(crc, (uint8_t *)&group, sizeof(group));

		/* Compute crc from the first part (stop before checksum field)
		 */
		crc = ext4_bg_crc16(crc, (uint8_t *)bg, offset);

		/* Skip checksum */
		offset += sizeof(bg->checksum);

		/* Checksum of the rest of block group descriptor */
		if ((ext4_sb_feature_incom(sb, EXT4_FINCOM_64BIT)) &&
		    (offset < ext4_sb_get_desc_size(sb))) {

			const uint8_t *start = ((uint8_t *)bg) + offset;
			size_t len = ext4_sb_get_desc_size(sb) - offset;
			crc = ext4_bg_crc16(crc, start, len);
		}
	}
	return crc;
}

#if CONFIG_META_CSUM_ENABLE
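/**@brief Verify the stored checksum of a block group descriptor
 *        (compiled to a constant true when metadata_csum is disabled).
 * @param sb   Superblock
 * @param bgid Index of block group in the filesystem
 * @param bg   Block group to verify
 * @return true if the checksum matches (or is not applicable)
 */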
static bool ext4_fs_verify_bg_csum(struct ext4_sblock *sb,
				   uint32_t bgid,
				   struct ext4_bgroup *bg)
{
	if (!ext4_sb_feature_ro_com(sb, EXT4_FRO_COM_METADATA_CSUM))
		return true;

	return ext4_fs_bg_checksum(sb, bgid, bg) == to_le16(bg->checksum);
}
#else
#define ext4_fs_verify_bg_csum(...) true
#endif

int ext4_fs_get_block_group_ref(struct ext4_fs *fs, uint32_t bgid,
				struct ext4_block_group_ref *ref)
{
	/* Compute the number of descriptors that fit in one data block */
	uint32_t block_size = ext4_sb_get_block_size(&fs->sb);
	uint32_t dsc_cnt = block_size / ext4_sb_get_desc_size(&fs->sb);

	/* Block group descriptor table starts at the next block after
	 * the superblock */
	uint64_t block_id = ext4_fs_get_descriptor_block(&fs->sb, bgid, dsc_cnt);

	uint32_t offset = (bgid % dsc_cnt) * ext4_sb_get_desc_size(&fs->sb);

	int rc = ext4_trans_block_get(fs->bdev, &ref->block, block_id);
	if (rc != EOK)
		return rc;

	ref->block_group = (void *)(ref->block.data + offset);
	ref->fs = fs;
	ref->index = bgid;
	ref->dirty = false;
	struct ext4_bgroup *bg = ref->block_group;

	if (!ext4_fs_verify_bg_csum(&fs->sb, bgid, bg)) {
		ext4_dbg(DEBUG_FS,
			 DBG_WARN "Block group descriptor checksum failed. "
			 "Block group index: %" PRIu32 "\n",
			 bgid);
	}

	if (ext4_bg_has_flag(bg, EXT4_BLOCK_GROUP_BLOCK_UNINIT)) {
		rc = ext4_fs_init_block_bitmap(ref);
		if (rc != EOK) {
			ext4_block_set(fs->bdev, &ref->block);
			return rc;
		}
		ext4_bg_clear_flag(bg, EXT4_BLOCK_GROUP_BLOCK_UNINIT);
		ref->dirty = true;
	}

	if (ext4_bg_has_flag(bg, EXT4_BLOCK_GROUP_INODE_UNINIT)) {
		rc = ext4_fs_init_inode_bitmap(ref);
		if (rc != EOK) {
			ext4_block_set(ref->fs->bdev, &ref->block);
			return rc;
		}

		ext4_bg_clear_flag(bg, EXT4_BLOCK_GROUP_INODE_UNINIT);

		if (!ext4_bg_has_flag(bg, EXT4_BLOCK_GROUP_ITABLE_ZEROED)) {
			rc = ext4_fs_init_inode_table(ref);
			if (rc != EOK) {
				ext4_block_set(fs->bdev, &ref->block);
				return rc;
			}

			ext4_bg_set_flag(bg, EXT4_BLOCK_GROUP_ITABLE_ZEROED);
		}

		ref->dirty = true;
	}

	return EOK;
}

int ext4_fs_put_block_group_ref(struct ext4_block_group_ref *ref)
{
	/* Check if reference modified */
	if (ref->dirty) {
		/* Compute new checksum of block group */
		uint16_t cs;
		cs = ext4_fs_bg_checksum(&ref->fs->sb, ref->index,
					 ref->block_group);
		ref->block_group->checksum = to_le16(cs);

		/* Mark block dirty for writing changes to physical device */
		ext4_trans_set_block_dirty(ref->block.buf);
	}

	/* Put back the block that contains the block group descriptor */
	return ext4_block_set(ref->fs->bdev, &ref->block);
}

#if CONFIG_META_CSUM_ENABLE
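/**@brief Compute the metadata_csum checksum of an i-node.
 * @param inode_ref Reference to the i-node
 * @return Checksum value (0 when metadata_csum is not in use)
 */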
static uint32_t ext4_fs_inode_checksum(struct ext4_inode_ref *inode_ref)
{
	uint32_t checksum = 0;
	struct ext4_sblock *sb = &inode_ref->fs->sb;
	uint16_t inode_size = ext4_get16(sb, inode_size);

	if (ext4_sb_feature_ro_com(sb, EXT4_FRO_COM_METADATA_CSUM)) {
		uint32_t orig_checksum;

		uint32_t ino_index = to_le32(inode_ref->index);
		uint32_t ino_gen =
			to_le32(ext4_inode_get_generation(inode_ref->inode));

		/* Preparation: temporarily set inode checksum to 0 */
		orig_checksum = ext4_inode_get_csum(sb, inode_ref->inode);
		ext4_inode_set_csum(sb, inode_ref->inode, 0);

		/* First calculate crc32 checksum against fs uuid */
		checksum = ext4_crc32c(EXT4_CRC32_INIT, sb->uuid,
				       sizeof(sb->uuid));
		/* Then calculate crc32 checksum against inode number
		 * and inode generation */
		checksum = ext4_crc32c(checksum, &ino_index, sizeof(ino_index));
		checksum = ext4_crc32c(checksum, &ino_gen, sizeof(ino_gen));
		/* Finally calculate crc32 checksum against
		 * the entire inode */
		checksum = ext4_crc32c(checksum, inode_ref->inode, inode_size);
		ext4_inode_set_csum(sb, inode_ref->inode, orig_checksum);

		/* If inode size is not large enough to hold the
		 * upper 16bit of the checksum */
		if (inode_size == EXT4_GOOD_OLD_INODE_SIZE)
			checksum &= 0xFFFF;

	}
	return checksum;
}
#else
#define ext4_fs_inode_checksum(...) 0
#endif

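/**@brief Store a freshly computed checksum in an i-node
 *        (no-op when metadata_csum is not enabled on the filesystem).
 * @param inode_ref Reference to the i-node
 */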
static void ext4_fs_set_inode_checksum(struct ext4_inode_ref *inode_ref)
{
	struct ext4_sblock *sb = &inode_ref->fs->sb;
	if (!ext4_sb_feature_ro_com(sb, EXT4_FRO_COM_METADATA_CSUM))
		return;

	uint32_t csum = ext4_fs_inode_checksum(inode_ref);
	ext4_inode_set_csum(sb, inode_ref->inode, csum);
}

#if CONFIG_META_CSUM_ENABLE
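/**@brief Verify the stored checksum of an i-node
 *        (compiled to a constant true when metadata_csum is disabled).
 * @param inode_ref Reference to the i-node
 * @return true if the checksum matches (or is not applicable)
 */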
static bool ext4_fs_verify_inode_csum(struct ext4_inode_ref *inode_ref)
{
	struct ext4_sblock *sb = &inode_ref->fs->sb;
	if (!ext4_sb_feature_ro_com(sb, EXT4_FRO_COM_METADATA_CSUM))
		return true;

	return ext4_inode_get_csum(sb, inode_ref->inode) ==
		ext4_fs_inode_checksum(inode_ref);
}
#else
#define ext4_fs_verify_inode_csum(...) true
#endif

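/**@brief Load an i-node reference from the on-disk i-node table.
 * @param fs          Filesystem to read the i-node from
 * @param index       I-node number (1-based)
 * @param ref         Output i-node reference
 * @param initialized True if the i-node is expected to hold valid data
 *                    (enables checksum verification)
 * @return Error code
 */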
static int
__ext4_fs_get_inode_ref(struct ext4_fs *fs, uint32_t index,
			struct ext4_inode_ref *ref,
			bool initialized)
{
	/* Compute how many i-nodes every block group holds */
	uint32_t inodes_per_group = ext4_get32(&fs->sb, inodes_per_group);

	/*
	 * Inode numbers are 1-based, but it is simpler to work with 0-based
	 * when computing indices
	 */
	index -= 1;
	uint32_t block_group = index / inodes_per_group;
	uint32_t offset_in_group = index % inodes_per_group;

	/* Load block group, where i-node is located */
	struct ext4_block_group_ref bg_ref;

	int rc = ext4_fs_get_block_group_ref(fs, block_group, &bg_ref);
	if (rc != EOK) {
		return rc;
	}

	/* Load block address, where i-node table is located */
	ext4_fsblk_t inode_table_start =
	    ext4_bg_get_inode_table_first_block(bg_ref.block_group, &fs->sb);

	/* Put back block group reference (not needed anymore) */
	rc = ext4_fs_put_block_group_ref(&bg_ref);
	if (rc != EOK) {
		return rc;
	}

	/* Compute position of i-node in the block group */
	uint16_t inode_size = ext4_get16(&fs->sb, inode_size);
	uint32_t block_size = ext4_sb_get_block_size(&fs->sb);
	uint32_t byte_offset_in_group = offset_in_group * inode_size;

	/* Compute block address */
	ext4_fsblk_t block_id =
	    inode_table_start + (byte_offset_in_group / block_size);

	rc = ext4_trans_block_get(fs->bdev, &ref->block, block_id);
	if (rc != EOK) {
		return rc;
	}

	/* Compute position of i-node in the data block */
	uint32_t offset_in_block = byte_offset_in_group % block_size;
	ref->inode = (struct ext4_inode *)(ref->block.data + offset_in_block);

	/* We need to store the original value of index in the reference */
	ref->index = index + 1;
	ref->fs = fs;
	ref->dirty = false;

	if (initialized && !ext4_fs_verify_inode_csum(ref)) {
		ext4_dbg(DEBUG_FS,
			 DBG_WARN "Inode checksum failed. "
			 "Inode: %" PRIu32 "\n",
			 ref->index);
	}

	return EOK;
}

int ext4_fs_get_inode_ref(struct ext4_fs *fs, uint32_t index,
			  struct ext4_inode_ref *ref)
{
	return __ext4_fs_get_inode_ref(fs, index, ref, true);
}

int ext4_fs_put_inode_ref(struct ext4_inode_ref *ref)
{
	/* Check if reference modified */
	if (ref->dirty) {
		/* Mark block dirty for writing changes to physical device */
		ext4_fs_set_inode_checksum(ref);
		ext4_trans_set_block_dirty(ref->block.buf);
	}

	/* Put back the block that contains the i-node */
	return ext4_block_set(ref->fs->bdev, &ref->block);
}

void ext4_fs_inode_blocks_init(struct ext4_fs *fs,
			       struct ext4_inode_ref *inode_ref)
{
	struct ext4_inode *inode = inode_ref->inode;

	/* Only regular files and directories need their block storage
	 * initialized here; for any other inode type there is nothing to do */
	switch (ext4_inode_type(&fs->sb, inode_ref->inode)) {
	case EXT4_INODE_MODE_FILE:
	case EXT4_INODE_MODE_DIRECTORY:
		break;
	default:
		return;
	}

#if CONFIG_EXTENT_ENABLE && CONFIG_EXTENTS_ENABLE
	/* Initialize extents if needed */
	if (ext4_sb_feature_incom(&fs->sb, EXT4_FINCOM_EXTENTS)) {
		ext4_inode_set_flag(inode, EXT4_INODE_FLAG_EXTENTS);

		/* Initialize extent root header */
		ext4_extent_tree_init(inode_ref);
	}

	inode_ref->dirty = true;
#endif
}

uint32_t ext4_fs_correspond_inode_mode(int filetype)
{
	switch (filetype) {
	case EXT4_DE_DIR:
		return EXT4_INODE_MODE_DIRECTORY;
	case EXT4_DE_REG_FILE:
		return EXT4_INODE_MODE_FILE;
	case EXT4_DE_SYMLINK:
		return EXT4_INODE_MODE_SOFTLINK;
	case EXT4_DE_CHRDEV:
		return EXT4_INODE_MODE_CHARDEV;
	case EXT4_DE_BLKDEV:
		return EXT4_INODE_MODE_BLOCKDEV;
	case EXT4_DE_FIFO:
		return EXT4_INODE_MODE_FIFO;
	case EXT4_DE_SOCK:
		return EXT4_INODE_MODE_SOCKET;
	}
	/* FIXME: unsupported filetype */
	return EXT4_INODE_MODE_FILE;
}

int ext4_fs_alloc_inode(struct ext4_fs *fs, struct ext4_inode_ref *inode_ref,
			int filetype)
{
	/* Check if newly allocated i-node will be a directory */
	bool is_dir;
	uint16_t inode_size = ext4_get16(&fs->sb, inode_size);

	is_dir = (filetype == EXT4_DE_DIR);

	/* Allocate inode by allocation algorithm */
	uint32_t index;
	int rc = ext4_ialloc_alloc_inode(fs, &index, is_dir);
	if (rc != EOK)
		return rc;

	/* Load i-node from on-disk i-node table */
	rc = __ext4_fs_get_inode_ref(fs, index, inode_ref, false);
	if (rc != EOK) {
		ext4_ialloc_free_inode(fs, index, is_dir);
		return rc;
	}

	/* Initialize i-node */
	struct ext4_inode *inode = inode_ref->inode;

	memset(inode, 0, inode_size);

	uint32_t mode;
	if (is_dir) {
		/*
		 * Default directory permissions to be compatible with other
		 * systems
		 * 0777 (octal) == rwxrwxrwx
		 */

		mode = 0777;
		mode |= EXT4_INODE_MODE_DIRECTORY;
	} else if (filetype == EXT4_DE_SYMLINK) {
		/*
		 * Default symbolic link permissions to be compatible with
		 * other systems
		 * 0777 (octal) == rwxrwxrwx
		 */

		mode = 0777;
		mode |= EXT4_INODE_MODE_SOFTLINK;
	} else {
		/*
		 * Default file permissions to be compatible with other systems
		 * 0666 (octal) == rw-rw-rw-
		 */

		mode = 0666;
		mode |= ext4_fs_correspond_inode_mode(filetype);
	}
	ext4_inode_set_mode(&fs->sb, inode, mode);

	ext4_inode_set_links_cnt(inode, 0);
	ext4_inode_set_uid(inode, 0);
	ext4_inode_set_gid(inode, 0);
	ext4_inode_set_size(inode, 0);
	ext4_inode_set_access_time(inode, 0);
	ext4_inode_set_change_inode_time(inode, 0);
	ext4_inode_set_modif_time(inode, 0);
	ext4_inode_set_del_time(inode, 0);
	ext4_inode_set_blocks_count(&fs->sb, inode, 0);
	ext4_inode_set_flags(inode, 0);
	ext4_inode_set_generation(inode, 0);
	if (inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
		uint16_t size = ext4_get16(&fs->sb, want_extra_isize);
		ext4_inode_set_extra_isize(&fs->sb, inode, size);
	}

	memset(inode->blocks, 0, sizeof(inode->blocks));
	inode_ref->dirty = true;

	return EOK;
}

int ext4_fs_free_inode(struct ext4_inode_ref *inode_ref)
{
	struct ext4_fs *fs = inode_ref->fs;
	uint32_t offset;
	uint32_t suboff;
	int rc;
#if CONFIG_EXTENT_ENABLE && CONFIG_EXTENTS_ENABLE
	/* For inodes using extents the data blocks are released elsewhere */
	if ((ext4_sb_feature_incom(&fs->sb, EXT4_FINCOM_EXTENTS)) &&
	    (ext4_inode_has_flag(inode_ref->inode, EXT4_INODE_FLAG_EXTENTS))) {
		/* Data structures are released during truncate operation... */
		goto finish;
	}
#endif
	/* Release all indirect (no data) blocks */

	/* 1) Single indirect */
	ext4_fsblk_t fblock = ext4_inode_get_indirect_block(inode_ref->inode, 0);
	if (fblock != 0) {
		int rc = ext4_balloc_free_block(inode_ref, fblock);
		if (rc != EOK)
			return rc;

		ext4_inode_set_indirect_block(inode_ref->inode, 0, 0);
	}

	uint32_t block_size = ext4_sb_get_block_size(&fs->sb);
	uint32_t count = block_size / sizeof(uint32_t);

	struct ext4_block block;

	/* 2) Double indirect */
	fblock = ext4_inode_get_indirect_block(inode_ref->inode, 1);
	if (fblock != 0) {
		int rc = ext4_trans_block_get(fs->bdev, &block, fblock);
		if (rc != EOK)
			return rc;

		ext4_fsblk_t ind_block;
		for (offset = 0; offset < count; ++offset) {
			ind_block = to_le32(((uint32_t *)block.data)[offset]);

			if (ind_block == 0)
				continue;
			rc = ext4_balloc_free_block(inode_ref, ind_block);
			if (rc != EOK) {
				ext4_block_set(fs->bdev, &block);
				return rc;
			}

		}

		ext4_block_set(fs->bdev, &block);
		rc = ext4_balloc_free_block(inode_ref, fblock);
		if (rc != EOK)
			return rc;

		ext4_inode_set_indirect_block(inode_ref->inode, 1, 0);
	}

	/* 3) Triple indirect */
	struct ext4_block subblock;
	fblock = ext4_inode_get_indirect_block(inode_ref->inode, 2);
	if (fblock == 0)
		goto finish;
	rc = ext4_trans_block_get(fs->bdev, &block, fblock);
	if (rc != EOK)
		return rc;

	ext4_fsblk_t ind_block;
	for (offset = 0; offset < count; ++offset) {
		ind_block = to_le32(((uint32_t *)block.data)[offset]);

		if (ind_block == 0)
			continue;
		rc = ext4_trans_block_get(fs->bdev, &subblock,
					  ind_block);
		if (rc != EOK) {
			ext4_block_set(fs->bdev, &block);
			return rc;
		}

		ext4_fsblk_t ind_subblk;
		for (suboff = 0; suboff < count; ++suboff) {
			ind_subblk = to_le32(((uint32_t *)subblock.data)[suboff]);

			if (ind_subblk == 0)
				continue;
			rc = ext4_balloc_free_block(inode_ref, ind_subblk);
			if (rc != EOK) {
				ext4_block_set(fs->bdev, &subblock);
				ext4_block_set(fs->bdev, &block);
				return rc;
			}

		}

		ext4_block_set(fs->bdev, &subblock);

		rc = ext4_balloc_free_block(inode_ref,
					    ind_block);
		if (rc != EOK) {
			ext4_block_set(fs->bdev, &block);
			return rc;
		}

	}

	ext4_block_set(fs->bdev, &block);
	rc = ext4_balloc_free_block(inode_ref, fblock);
	if (rc != EOK)
		return rc;

	ext4_inode_set_indirect_block(inode_ref->inode, 2, 0);
finish:
	/* Mark inode dirty for writing to the physical device */
	inode_ref->dirty = true;

	/* Free block with extended attributes if present */
	ext4_fsblk_t xattr_block =
	    ext4_inode_get_file_acl(inode_ref->inode, &fs->sb);
	if (xattr_block) {
		int rc = ext4_balloc_free_block(inode_ref, xattr_block);
		if (rc != EOK)
			return rc;

		ext4_inode_set_file_acl(inode_ref->inode, &fs->sb, 0);
	}

	/* Free inode by allocator */
	if (ext4_inode_is_type(&fs->sb, inode_ref->inode,
			       EXT4_INODE_MODE_DIRECTORY))
		rc = ext4_ialloc_free_inode(fs, inode_ref->index, true);
	else
		rc = ext4_ialloc_free_inode(fs, inode_ref->index, false);

	return rc;
}


/**@brief Release data block from i-node
 * @param inode_ref I-node to release block from
 * @param iblock Logical block to be released
 * @return Error code
 */
static int ext4_fs_release_inode_block(struct ext4_inode_ref *inode_ref,
				       ext4_lblk_t iblock)
{
	ext4_fsblk_t fblock;

	struct ext4_fs *fs = inode_ref->fs;

	/* Extents are handled elsewhere; this function does not support them
	 */
	ext4_assert(!(
	    ext4_sb_feature_incom(&fs->sb, EXT4_FINCOM_EXTENTS) &&
	    (ext4_inode_has_flag(inode_ref->inode, EXT4_INODE_FLAG_EXTENTS))));

	struct ext4_inode *inode = inode_ref->inode;

	/* Handle simple case when we are dealing with direct reference */
	if (iblock < EXT4_INODE_DIRECT_BLOCK_COUNT) {
		fblock = ext4_inode_get_direct_block(inode, iblock);

		/* Sparse file */
		if (fblock == 0)
			return EOK;

		ext4_inode_set_direct_block(inode, iblock, 0);
		return ext4_balloc_free_block(inode_ref, fblock);
	}

	/* Determine the indirection level needed to get the desired block */
	unsigned int level = 0;
	unsigned int i;
	for (i = 1; i < 4; i++) {
		if (iblock < fs->inode_block_limits[i]) {
			level = i;
			break;
		}
	}

	if (level == 0)
		return EIO;

	/* Compute offsets for the topmost level */
	uint32_t block_offset_in_level =
	    (uint32_t)(iblock - fs->inode_block_limits[level - 1]);
	ext4_fsblk_t current_block =
	    ext4_inode_get_indirect_block(inode, level - 1);
	uint32_t offset_in_block =
	    (uint32_t)(block_offset_in_level / fs->inode_blocks_per_level[level - 1]);

	/*
	 * Navigate through other levels, until we find the block number
	 * or find null reference meaning we are dealing with sparse file
	 */
	struct ext4_block block;

	while (level > 0) {

		/* Sparse check */
		if (current_block == 0)
			return EOK;

		int rc = ext4_trans_block_get(fs->bdev, &block, current_block);
		if (rc != EOK)
			return rc;

		current_block =
		    to_le32(((uint32_t *)block.data)[offset_in_block]);

		/* Set zero if physical data block address found */
		if (level == 1) {
			((uint32_t *)block.data)[offset_in_block] = to_le32(0);
			ext4_trans_set_block_dirty(block.buf);
		}

		rc = ext4_block_set(fs->bdev, &block);
		if (rc != EOK)
			return rc;

		level--;

		/*
		 * If we are on the last level, break here as
		 * there is no next level to visit
		 */
		if (level == 0)
			break;

		/* Visit the next level */
		block_offset_in_level %= fs->inode_blocks_per_level[level];
		offset_in_block = (uint32_t)(block_offset_in_level /
					     fs->inode_blocks_per_level[level - 1]);
	}

	fblock = current_block;
	if (fblock == 0)
		return EOK;

	/* Physical block is not referenced, it can be released */
	return ext4_balloc_free_block(inode_ref, fblock);
}

int ext4_fs_truncate_inode(struct ext4_inode_ref *inode_ref, uint64_t new_size)
{
	struct ext4_sblock *sb = &inode_ref->fs->sb;
	uint32_t i;
	int r;
	bool v;

	/* Check flags, if i-node can be truncated */
	if (!ext4_inode_can_truncate(sb, inode_ref->inode))
		return EINVAL;

	/* If sizes are equal, nothing has to be done. */
	uint64_t old_size = ext4_inode_get_size(sb, inode_ref->inode);
	if (old_size == new_size)
		return EOK;

	/* Growing a file through the truncate operation is not supported */
	if (old_size < new_size)
		return EINVAL;

	/* For a symbolic link which is small enough to live in the inode */
	v = ext4_inode_is_type(sb, inode_ref->inode, EXT4_INODE_MODE_SOFTLINK);
	if (v && old_size < sizeof(inode_ref->inode->blocks) &&
	    !ext4_inode_get_blocks_count(sb, inode_ref->inode)) {
		char *content = (char *)inode_ref->inode->blocks + new_size;
		memset(content, 0,
		       sizeof(inode_ref->inode->blocks) - (uint32_t)new_size);
		ext4_inode_set_size(inode_ref->inode, new_size);
		inode_ref->dirty = true;

		return EOK;
	}

	i = ext4_inode_type(sb, inode_ref->inode);
	if (i == EXT4_INODE_MODE_CHARDEV ||
	    i == EXT4_INODE_MODE_BLOCKDEV ||
	    i == EXT4_INODE_MODE_SOCKET) {
		inode_ref->inode->blocks[0] = 0;
		inode_ref->inode->blocks[1] = 0;

		inode_ref->dirty = true;
		return EOK;
	}

	/* Compute how many blocks will be released */
	uint32_t block_size = ext4_sb_get_block_size(sb);
	uint32_t new_blocks_cnt = (uint32_t)((new_size + block_size - 1) / block_size);
	uint32_t old_blocks_cnt = (uint32_t)((old_size + block_size - 1) / block_size);
	uint32_t diff_blocks_cnt = old_blocks_cnt - new_blocks_cnt;
#if CONFIG_EXTENT_ENABLE && CONFIG_EXTENTS_ENABLE
	if ((ext4_sb_feature_incom(sb, EXT4_FINCOM_EXTENTS)) &&
	    (ext4_inode_has_flag(inode_ref->inode, EXT4_INODE_FLAG_EXTENTS))) {

		/* Extents require special operation */
		if (diff_blocks_cnt) {
			r = ext4_extent_remove_space(inode_ref, new_blocks_cnt,
						     EXT_MAX_BLOCKS);
			if (r != EOK)
				return r;

		}
	} else
#endif
	{
		/* Release data blocks from the end of file
		 * (logical blocks are numbered from 0) */
		for (i = 0; i < diff_blocks_cnt; ++i) {
			r = ext4_fs_release_inode_block(inode_ref,
							new_blocks_cnt + i);
			if (r != EOK)
				return r;
		}
	}

	/* Update i-node */
	ext4_inode_set_size(inode_ref->inode, new_size);
	inode_ref->dirty = true;

	return EOK;
}

/**@brief Compute 'goal' for inode index
 * @param inode_ref Reference to inode, to allocate block for
 * @return goal
 */
ext4_fsblk_t ext4_fs_inode_to_goal_block(struct ext4_inode_ref *inode_ref)
{
	uint32_t grp_inodes = ext4_get32(&inode_ref->fs->sb, inodes_per_group);
	return (inode_ref->index - 1) / grp_inodes;
}

/**@brief Compute 'goal' for allocation algorithm (For blockmap).
 * @param inode_ref Reference to inode, to allocate block for
 * @param goal      Output goal block address
 * @return Error code
 */
int ext4_fs_indirect_find_goal(struct ext4_inode_ref *inode_ref,
			       ext4_fsblk_t *goal)
{
	int r;
	struct ext4_sblock *sb = &inode_ref->fs->sb;
	*goal = 0;

	uint64_t inode_size = ext4_inode_get_size(sb, inode_ref->inode);
	uint32_t block_size = ext4_sb_get_block_size(sb);
	uint32_t iblock_cnt = (uint32_t)(inode_size / block_size);

	if (inode_size % block_size != 0)
		iblock_cnt++;

	/* If inode has some blocks, get last block address + 1 */
	if (iblock_cnt > 0) {
		r = ext4_fs_get_inode_dblk_idx(inode_ref, iblock_cnt - 1,
					       goal, false);
		if (r != EOK)
			return r;

		if (*goal != 0) {
			(*goal)++;
			return r;
		}

		/* If goal == 0, sparse file -> continue */
	}

	/* Identify block group of inode */

	uint32_t inodes_per_bg = ext4_get32(sb, inodes_per_group);
	uint32_t block_group = (inode_ref->index - 1) / inodes_per_bg;
	block_size = ext4_sb_get_block_size(sb);

	/* Load block group reference */
	struct ext4_block_group_ref bg_ref;
	r = ext4_fs_get_block_group_ref(inode_ref->fs, block_group, &bg_ref);
	if (r != EOK)
		return r;

	struct ext4_bgroup *bg = bg_ref.block_group;

	/* Compute indexes */
	uint32_t bg_count = ext4_block_group_cnt(sb);
	ext4_fsblk_t itab_first_block = ext4_bg_get_inode_table_first_block(bg, sb);
	uint16_t itab_item_size = ext4_get16(sb, inode_size);
	uint32_t itab_bytes;

	/* Check for last block group */
	if (block_group < bg_count - 1) {
		itab_bytes = inodes_per_bg * itab_item_size;
	} else {
		/* Last block group could be smaller */
		uint32_t inodes_cnt = ext4_get32(sb, inodes_count);

		itab_bytes = (inodes_cnt - ((bg_count - 1) * inodes_per_bg));
		itab_bytes *= itab_item_size;
	}

	ext4_fsblk_t inode_table_blocks = itab_bytes / block_size;

	if (itab_bytes % block_size)
		inode_table_blocks++;

	*goal = itab_first_block + inode_table_blocks;

	return ext4_fs_put_block_group_ref(&bg_ref);
}

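/**@brief Map a logical (file) block to a physical (filesystem) block.
 * @param inode_ref          I-node to map the block for
 * @param iblock             Logical block index
 * @param fblock             Output physical block address (0 for a hole)
 * @param extent_create      Allocate missing extent blocks when set
 * @param support_unwritten  Allow an unallocated/unwritten result block
 * @return Error code
 */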
static int ext4_fs_get_inode_dblk_idx_internal(struct ext4_inode_ref *inode_ref,
					       ext4_lblk_t iblock, ext4_fsblk_t *fblock,
					       bool extent_create,
					       bool support_unwritten __unused)
{
	struct ext4_fs *fs = inode_ref->fs;

	/* For an empty file the situation is simple */
	if (ext4_inode_get_size(&fs->sb, inode_ref->inode) == 0) {
		*fblock = 0;
		return EOK;
	}

	ext4_fsblk_t current_block;

	(void)extent_create;
#if CONFIG_EXTENT_ENABLE && CONFIG_EXTENTS_ENABLE
	/* Handle i-node using extents */
	if ((ext4_sb_feature_incom(&fs->sb, EXT4_FINCOM_EXTENTS)) &&
	    (ext4_inode_has_flag(inode_ref->inode, EXT4_INODE_FLAG_EXTENTS))) {

		ext4_fsblk_t current_fsblk;
		int rc = ext4_extent_get_blocks(inode_ref, iblock, 1,
						&current_fsblk, extent_create, NULL);
		if (rc != EOK)
			return rc;

		current_block = current_fsblk;
		*fblock = current_block;

		ext4_assert(*fblock || support_unwritten);
		return EOK;
	}
#endif

	struct ext4_inode *inode = inode_ref->inode;

	/* Direct blocks are read directly from the array in the i-node */
	if (iblock < EXT4_INODE_DIRECT_BLOCK_COUNT) {
		current_block =
		    ext4_inode_get_direct_block(inode, (uint32_t)iblock);
		*fblock = current_block;
		return EOK;
	}

	/* Determine indirection level of the target block */
	unsigned int l = 0;
	unsigned int i;
	for (i = 1; i < 4; i++) {
		if (iblock < fs->inode_block_limits[i]) {
			l = i;
			break;
		}
	}

	if (l == 0)
		return EIO;

	/* Compute offsets for the topmost level */
	uint32_t blk_off_in_lvl = (uint32_t)(iblock - fs->inode_block_limits[l - 1]);
	current_block = ext4_inode_get_indirect_block(inode, l - 1);
	uint32_t off_in_blk = (uint32_t)(blk_off_in_lvl / fs->inode_blocks_per_level[l - 1]);

	/* Sparse file */
	if (current_block == 0) {
		*fblock = 0;
		return EOK;
	}

	struct ext4_block block;

	/*
	 * Navigate through other levels, until we find the block number
	 * or find null reference meaning we are dealing with sparse file
	 */
	while (l > 0) {
		/* Load indirect block */
		int rc = ext4_trans_block_get(fs->bdev, &block, current_block);
		if (rc != EOK)
			return rc;

		/* Read block address from indirect block */
		current_block =
		    to_le32(((uint32_t *)block.data)[off_in_blk]);

		/* Put back indirect block untouched */
		rc = ext4_block_set(fs->bdev, &block);
		if (rc != EOK)
			return rc;

		/* Check for sparse file */
		if (current_block == 0) {
			*fblock = 0;
			return EOK;
		}

		/* Jump to the next level */
		l--;

		/* Termination condition - we have address of data block loaded
		 */
		if (l == 0)
			break;

		/* Visit the next level */
		blk_off_in_lvl %= fs->inode_blocks_per_level[l];
		off_in_blk = (uint32_t)(blk_off_in_lvl / fs->inode_blocks_per_level[l - 1]);
	}

	*fblock = current_block;

	return EOK;
}


int ext4_fs_get_inode_dblk_idx(struct ext4_inode_ref *inode_ref,
			       ext4_lblk_t iblock, ext4_fsblk_t *fblock,
			       bool support_unwritten)
{
	return ext4_fs_get_inode_dblk_idx_internal(inode_ref, iblock, fblock,
						   false, support_unwritten);
}

int ext4_fs_init_inode_dblk_idx(struct ext4_inode_ref *inode_ref,
				ext4_lblk_t iblock, ext4_fsblk_t *fblock)
{
	return ext4_fs_get_inode_dblk_idx_internal(inode_ref, iblock, fblock,
						   true, true);
}

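/**@brief Store a physical block address for a given logical block in the
 *        i-node's indirect block map, allocating indirect blocks as needed.
 * @param inode_ref I-node to update
 * @param iblock    Logical block index
 * @param fblock    Physical block address to store
 * @return Error code
 */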
static int ext4_fs_set_inode_data_block_index(struct ext4_inode_ref *inode_ref,
					      ext4_lblk_t iblock, ext4_fsblk_t fblock)
{
	struct ext4_fs *fs = inode_ref->fs;

#if CONFIG_EXTENT_ENABLE && CONFIG_EXTENTS_ENABLE
	/* Handle inode using extents */
	if ((ext4_sb_feature_incom(&fs->sb, EXT4_FINCOM_EXTENTS)) &&
	    (ext4_inode_has_flag(inode_ref->inode, EXT4_INODE_FLAG_EXTENTS))) {
		/* Not reachable */
		return ENOTSUP;
	}
#endif

	/* Handle simple case when we are dealing with direct reference */
	if (iblock < EXT4_INODE_DIRECT_BLOCK_COUNT) {
		ext4_inode_set_direct_block(inode_ref->inode, (uint32_t)iblock,
					    (uint32_t)fblock);
		inode_ref->dirty = true;

		return EOK;
	}

	/* Determine the indirection level needed to get the desired block */
	unsigned int l = 0;
	unsigned int i;
	for (i = 1; i < 4; i++) {
		if (iblock < fs->inode_block_limits[i]) {
			l = i;
			break;
		}
	}

	if (l == 0)
		return EIO;

	uint32_t block_size = ext4_sb_get_block_size(&fs->sb);

	/* Compute offsets for the topmost level */
	uint32_t blk_off_in_lvl = (uint32_t)(iblock - fs->inode_block_limits[l - 1]);
	ext4_fsblk_t current_block =
	    ext4_inode_get_indirect_block(inode_ref->inode, l - 1);
	uint32_t off_in_blk = (uint32_t)(blk_off_in_lvl / fs->inode_blocks_per_level[l - 1]);

	ext4_fsblk_t new_blk;

	struct ext4_block block;
	struct ext4_block new_block;

	/* An indirect block needs to be allocated at the i-node level */
	if (current_block == 0) {
		/* Allocate new indirect block */
		ext4_fsblk_t goal;
		int rc = ext4_fs_indirect_find_goal(inode_ref, &goal);
		if (rc != EOK)
			return rc;

		rc = ext4_balloc_alloc_block(inode_ref, goal, &new_blk);
		if (rc != EOK)
			return rc;

		/* Update i-node */
		ext4_inode_set_indirect_block(inode_ref->inode, l - 1,
					      (uint32_t)new_blk);
		inode_ref->dirty = true;

		/* Load newly allocated block */
		rc = ext4_trans_block_get_noread(fs->bdev, &new_block, new_blk);
		if (rc != EOK) {
			ext4_balloc_free_block(inode_ref, new_blk);
			return rc;
		}

		/* Initialize new block */
		memset(new_block.data, 0, block_size);
		ext4_trans_set_block_dirty(new_block.buf);

		/* Put back the allocated block */
		rc = ext4_block_set(fs->bdev, &new_block);
		if (rc != EOK)
			return rc;

		current_block = new_blk;
	}

	/*
	 * Navigate through other levels, until we find the block number
	 * or find null reference meaning we are dealing with sparse file
	 */
	while (l > 0) {
		int rc = ext4_trans_block_get(fs->bdev, &block, current_block);
		if (rc != EOK)
			return rc;

		current_block = to_le32(((uint32_t *)block.data)[off_in_blk]);
		if ((l > 1) && (current_block == 0)) {
			ext4_fsblk_t goal;
			rc = ext4_fs_indirect_find_goal(inode_ref, &goal);
			if (rc != EOK) {
				ext4_block_set(fs->bdev, &block);
				return rc;
			}

			/* Allocate new block */
			rc =
			    ext4_balloc_alloc_block(inode_ref, goal, &new_blk);
			if (rc != EOK) {
				ext4_block_set(fs->bdev, &block);
				return rc;
			}

			/* Load newly allocated block */
			rc = ext4_trans_block_get_noread(fs->bdev, &new_block,
							 new_blk);

			if (rc != EOK) {
				ext4_block_set(fs->bdev, &block);
				return rc;
			}

			/* Initialize allocated block */
			memset(new_block.data, 0, block_size);
			ext4_trans_set_block_dirty(new_block.buf);

			rc = ext4_block_set(fs->bdev, &new_block);
			if (rc != EOK) {
				ext4_block_set(fs->bdev, &block);
				return rc;
			}

			/* Write block address to the parent */
			uint32_t *p = (uint32_t *)block.data;
			p[off_in_blk] = to_le32((uint32_t)new_blk);
			ext4_trans_set_block_dirty(block.buf);
			current_block = new_blk;
		}

		/* Will be finished, write the fblock address */
		if (l == 1) {
			uint32_t *p = (uint32_t *)block.data;
			p[off_in_blk] = to_le32((uint32_t)fblock);
			ext4_trans_set_block_dirty(block.buf);
		}

		rc = ext4_block_set(fs->bdev, &block);
		if (rc != EOK)
			return rc;

		l--;

		/*
		 * If we are on the last level, break here as
		 * there is no next level to visit
		 */
		if (l == 0)
			break;

		/* Visit the next level */
		blk_off_in_lvl %= fs->inode_blocks_per_level[l];
		off_in_blk = (uint32_t)(blk_off_in_lvl /
					fs->inode_blocks_per_level[l - 1]);
	}

	return EOK;
}


int ext4_fs_append_inode_dblk(struct ext4_inode_ref *inode_ref,
			      ext4_fsblk_t *fblock, ext4_lblk_t *iblock)
{
#if CONFIG_EXTENT_ENABLE && CONFIG_EXTENTS_ENABLE
	/* Handle extents separately */
	if ((ext4_sb_feature_incom(&inode_ref->fs->sb, EXT4_FINCOM_EXTENTS)) &&
	    (ext4_inode_has_flag(inode_ref->inode, EXT4_INODE_FLAG_EXTENTS))) {
		int rc;
		ext4_fsblk_t current_fsblk;
		struct ext4_sblock *sb = &inode_ref->fs->sb;
		uint64_t inode_size = ext4_inode_get_size(sb, inode_ref->inode);
		uint32_t block_size = ext4_sb_get_block_size(sb);
		*iblock = (uint32_t)((inode_size + block_size - 1) / block_size);

		rc = ext4_extent_get_blocks(inode_ref, *iblock, 1,
					    &current_fsblk, true, NULL);
		if (rc != EOK)
			return rc;

		*fblock = current_fsblk;
		ext4_assert(*fblock);

		ext4_inode_set_size(inode_ref->inode, inode_size + block_size);
		inode_ref->dirty = true;


		return rc;
	}
#endif
	struct ext4_sblock *sb = &inode_ref->fs->sb;

	/* Compute next block index and allocate data block */
	uint64_t inode_size = ext4_inode_get_size(sb, inode_ref->inode);
	uint32_t block_size = ext4_sb_get_block_size(sb);

	/* Align the i-node size to the block size */
	if ((inode_size % block_size) != 0)
		inode_size += block_size - (inode_size % block_size);

	/* Logical blocks are numbered from 0 */
	uint32_t new_block_idx = (uint32_t)(inode_size / block_size);

	/* Allocate new physical block */
	ext4_fsblk_t goal, phys_block;
	int rc = ext4_fs_indirect_find_goal(inode_ref, &goal);
	if (rc != EOK)
		return rc;

	rc = ext4_balloc_alloc_block(inode_ref, goal, &phys_block);
	if (rc != EOK)
		return rc;

	/* Add physical block address to the i-node */
	rc = ext4_fs_set_inode_data_block_index(inode_ref, new_block_idx,
						phys_block);
	if (rc != EOK) {
		ext4_balloc_free_block(inode_ref, phys_block);
		return rc;
	}

	/* Update i-node */
	ext4_inode_set_size(inode_ref->inode, inode_size + block_size);
	inode_ref->dirty = true;

	*fblock = phys_block;
	*iblock = new_block_idx;

	return EOK;
}

void ext4_fs_inode_links_count_inc(struct ext4_inode_ref *inode_ref)
{
	uint16_t link;
	bool is_dx;
	link = ext4_inode_get_links_cnt(inode_ref->inode);
	link++;
	ext4_inode_set_links_cnt(inode_ref->inode, link);

	is_dx = ext4_sb_feature_com(&inode_ref->fs->sb, EXT4_FCOM_DIR_INDEX) &&
		ext4_inode_has_flag(inode_ref->inode, EXT4_INODE_FLAG_INDEX);

	if (is_dx && link > 1) {
		if (link >= EXT4_LINK_MAX || link == 2) {
			ext4_inode_set_links_cnt(inode_ref->inode, 1);

			uint32_t v;
			v = ext4_get32(&inode_ref->fs->sb, features_read_only);
			v |= EXT4_FRO_COM_DIR_NLINK;
			ext4_set32(&inode_ref->fs->sb, features_read_only, v);
		}
	}
}

void ext4_fs_inode_links_count_dec(struct ext4_inode_ref *inode_ref)
{
	uint16_t links = ext4_inode_get_links_cnt(inode_ref->inode);
	if (!ext4_inode_is_type(&inode_ref->fs->sb, inode_ref->inode,
				EXT4_INODE_MODE_DIRECTORY)) {
		if (links > 0)
			ext4_inode_set_links_cnt(inode_ref->inode, links - 1);
		return;
	}

	if (links > 2)
		ext4_inode_set_links_cnt(inode_ref->inode, links - 1);
}

/**
 * @}
 */
