Issue #133: support repack ext4 system_dlkm etc.

Done:
  "unpack" and "pack" tasks are supported for sparse/raw ext4 images
TODO:
  sparse/raw erofs images are not supported yet
pull/140/head v14_r1
cfig 1 year ago
parent 08f1d3b548
commit 05b4b4cab0
No known key found for this signature in database
GPG Key ID: B104C307F0FDABB7

@ -7,7 +7,7 @@ A tool for reverse engineering Android ROM images.
## Requirements
Make sure you have [JDK11+](https://www.oracle.com/java/technologies/downloads/#java17) and [Python3](https://www.python.org/downloads/).
* Linux / WSL: `sudo apt install git device-tree-compiler lz4 xz-utils zlib1g-dev openjdk-17-jdk gcc g++ python3 python-is-python3 p7zip-full android-sdk-libsparse-utils`
* Linux / WSL: `sudo apt install git device-tree-compiler lz4 xz-utils zlib1g-dev openjdk-17-jdk gcc g++ python3 python-is-python3 p7zip-full android-sdk-libsparse-utils erofs-utils`
* Mac: `brew install lz4 xz dtc`
@ -275,6 +275,46 @@ Then flash vbmeta.img.signed to your device.
</details>
<details>
<summary>work with payload.bin</summary>
- extract everything
Usage:
```
gradle unpack
```
- extract only 1 specified partition
Usage:
```
gradle unpack -Dpart=<part_name>
```
Example:
```
gradle unpack -Dpart=boot
gradle unpack -Dpart=system
```
Note:
"build/payload/" will be deleted before each "unpack" task
</details>
<details>
<summary>work with apex images</summary>
AOSP already provides tools such as apexer, deapexer, and sign_apex.py; these should cover most needs for working with .apex and .capex files.
Refer to Issue https://github.com/cfig/Android_boot_image_editor/issues/120
- For those who may be interested in apex generation flow, there is a graph here
![image](doc/apexer_generate_flow.png)
</details>
## boot.img layout
Read [boot layout](doc/layout.md) of Android boot.img and vendor\_boot.img.
Read [misc layout](doc/misc_image_layout.md) of misc\.img

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

File diff suppressed because it is too large Load Diff

@ -0,0 +1 @@
../../build/soong/scripts/system-clang-format-2

@ -0,0 +1 @@
../../build/soong/scripts/system-clang-format

@ -0,0 +1,7 @@
# This clang-format configuration may be included in subdirectories to disable
# any warning.
DisableFormat: true
# This extra settings is required because of https://reviews.llvm.org/D67843.
SortIncludes: false

@ -0,0 +1,53 @@
[defaults]
base_features = sparse_super,large_file,filetype,dir_index,ext_attr
default_mntopts = acl,user_xattr
enable_periodic_fsck = 0
blocksize = 4096
inode_size = 256
inode_ratio = 16384
reserved_ratio = 1.0
[fs_types]
ext3 = {
features = has_journal
}
ext4 = {
features = has_journal,extent,huge_file,dir_nlink,extra_isize,uninit_bg
inode_size = 256
}
ext4dev = {
features = has_journal,extent,huge_file,flex_bg,inline_data,64bit,dir_nlink,extra_isize
inode_size = 256
options = test_fs=1
}
small = {
blocksize = 1024
inode_size = 128
inode_ratio = 4096
}
floppy = {
blocksize = 1024
inode_size = 128
inode_ratio = 8192
}
big = {
inode_ratio = 32768
}
huge = {
inode_ratio = 65536
}
news = {
inode_ratio = 4096
}
largefile = {
inode_ratio = 1048576
blocksize = -1
}
largefile4 = {
inode_ratio = 4194304
blocksize = -1
}
hurd = {
blocksize = 4096
inode_size = 128
}

@ -0,0 +1,265 @@
#!/usr/bin/env python3
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import pkgutil
import subprocess
import sys
import tempfile
def RunCommand(cmd, env):
  """Runs the given command and captures its combined stdout/stderr.

  Args:
    cmd: the command represented as a list of strings. cmd[0] is resolved
        through FindProgram() before execution, so a tool bundled next to
        this script takes precedence over one on PATH.
    env: a dictionary of additional environment variables, merged on top of
        a copy of os.environ (the caller's environment is not mutated).

  Returns:
    A tuple of the output and the exit code.
  """
  env_copy = os.environ.copy()
  env_copy.update(env)

  cmd[0] = FindProgram(cmd[0])

  # Use lazy %-style arguments so formatting work is skipped when the
  # logging level filters these records out.
  logging.info("Env: %s", env)
  logging.info("Running: %s", " ".join(cmd))

  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                       env=env_copy, text=True)
  output, _ = p.communicate()

  return output, p.returncode
def FindProgram(prog_name):
  """Resolves prog_name to a path beside this script, when one exists.

  The directory containing the running script is checked first; if no file
  with that name is present there, the bare name is returned so that the
  normal PATH lookup applies.

  Args:
    prog_name: the program name to find.

  Returns:
    The sibling path if it exists, otherwise prog_name unchanged.
  """
  script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
  candidate = os.path.join(script_dir, prog_name)
  return candidate if os.path.exists(candidate) else prog_name
def ParseArguments(argv):
  """Parses the input arguments to the program.

  Positional arguments are src_dir, output_file, ext_variant, mount_point,
  fs_size and an optional trailing file_contexts; the flags map onto
  mke2fs / e2fsdroid options as noted in each help string.

  Args:
    argv: the command-line arguments, excluding the program name.

  Returns:
    An argparse.Namespace holding the parsed options.
  """
  parser = argparse.ArgumentParser(
      description=__doc__,
      formatter_class=argparse.RawDescriptionHelpFormatter)
  parser.add_argument("src_dir", help="The source directory for user image.")
  parser.add_argument("output_file", help="The path of the output image file.")
  parser.add_argument("ext_variant", choices=["ext2", "ext4"],
                      help="Variant of the extended filesystem.")
  parser.add_argument("mount_point", help="The mount point for user image.")
  parser.add_argument("fs_size", help="Size of the file system.")
  parser.add_argument("file_contexts", nargs='?',
                      help="The selinux file context.")

  parser.add_argument("--android_sparse", "-s", action="store_true",
                      help="Outputs an android sparse image (mke2fs).")
  parser.add_argument("--journal_size", "-j",
                      help="Journal size (mke2fs).")
  # Fixed help-text typo: "timetamp" -> "timestamp".
  parser.add_argument("--timestamp", "-T",
                      help="Fake timestamp for the output image.")
  parser.add_argument("--fs_config", "-C",
                      help="Path to the fs config file (e2fsdroid).")
  parser.add_argument("--product_out", "-D",
                      help="Path to the directory with device specific fs"
                           " config files (e2fsdroid).")
  parser.add_argument("--block_list_file", "-B",
                      help="Path to the block list file (e2fsdroid).")
  parser.add_argument("--base_alloc_file_in", "-d",
                      help="Path to the input base fs file (e2fsdroid).")
  parser.add_argument("--base_alloc_file_out", "-A",
                      help="Path to the output base fs file (e2fsdroid).")
  parser.add_argument("--label", "-L",
                      help="The mount point (mke2fs).")
  parser.add_argument("--inodes", "-i",
                      help="The extfs inodes count (mke2fs).")
  parser.add_argument("--inode_size", "-I",
                      help="The extfs inode size (mke2fs).")
  parser.add_argument("--reserved_percent", "-M",
                      help="The reserved blocks percentage (mke2fs).")
  parser.add_argument("--flash_erase_block_size", "-e",
                      help="The flash erase block size (mke2fs).")
  parser.add_argument("--flash_logical_block_size", "-o",
                      help="The flash logical block size (mke2fs).")
  parser.add_argument("--mke2fs_uuid", "-U",
                      help="The mke2fs uuid (mke2fs) .")
  parser.add_argument("--mke2fs_hash_seed", "-S",
                      help="The mke2fs hash seed (mke2fs).")
  parser.add_argument("--share_dup_blocks", "-c", action="store_true",
                      help="ext4 share dup blocks (e2fsdroid).")

  args, remainder = parser.parse_known_args(argv)
  # The current argparse doesn't handle intermixed arguments well. Checks
  # manually whether the file_contexts exists as the last argument.
  # TODO(xunchang) use parse_intermixed_args() when we switch to python 3.7.
  if len(remainder) == 1 and remainder[0] == argv[-1]:
    args.file_contexts = remainder[0]
  elif remainder:
    parser.print_usage()
    sys.exit(1)

  return args
def ConstructE2fsCommands(args):
  """Builds the mke2fs & e2fsdroid command based on the input arguments.

  Args:
    args: The result of ArgumentParser after parsing the command line
        arguments.

  Returns:
    A tuple of two lists that serve as the command for mke2fs and
    e2fsdroid.
  """
  BLOCKSIZE = 4096

  droid_opts = []
  extended_opts = []
  fs_opts = []

  if args.android_sparse:
    extended_opts.append("android_sparse")
  else:
    droid_opts.append("-e")

  def add_droid(flag, value):
    # Append an e2fsdroid flag/value pair only when the value is set.
    if value:
      droid_opts.extend([flag, value])

  add_droid("-T", args.timestamp)
  add_droid("-C", args.fs_config)
  add_droid("-p", args.product_out)
  add_droid("-B", args.block_list_file)
  add_droid("-d", args.base_alloc_file_in)
  add_droid("-D", args.base_alloc_file_out)
  if args.share_dup_blocks:
    droid_opts.append("-s")
  add_droid("-S", args.file_contexts)

  if args.flash_erase_block_size:
    stripe = int(args.flash_erase_block_size) // BLOCKSIZE
    extended_opts.append("stripe_width={}".format(stripe))
  if args.flash_logical_block_size:
    # stride should be the max of 8kb and the logical block size
    stride = max(int(args.flash_logical_block_size), 8192)
    extended_opts.append("stride={}".format(stride // BLOCKSIZE))
  if args.mke2fs_hash_seed:
    extended_opts.append("hash_seed=" + args.mke2fs_hash_seed)

  if args.journal_size:
    if args.journal_size == "0":
      fs_opts += ["-O", "^has_journal"]
    else:
      fs_opts += ["-J", "size=" + args.journal_size]

  def add_fs(flag, value):
    # Append an mke2fs flag/value pair only when the value is set.
    if value:
      fs_opts.extend([flag, value])

  add_fs("-L", args.label)
  add_fs("-N", args.inodes)
  add_fs("-I", args.inode_size)
  add_fs("-M", args.mount_point)
  add_fs("-m", args.reserved_percent)
  add_fs("-U", args.mke2fs_uuid)

  if extended_opts:
    fs_opts += ["-E", ','.join(extended_opts)]

  # Round down the filesystem length to be a multiple of the block size
  blocks = int(args.fs_size) // BLOCKSIZE
  mke2fs_cmd = (["mke2fs"] + fs_opts +
                ["-t", args.ext_variant, "-b", str(BLOCKSIZE),
                 args.output_file, str(blocks)])

  e2fsdroid_cmd = (["e2fsdroid"] + droid_opts +
                   ["-f", args.src_dir, "-a", args.mount_point,
                    args.output_file])

  return mke2fs_cmd, e2fsdroid_cmd
def main(argv):
  """Creates and populates an ext image: mke2fs then e2fsdroid.

  Exits with status 2 on argument validation failure and 4 when either
  external tool returns a nonzero status.
  """
  # Timestamped INFO-level logging for the whole run.
  logging_format = '%(asctime)s %(filename)s %(levelname)s: %(message)s'
  logging.basicConfig(level=logging.INFO, format=logging_format,
                      datefmt='%H:%M:%S')

  args = ParseArguments(argv)
  if not os.path.isdir(args.src_dir):
    logging.error("Can not find directory %s", args.src_dir)
    sys.exit(2)
  if not args.mount_point:
    logging.error("Mount point is required")
    sys.exit(2)
  # Normalize the mount point to an absolute-looking path.
  if args.mount_point[0] != '/':
    args.mount_point = '/' + args.mount_point
  if not args.fs_size:
    logging.error("Size of the filesystem is required")
    sys.exit(2)

  mke2fs_cmd, e2fsdroid_cmd = ConstructE2fsCommands(args)

  # truncate output file since mke2fs will keep verity section in existing file
  with open(args.output_file, 'w') as output:
    output.truncate()

  # run mke2fs
  with tempfile.NamedTemporaryFile() as conf_file:
    # mke2fs.conf is shipped as package data alongside this script.
    conf_data = pkgutil.get_data('mkuserimg_mke2fs', 'mke2fs.conf')
    conf_file.write(conf_data)
    conf_file.flush()
    mke2fs_env = {"MKE2FS_CONFIG" : conf_file.name}
    # E2FSPROGS_FAKE_TIME makes e2fsprogs emit reproducible timestamps.
    if args.timestamp:
      mke2fs_env["E2FSPROGS_FAKE_TIME"] = args.timestamp

    output, ret = RunCommand(mke2fs_cmd, mke2fs_env)
    print(output)
    if ret != 0:
      logging.error("Failed to run mke2fs: " + output)
      sys.exit(4)

  # run e2fsdroid
  e2fsdroid_env = {}
  if args.timestamp:
    e2fsdroid_env["E2FSPROGS_FAKE_TIME"] = args.timestamp

  output, ret = RunCommand(e2fsdroid_cmd, e2fsdroid_env)
  # The build script is parsing the raw output of e2fsdroid; keep the pattern
  # unchanged for now.
  print(output)
  if ret != 0:
    logging.error("Failed to run e2fsdroid_cmd: " + output)
    # Remove the partially built image so callers never see a bad artifact.
    os.remove(args.output_file)
    sys.exit(4)


if __name__ == '__main__':
  main(sys.argv[1:])

@ -0,0 +1,63 @@
// Copyright 2017 The Android Open Source Project
package {
default_applicable_licenses: ["system_extras_f2fs_utils_license"],
}
// Added automatically by a large-scale-change
// See: http://go/android-license-faq
license {
name: "system_extras_f2fs_utils_license",
visibility: [":__subpackages__"],
license_kinds: [
"SPDX-license-identifier-Apache-2.0",
],
license_text: [
"NOTICE",
],
}
// Shared library exposing the f2fs sparse-block helpers to other modules.
cc_library_shared {
    name: "libf2fs_sparseblock",
    cflags: ["-Werror"],
    srcs: ["f2fs_sparseblock.c"],
    shared_libs: [
        "liblog",
        "libcutils",
    ],
    include_dirs: [
        "external/f2fs-tools/include",
        "bionic/libc",
    ],
    export_include_dirs: ["."],
}

// Standalone command-line tool built from the same source file.
cc_binary {
    name: "f2fs_sparseblock",
    cflags: ["-Werror"],
    srcs: ["f2fs_sparseblock.c"],
    shared_libs: [
        "liblog",
        "libcutils",
    ],
    include_dirs: [
        "external/f2fs-tools/include",
        "bionic/libc",
    ],
}

// Host-side wrapper script; needs the f2fs userspace tools at runtime.
sh_binary_host {
    name: "mkf2fsuserimg",
    src: "mkf2fsuserimg.sh",
    required: [
        "make_f2fs",
        "sload_f2fs",
    ],
}

@ -0,0 +1,190 @@
Copyright (c) 2010, The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

@ -0,0 +1,3 @@
jaegeuk@google.com
daehojeong@google.com
drosen@google.com

@ -0,0 +1,610 @@
#define _LARGEFILE64_SOURCE
#define LOG_TAG "f2fs_sparseblock"
#include "f2fs_sparseblock.h"
#include <errno.h>
#include <f2fs_fs.h>
#include <fcntl.h>
#include <linux/types.h>
#include <malloc.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <log/log.h>
#define D_DISP_u32(ptr, member) \
do { \
SLOGV("%-30s" \
"\t\t[0x%#08x : %u]\n", \
#member, le32_to_cpu((ptr)->member), le32_to_cpu((ptr)->member)); \
} while (0);
#define D_DISP_u64(ptr, member) \
do { \
SLOGV("%-30s" \
"\t\t[0x%#016" PRIx64 " : %" PRIu64 "]\n", \
#member, le64_to_cpu((ptr)->member), le64_to_cpu((ptr)->member)); \
} while (0);
#define segno_in_journal(jnl, i) ((jnl)->sit_j.entries[i].segno)
#define sit_in_journal(jnl, i) ((jnl)->sit_j.entries[i].se)
/* Dumps every field of the raw on-disk f2fs superblock at verbose log level. */
static void dbg_print_raw_sb_info(struct f2fs_super_block* sb) {
    SLOGV("\n");
    SLOGV("+--------------------------------------------------------+\n");
    SLOGV("| Super block |\n");
    SLOGV("+--------------------------------------------------------+\n");
    /* Each D_DISP_* line logs one little-endian field as hex and decimal. */
    D_DISP_u32(sb, magic);
    D_DISP_u32(sb, major_ver);
    D_DISP_u32(sb, minor_ver);
    D_DISP_u32(sb, log_sectorsize);
    D_DISP_u32(sb, log_sectors_per_block);
    D_DISP_u32(sb, log_blocksize);
    D_DISP_u32(sb, log_blocks_per_seg);
    D_DISP_u32(sb, segs_per_sec);
    D_DISP_u32(sb, secs_per_zone);
    D_DISP_u32(sb, checksum_offset);
    D_DISP_u64(sb, block_count);
    D_DISP_u32(sb, section_count);
    D_DISP_u32(sb, segment_count);
    D_DISP_u32(sb, segment_count_ckpt);
    D_DISP_u32(sb, segment_count_sit);
    D_DISP_u32(sb, segment_count_nat);
    D_DISP_u32(sb, segment_count_ssa);
    D_DISP_u32(sb, segment_count_main);
    D_DISP_u32(sb, segment0_blkaddr);
    D_DISP_u32(sb, cp_blkaddr);
    D_DISP_u32(sb, sit_blkaddr);
    D_DISP_u32(sb, nat_blkaddr);
    D_DISP_u32(sb, ssa_blkaddr);
    D_DISP_u32(sb, main_blkaddr);
    D_DISP_u32(sb, root_ino);
    D_DISP_u32(sb, node_ino);
    D_DISP_u32(sb, meta_ino);
    D_DISP_u32(sb, cp_payload);
    SLOGV("\n");
}
/* Dumps every field of a raw f2fs checkpoint block at verbose log level. */
static void dbg_print_raw_ckpt_struct(struct f2fs_checkpoint* cp) {
    SLOGV("\n");
    SLOGV("+--------------------------------------------------------+\n");
    SLOGV("| Checkpoint |\n");
    SLOGV("+--------------------------------------------------------+\n");
    D_DISP_u64(cp, checkpoint_ver);
    D_DISP_u64(cp, user_block_count);
    D_DISP_u64(cp, valid_block_count);
    D_DISP_u32(cp, rsvd_segment_count);
    D_DISP_u32(cp, overprov_segment_count);
    D_DISP_u32(cp, free_segment_count);
    /* Per-type allocation state for the three node and three data cursegs. */
    D_DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]);
    D_DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]);
    D_DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]);
    D_DISP_u32(cp, cur_node_segno[0]);
    D_DISP_u32(cp, cur_node_segno[1]);
    D_DISP_u32(cp, cur_node_segno[2]);
    D_DISP_u32(cp, cur_node_blkoff[0]);
    D_DISP_u32(cp, cur_node_blkoff[1]);
    D_DISP_u32(cp, cur_node_blkoff[2]);
    D_DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]);
    D_DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]);
    D_DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]);
    D_DISP_u32(cp, cur_data_segno[0]);
    D_DISP_u32(cp, cur_data_segno[1]);
    D_DISP_u32(cp, cur_data_segno[2]);
    D_DISP_u32(cp, cur_data_blkoff[0]);
    D_DISP_u32(cp, cur_data_blkoff[1]);
    D_DISP_u32(cp, cur_data_blkoff[2]);
    D_DISP_u32(cp, ckpt_flags);
    D_DISP_u32(cp, cp_pack_total_block_count);
    D_DISP_u32(cp, cp_pack_start_sum);
    D_DISP_u32(cp, valid_node_count);
    D_DISP_u32(cp, valid_inode_count);
    D_DISP_u32(cp, next_free_nid);
    D_DISP_u32(cp, sit_ver_bitmap_bytesize);
    D_DISP_u32(cp, nat_ver_bitmap_bytesize);
    D_DISP_u32(cp, checksum_offset);
    D_DISP_u64(cp, elapsed_time);
    D_DISP_u32(cp, sit_nat_version_bitmap[0]);
    SLOGV("\n\n");
}
/* Dumps the in-memory f2fs_info summary (pointers, sizes, block addresses)
 * plus every journaled SIT entry's segment number, at verbose log level. */
static void dbg_print_info_struct(struct f2fs_info* info) {
    SLOGV("\n");
    SLOGV("+--------------------------------------------------------+\n");
    SLOGV("| F2FS_INFO |\n");
    SLOGV("+--------------------------------------------------------+\n");
    SLOGV("blocks_per_segment: %" PRIu64, info->blocks_per_segment);
    SLOGV("block_size: %d", info->block_size);
    SLOGV("sit_bmp loc: %p", info->sit_bmp);
    SLOGV("sit_bmp_size: %d", info->sit_bmp_size);
    SLOGV("blocks_per_sit: %" PRIu64, info->blocks_per_sit);
    SLOGV("sit_blocks loc: %p", info->sit_blocks);
    SLOGV("sit_sums loc: %p", info->sit_sums);
    SLOGV("sit_sums num: %d", le16_to_cpu(info->sit_sums->journal.n_sits));
    unsigned int i;
    for (i = 0; i < (le16_to_cpu(info->sit_sums->journal.n_sits)); i++) {
        SLOGV("entry %d in journal entries is for segment %d", i,
              le32_to_cpu(segno_in_journal(&info->sit_sums->journal, i)));
    }
    SLOGV("cp_blkaddr: %" PRIu64, info->cp_blkaddr);
    SLOGV("cp_valid_cp_blkaddr: %" PRIu64, info->cp_valid_cp_blkaddr);
    SLOGV("sit_blkaddr: %" PRIu64, info->sit_blkaddr);
    SLOGV("nat_blkaddr: %" PRIu64, info->nat_blkaddr);
    SLOGV("ssa_blkaddr: %" PRIu64, info->ssa_blkaddr);
    SLOGV("main_blkaddr: %" PRIu64, info->main_blkaddr);
    SLOGV("total_user_used: %" PRIu64, info->total_user_used);
    SLOGV("total_blocks: %" PRIu64, info->total_blocks);
    SLOGV("\n\n");
}
/*
 * Reads exactly len bytes at absolute byte offset start into buf.
 * Returns 0 on success; on failure returns the negative value from
 * lseek64()/read(), or -1 when fewer than len bytes were read.
 */
static int read_structure(int fd, unsigned long long start, void* buf, ssize_t len) {
    off64_t rc = lseek64(fd, start, SEEK_SET);
    if (rc < 0) {
        SLOGE("failed to seek\n");
        return rc;
    }

    rc = read(fd, buf, len);
    if (rc < 0) {
        SLOGE("failed to read\n");
        return rc;
    }
    if (rc != len) {
        /* Short read: treat a partial structure as an error. */
        SLOGE("failed to read all\n");
        return -1;
    }
    return 0;
}
/* Reads len filesystem blocks starting at block index start_blk into buf. */
static int read_structure_blk(int fd, unsigned long long start_blk, void* buf, size_t len) {
    return read_structure(fd, F2FS_BLKSIZE * start_blk, buf, F2FS_BLKSIZE * len);
}
/*
 * Reads the primary superblock at F2FS_SUPER_OFFSET into *sb and validates
 * its magic number.  Returns 0 on success, -1 on read failure or mismatch.
 */
static int read_f2fs_sb(int fd, struct f2fs_super_block* sb) {
    int rc;

    rc = read_structure(fd, F2FS_SUPER_OFFSET, sb, sizeof(*sb));
    /* Bug fix: rc was previously ignored, so a failed read went on to
     * validate uninitialized stack memory.  Bail out before touching *sb. */
    if (rc) {
        SLOGE("Failed to read superblock");
        return -1;
    }
    if (le32_to_cpu(sb->magic) != F2FS_SUPER_MAGIC) {
        SLOGE("Not a valid F2FS super block. Magic:%#08x != %#08x", le32_to_cpu(sb->magic),
              F2FS_SUPER_MAGIC);
        return -1;
    }
    return 0;
}
/*
 * Returns the size of the f2fs filesystem on dev, expressed in
 * DEFAULT_SECTOR_SIZE-byte sectors, or 0 on any failure.
 */
unsigned int get_f2fs_filesystem_size_sec(char* dev) {
    int fd;
    struct f2fs_super_block sb;
    unsigned int sectors = 0;

    if ((fd = open(dev, O_RDONLY)) < 0) {
        SLOGE("Cannot open device to get filesystem size ");
        return 0;
    }
    if (read_f2fs_sb(fd, &sb) == 0) {
        sectors = (unsigned int)(le64_to_cpu(sb.block_count) * F2FS_BLKSIZE / DEFAULT_SECTOR_SIZE);
    }
    /* Bug fix: the descriptor was previously leaked on every path. */
    close(fd);
    return sectors;
}
/*
 * Validates the checkpoint pack starting at block cp_addr: the first and
 * last blocks of the pack each carry the checkpoint version, and the pack
 * is considered valid only when the two versions match.
 *
 * On success, returns the malloc'd first checkpoint block (caller frees)
 * and stores its version in *version; returns NULL on any failure.
 */
static struct f2fs_checkpoint* validate_checkpoint(block_t cp_addr, unsigned long long* version,
                                                   int fd) {
    unsigned char *cp_block_1, *cp_block_2;
    struct f2fs_checkpoint* cp_block;
    uint64_t cp1_version = 0, cp2_version = 0;

    cp_block_1 = malloc(F2FS_BLKSIZE);
    if (!cp_block_1) return NULL;

    /* Read the 1st cp block in this CP pack */
    if (read_structure_blk(fd, cp_addr, cp_block_1, 1)) goto invalid_cp1;

    /* get the version number */
    cp_block = (struct f2fs_checkpoint*)cp_block_1;
    cp1_version = le64_to_cpu(cp_block->checkpoint_ver);

    cp_block_2 = malloc(F2FS_BLKSIZE);
    if (!cp_block_2) {
        goto invalid_cp1;
    }
    /* Read the 2nd cp block in this CP pack: it is the last block of the
     * pack, cp_pack_total_block_count - 1 blocks after the first. */
    cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
    if (read_structure_blk(fd, cp_addr, cp_block_2, 1)) {
        goto invalid_cp2;
    }

    cp_block = (struct f2fs_checkpoint*)cp_block_2;
    cp2_version = le64_to_cpu(cp_block->checkpoint_ver);
    if (cp2_version == cp1_version) {
        *version = cp2_version;
        free(cp_block_2);
        return (struct f2fs_checkpoint*)cp_block_1;
    }

    /* There must be something wrong with this checkpoint */
invalid_cp2:
    free(cp_block_2);
invalid_cp1:
    free(cp_block_1);
    return NULL;
}
/*
 * Locates the most recent valid checkpoint out of the two checkpoint packs
 * and returns it through *cp (malloc'd; caller frees).  The winning pack's
 * start block is recorded in info->cp_valid_cp_blkaddr.
 *
 * Returns 0 on success, -EINVAL when neither pack validates.
 *
 * Cleanup: removed an unused `blk_size` local (dead computation that only
 * produced a -Wunused-variable warning).
 */
int get_valid_checkpoint_info(int fd, struct f2fs_super_block* sb, struct f2fs_checkpoint** cp,
                              struct f2fs_info* info) {
    struct f2fs_checkpoint *cp1, *cp2, *cur_cp;
    unsigned long long cp1_version = 0, cp2_version = 0;
    unsigned long long cp1_start_blk_no;
    unsigned long long cp2_start_blk_no;

    /*
     * Find valid cp by reading both packs and finding most recent one.
     */
    cp1_start_blk_no = le32_to_cpu(sb->cp_blkaddr);
    cp1 = validate_checkpoint(cp1_start_blk_no, &cp1_version, fd);

    /* The second checkpoint pack should start at the next segment */
    cp2_start_blk_no = cp1_start_blk_no + (1 << le32_to_cpu(sb->log_blocks_per_seg));
    cp2 = validate_checkpoint(cp2_start_blk_no, &cp2_version, fd);

    if (cp1 && cp2) {
        /* Both packs valid: keep the newer one and free the loser. */
        if (ver_after(cp2_version, cp1_version)) {
            cur_cp = cp2;
            info->cp_valid_cp_blkaddr = cp2_start_blk_no;
            free(cp1);
        } else {
            cur_cp = cp1;
            info->cp_valid_cp_blkaddr = cp1_start_blk_no;
            free(cp2);
        }
    } else if (cp1) {
        cur_cp = cp1;
        info->cp_valid_cp_blkaddr = cp1_start_blk_no;
    } else if (cp2) {
        cur_cp = cp2;
        info->cp_valid_cp_blkaddr = cp2_start_blk_no;
    } else {
        goto fail_no_cp;
    }
    *cp = cur_cp;
    return 0;
fail_no_cp:
    SLOGE("Valid Checkpoint not found!!");
    return -EINVAL;
}
/*
 * Loads every SIT (Segment Information Table) block into info->sit_blocks.
 * The version bitmap is honored: when a block's bit is set in sit_bmp, the
 * valid copy lives blocks_per_sit blocks further in (the second SIT area).
 * Returns 0 on success, -1 on OOM or read failure; on failure sit_blocks
 * is freed and set to NULL.
 */
static int gather_sit_info(int fd, struct f2fs_info* info) {
    /* Number of main-area segments, rounded up. */
    uint64_t num_segments =
            (info->total_blocks - info->main_blkaddr + info->blocks_per_segment - 1) /
            info->blocks_per_segment;
    uint64_t num_sit_blocks = (num_segments + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK;
    uint64_t sit_block;

    info->sit_blocks = malloc(num_sit_blocks * sizeof(struct f2fs_sit_block));
    if (!info->sit_blocks) return -1;

    for (sit_block = 0; sit_block < num_sit_blocks; sit_block++) {
        off64_t address = info->sit_blkaddr + sit_block;

        /* Bit set => read from the mirrored SIT copy. */
        if (f2fs_test_bit(sit_block, info->sit_bmp)) address += info->blocks_per_sit;

        SLOGV("Reading cache block starting at block %" PRIu64, address);
        if (read_structure(fd, address * F2FS_BLKSIZE, &info->sit_blocks[sit_block],
                           sizeof(struct f2fs_sit_block))) {
            SLOGE("Could not read sit block at block %" PRIu64, address);
            free(info->sit_blocks);
            info->sit_blocks = NULL;
            return -1;
        }
    }
    return 0;
}
/* Returns 1 when flag mask f is set in the checkpoint's ckpt_flags, else 0. */
static inline int is_set_ckpt_flags(struct f2fs_checkpoint* cp, unsigned int f) {
    unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
    return !!(ckpt_flags & f);
}
/*
 * Computes the block address of a summary block, counted backwards from the
 * end of the valid checkpoint pack: (base + 1 - type) blocks before the end.
 */
static inline uint64_t sum_blk_addr(struct f2fs_checkpoint* cp, struct f2fs_info* info, int base,
                                    int type) {
    return info->cp_valid_cp_blkaddr + le32_to_cpu(cp->cp_pack_total_block_count) - (base + 1) +
           type;
}
/*
 * Reads the SIT journal summary into a freshly calloc'd info->sit_sums.
 * With CP_COMPACT_SUM_FLAG the journal is embedded in the checkpoint pack
 * at cp_pack_start_sum; otherwise it is taken from the CURSEG_COLD_DATA
 * summary block.  Returns 0 on success, -1 on OOM or read failure.
 * NOTE(review): on a read failure sit_sums stays allocated; presumably the
 * caller frees it when tearing down info — confirm.
 */
static int get_sit_summary(int fd, struct f2fs_info* info, struct f2fs_checkpoint* cp) {
    char buffer[F2FS_BLKSIZE];

    info->sit_sums = calloc(1, sizeof(struct f2fs_summary_block));
    if (!info->sit_sums) return -1;

    /* CURSEG_COLD_DATA where the journaled SIT entries are. */
    if (is_set_ckpt_flags(cp, CP_COMPACT_SUM_FLAG)) {
        if (read_structure_blk(fd, info->cp_valid_cp_blkaddr + le32_to_cpu(cp->cp_pack_start_sum),
                               buffer, 1))
            return -1;
        /* Compact layout: copy the SIT journal slot, which starts
         * SUM_JOURNAL_SIZE bytes into the block. */
        memcpy(&info->sit_sums->journal.n_sits, &buffer[SUM_JOURNAL_SIZE], SUM_JOURNAL_SIZE);
    } else {
        uint64_t blk_addr;
        if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
            blk_addr = sum_blk_addr(cp, info, NR_CURSEG_TYPE, CURSEG_COLD_DATA);
        else
            blk_addr = sum_blk_addr(cp, info, NR_CURSEG_DATA_TYPE, CURSEG_COLD_DATA);

        if (read_structure_blk(fd, blk_addr, buffer, 1)) return -1;
        memcpy(info->sit_sums, buffer, sizeof(struct f2fs_summary_block));
    }
    return 0;
}
/*
 * Build an f2fs_info describing the filesystem readable on fd: superblock
 * geometry, the valid checkpoint, the SIT validity bitmap, all SIT blocks,
 * and the SIT journal summary.
 *
 * Returns a heap-allocated f2fs_info (release with free_f2fs_info()) or
 * NULL on any failure.
 *
 * Fix: the superblock and checkpoint buffers were only freed on the error
 * path; the success path leaked both.  They are now freed before returning
 * info, since all needed values have been copied into *info by then.
 */
struct f2fs_info* generate_f2fs_info(int fd) {
    struct f2fs_super_block* sb = NULL;
    struct f2fs_checkpoint* cp = NULL;
    struct f2fs_info* info;
    info = calloc(1, sizeof(*info));
    if (!info) {
        SLOGE("Out of memory!");
        return NULL;
    }
    sb = malloc(sizeof(*sb));
    if (!sb) {
        SLOGE("Out of memory!");
        free(info);
        return NULL;
    }
    if (read_f2fs_sb(fd, sb)) {
        SLOGE("Failed to read superblock");
        free(info);
        free(sb);
        return NULL;
    }
    dbg_print_raw_sb_info(sb);
    info->cp_blkaddr = le32_to_cpu(sb->cp_blkaddr);
    info->sit_blkaddr = le32_to_cpu(sb->sit_blkaddr);
    info->nat_blkaddr = le32_to_cpu(sb->nat_blkaddr);
    info->ssa_blkaddr = le32_to_cpu(sb->ssa_blkaddr);
    info->main_blkaddr = le32_to_cpu(sb->main_blkaddr);
    info->block_size = F2FS_BLKSIZE;
    /* NOTE(review): other sb fields go through le32_to_cpu but block_count
     * is used raw — verify endianness handling on big-endian hosts. */
    info->total_blocks = sb->block_count;
    /* The SIT area holds two mirrored copies; half of it backs one copy. */
    info->blocks_per_sit = (le32_to_cpu(sb->segment_count_sit) >> 1)
                           << le32_to_cpu(sb->log_blocks_per_seg);
    info->blocks_per_segment = 1U << le32_to_cpu(sb->log_blocks_per_seg);
    if (get_valid_checkpoint_info(fd, sb, &cp, info)) goto error;
    dbg_print_raw_ckpt_struct(cp);
    info->total_user_used = le32_to_cpu(cp->valid_block_count);
    u32 bmp_size = le32_to_cpu(cp->sit_ver_bitmap_bytesize);
    /* get sit validity bitmap */
    info->sit_bmp = malloc(bmp_size);
    if (!info->sit_bmp) {
        SLOGE("Out of memory!");
        goto error;
    }
    info->sit_bmp_size = bmp_size;
    if (read_structure(fd,
                       info->cp_valid_cp_blkaddr * F2FS_BLKSIZE +
                               offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap),
                       info->sit_bmp, bmp_size)) {
        SLOGE("Error getting SIT validity bitmap");
        goto error;
    }
    if (gather_sit_info(fd, info)) {
        SLOGE("Error getting SIT information");
        goto error;
    }
    if (get_sit_summary(fd, info, cp)) {
        SLOGE("Error getting SIT entries in summary area");
        goto error;
    }
    dbg_print_info_struct(info);
    /* Everything needed has been copied into *info; release the raw
     * on-disk structures (previously leaked on this path). */
    free(sb);
    free(cp);
    return info;
error:
    free(sb);
    free(cp); /* free(NULL) is safe if checkpoint lookup failed */
    free_f2fs_info(info);
    return NULL;
}
/* Release every buffer owned by an f2fs_info, then the struct itself.
 * NULL is accepted and is a no-op. */
void free_f2fs_info(struct f2fs_info* info) {
    if (!info) return;
    free(info->sit_blocks);
    info->sit_blocks = NULL;
    free(info->sit_bmp);
    info->sit_bmp = NULL;
    free(info->sit_sums);
    info->sit_sums = NULL;
    free(info);
}
/* Estimate of blocks in use: the whole metadata area (everything below
 * main_blkaddr) plus the valid user block count from the checkpoint. */
uint64_t get_num_blocks_used(struct f2fs_info* info) {
    uint64_t metadata_blocks = info->main_blkaddr;
    return metadata_blocks + info->total_user_used;
}
/*
 * Test bit `nr` of the bitmap `p`, where bit 0 is the most significant bit
 * of byte 0 (the layout used by the f2fs on-disk validity bitmaps).
 *
 * Returns 1 if the bit is set, 0 otherwise.
 *
 * Fix: the original cast away const to do pointer arithmetic; read-only
 * access needs no such cast, and using unsigned char avoids relying on the
 * signedness of plain char.
 */
int f2fs_test_bit(unsigned int nr, const char* p) {
    unsigned char byte = (unsigned char)p[nr >> 3];
    return (byte >> (7 - (nr & 0x07))) & 1;
}
/*
 * Walk every block of the filesystem from `startblock` upward and invoke
 * func(block, data) for each block considered in use.
 *
 * Blocks below main_blkaddr (superblock, checkpoint, SIT, NAT, SSA) are
 * always passed to func.  For main-area blocks, the segment's validity
 * bitmap decides — taken from the journaled SIT entry when the segment is
 * present in the SIT journal, otherwise from the cached SIT blocks.
 *
 * Returns 0 when the walk completes, -1 as soon as func reports an error.
 */
int run_on_used_blocks(uint64_t startblock, struct f2fs_info* info,
                       int (*func)(uint64_t pos, void* data), void* data) {
    struct f2fs_sit_entry* sit_entry;
    uint64_t sit_block_num_cur = 0, segnum = 0, block_offset;
    uint64_t block;
    unsigned int used, found, i;
    block = startblock;
    while (block < info->total_blocks) {
        /* TODO: Save only relevant portions of metadata */
        if (block < info->main_blkaddr) {
            /* Metadata area: always copied. */
            if (func(block, data)) {
                SLOGI("func error");
                return -1;
            }
        } else {
            /* Main Section */
            segnum = (block - info->main_blkaddr) / info->blocks_per_segment;
            /* check the SIT entries in the journal */
            found = 0;
            for (i = 0; i < le16_to_cpu(info->sit_sums->journal.n_sits); i++) {
                if (le32_to_cpu(segno_in_journal(&info->sit_sums->journal, i)) == segnum) {
                    sit_entry = &sit_in_journal(&info->sit_sums->journal, i);
                    found = 1;
                    break;
                }
            }
            /* get SIT entry from SIT section */
            if (!found) {
                sit_block_num_cur = segnum / SIT_ENTRY_PER_BLOCK;
                sit_entry =
                        &info->sit_blocks[sit_block_num_cur].entries[segnum % SIT_ENTRY_PER_BLOCK];
            }
            block_offset = (block - info->main_blkaddr) % info->blocks_per_segment;
            /* A segment with zero valid blocks can be skipped wholesale
             * when we are positioned at its first block. */
            if (block_offset == 0 && GET_SIT_VBLOCKS(sit_entry) == 0) {
                block += info->blocks_per_segment;
                continue;
            }
            used = f2fs_test_bit(block_offset, (char*)sit_entry->valid_map);
            if (used)
                if (func(block, data)) return -1;
        }
        block++;
    }
    return 0;
}
/* Shared state threaded through the copy_used() callback. */
struct privdata {
    int count;              /* number of blocks copied so far */
    int infd;               /* source filesystem fd */
    int outfd;              /* destination image fd */
    char* buf;              /* one-block scratch buffer */
    char* zbuf;             /* zero-filled block (not used by copy_used) */
    int done;               /* last progress percentage printed */
    struct f2fs_info* info; /* filesystem geometry */
};
/*
* This is a simple test program. It performs a block to block copy of a
* filesystem, replacing blocks identified as unused with 0's.
*/
/*
 * run_on_used_blocks() callback: read block `pos` from the input fd and
 * write it to the same byte offset in the output fd, printing progress as
 * whole percentages.
 *
 * Returns 0 on success, a negative value on failure.
 *
 * Fixes: removed the dead local `buf` (assigned, never read) and corrected
 * the short-write error message, which previously said "failed to read all".
 */
int copy_used(uint64_t pos, void* data) {
    struct privdata* d = data;
    int pdone = (pos * 100) / d->info->total_blocks;
    if (pdone > d->done) {
        d->done = pdone;
        printf("Done with %d percent\n", d->done);
    }
    d->count++;
    if (read_structure_blk(d->infd, (unsigned long long)pos, d->buf, 1)) {
        printf("Error reading!!!\n");
        return -1;
    }
    off64_t ret;
    ret = lseek64(d->outfd, pos * F2FS_BLKSIZE, SEEK_SET);
    if (ret < 0) {
        SLOGE("failed to seek\n");
        return ret;
    }
    ret = write(d->outfd, d->buf, F2FS_BLKSIZE);
    if (ret < 0) {
        SLOGE("failed to write\n");
        return ret;
    }
    if (ret != F2FS_BLKSIZE) {
        SLOGE("failed to write all\n");
        return -1;
    }
    return 0;
}
/*
 * Simple driver: block-for-block copy of an f2fs filesystem, writing only
 * blocks marked in use (unused blocks stay holes / zeros in the output).
 *
 * Fixes:
 *  - argc check previously printed usage but FELL THROUGH, dereferencing
 *    argv[1]/argv[2] (NULL) — now exits.
 *  - open/allocation failures now return a nonzero exit code instead of 0.
 *  - malloc/calloc results are checked before use.
 */
int main(int argc, char** argv) {
    if (argc != 3) {
        printf("Usage: %s fs_file_in fs_file_out\n", argv[0]);
        return -1;
    }
    char* in = argv[1];
    char* out = argv[2];
    int infd, outfd;
    if ((infd = open(in, O_RDONLY)) < 0) {
        SLOGE("Cannot open device");
        return -1;
    }
    if ((outfd = open(out, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR)) < 0) {
        SLOGE("Cannot open output");
        close(infd);
        return -1;
    }
    struct privdata d;
    d.infd = infd;
    d.outfd = outfd;
    d.count = 0;
    struct f2fs_info* info = generate_f2fs_info(infd);
    if (!info) {
        printf("Failed to generate info!");
        close(infd);
        close(outfd);
        return -1;
    }
    char* buf = malloc(F2FS_BLKSIZE);
    char* zbuf = calloc(1, F2FS_BLKSIZE);
    if (!buf || !zbuf) {
        SLOGE("Out of memory!");
        free(buf);
        free(zbuf);
        free_f2fs_info(info);
        close(infd);
        close(outfd);
        return -1;
    }
    d.buf = buf;
    d.zbuf = zbuf;
    d.done = 0;
    d.info = info;
    /* NOTE(review): get_num_blocks_used returns uint64_t; truncation to int
     * only matters for the progress message, not correctness of the copy. */
    int expected_count = get_num_blocks_used(info);
    run_on_used_blocks(0, info, &copy_used, &d);
    printf("Copied %d blocks. Expected to copy %d\n", d.count, expected_count);
    /* Extend the output to the full filesystem size (sparse tail). */
    ftruncate64(outfd, info->total_blocks * F2FS_BLKSIZE);
    free_f2fs_info(info);
    free(buf);
    free(zbuf);
    close(infd);
    close(outfd);
    return 0;
}

@ -0,0 +1,73 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef F2FS_UTILS_F2F2_UTILS_H_
#define F2FS_UTILS_F2F2_UTILS_H_
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Checkpoint version comparison helpers.  `typecheck` is expected to be
 * supplied by the including project (Linux-kernel style macro). */
#define ver_after(a, b) \
    (typecheck(unsigned long long, a) && typecheck(unsigned long long, b) && \
     ((long long)((a) - (b)) > 0))
#define ver_equal(a, b) \
    (typecheck(unsigned long long, a) && typecheck(unsigned long long, b) && \
     ((long long)((a) - (b)) == 0))
struct f2fs_sit_block;
struct f2fs_summary_block;
/* In-memory description of an f2fs filesystem's geometry plus the cached
 * SIT (Segment Info Table) state needed to decide which blocks are used. */
struct f2fs_info {
    uint64_t blocks_per_segment;            /* blocks per segment (1 << log_blocks_per_seg) */
    uint32_t block_size;                    /* filesystem block size in bytes */
    char* sit_bmp;                          /* SIT copy-validity bitmap (malloc'd) */
    uint32_t sit_bmp_size;                  /* size of sit_bmp in bytes */
    uint64_t blocks_per_sit;                /* blocks backing one SIT copy */
    struct f2fs_sit_block* sit_blocks;      /* all SIT blocks, read from disk */
    struct f2fs_summary_block* sit_sums;    /* SIT journal summary block */
    uint64_t cp_blkaddr;                    /* checkpoint area start block */
    uint64_t cp_valid_cp_blkaddr;           /* start block of the valid CP pack */
    uint64_t sit_blkaddr;                   /* SIT area start block */
    uint64_t nat_blkaddr;                   /* NAT area start block */
    uint64_t ssa_blkaddr;                   /* SSA area start block */
    uint64_t main_blkaddr;                  /* main (data) area start block */
    uint64_t total_user_used;               /* valid user block count from CP */
    uint64_t total_blocks;                  /* total blocks in the filesystem */
};
/* Estimated number of blocks in use (metadata + valid user blocks). */
uint64_t get_num_blocks_used(struct f2fs_info* info);
/* Parse the filesystem on fd into a heap-allocated f2fs_info (or NULL). */
struct f2fs_info* generate_f2fs_info(int fd);
/* Release an f2fs_info and all buffers it owns; NULL-safe. */
void free_f2fs_info(struct f2fs_info* info);
unsigned int get_f2fs_filesystem_size_sec(char* dev);
/* Invoke func for every in-use block from startblock upward. */
int run_on_used_blocks(uint64_t startblock, struct f2fs_info* info,
                       int (*func)(uint64_t pos, void* data), void* data);
#ifdef __cplusplus
}
#endif
#endif // F2FS_UTILS_F2F2_UTILS_H_

@ -0,0 +1,193 @@
#!/bin/bash
#
# To call this script, make sure make_f2fs is somewhere in PATH
# Print command-line usage.  Note the options really must appear in the
# exact order shown: parsing below is strictly positional.
function usage() {
cat<<EOT
Usage:
${0##*/} OUTPUT_FILE SIZE
[-S] [-C FS_CONFIG] [-f SRC_DIR] [-D PRODUCT_OUT]
[-s FILE_CONTEXTS] [-t MOUNT_POINT] [-T TIMESTAMP] [-B block_map]
[-L LABEL] [--prjquota] [--casefold] [--compression] [--readonly]
[--sldc <num> [sload compression sub-options]]
<num>: number of the sload compression args, e.g. -a LZ4 counts as 2
when sload compression args are not given, <num> must be 0,
and the default flags will be used.
Note: must conserve the option order
EOT
}
echo "in mkf2fsuserimg.sh PATH=$PATH"
MKFS_OPTS=""
SLOAD_OPTS=""
BLOCK_MAP_FILE=""
BLOCK_MAP_OPT=""
if [ $# -lt 2 ]; then
usage
exit 1
fi
OUTPUT_FILE=$1
SIZE=$2
shift; shift
SPARSE_IMG="false"
# Positional option parsing: each check consumes $1 only when it matches the
# expected next flag, so callers must keep the documented option order.
if [[ "$1" == "-S" ]]; then
MKFS_OPTS+=" -S $SIZE"
SLOAD_OPTS+=" -S"
BLOCK_MAP_OPT+=" -S -M"
SPARSE_IMG="true"
fi
shift
fi
if [[ "$1" == "-C" ]]; then
SLOAD_OPTS+=" -C $2"
shift; shift
fi
if [[ "$1" == "-f" ]]; then
SLOAD_OPTS+=" -f $2"
shift; shift
fi
# -D (PRODUCT_OUT) is forwarded to sload_f2fs as -p.
if [[ "$1" == "-D" ]]; then
SLOAD_OPTS+=" -p $2"
shift; shift
fi
if [[ "$1" == "-s" ]]; then
SLOAD_OPTS+=" -s $2"
shift; shift
fi
if [[ "$1" == "-t" ]]; then
MOUNT_POINT=$2
shift; shift
fi
if [ -z $MOUNT_POINT ]; then
echo "Mount point is required"
exit 2
fi
# Normalize the mount point to an absolute path.
if [[ ${MOUNT_POINT:0:1} != "/" ]]; then
MOUNT_POINT="/"$MOUNT_POINT
fi
SLOAD_OPTS+=" -t $MOUNT_POINT"
if [[ "$1" == "-T" ]]; then
SLOAD_OPTS+=" -T $2"
shift; shift
fi
if [[ "$1" == "-B" ]]; then
BLOCK_MAP_FILE="$2"
shift; shift
fi
if [[ "$1" == "-L" ]]; then
MKFS_OPTS+=" -l $2"
shift; shift
fi
if [[ "$1" == "--prjquota" ]]; then
MKFS_OPTS+=" -O project_quota,extra_attr"
shift;
fi
if [[ "$1" == "--casefold" ]]; then
MKFS_OPTS+=" -O casefold -C utf8"
shift;
fi
if [[ "$1" == "--compression" ]]; then
COMPRESS_SUPPORT=1
MKFS_OPTS+=" -O compression,extra_attr"
shift;
fi
if [[ "$1" == "--readonly" ]]; then
MKFS_OPTS+=" -O ro"
READONLY=1
shift;
fi
# --sldc takes an explicit argument count, then that many raw sload args.
if [[ "$1" == "--sldc" ]]; then
if [ -z "$COMPRESS_SUPPORT" ]; then
echo "--sldc needs --compression flag"
exit 3
fi
SLOAD_OPTS+=" -c"
shift
SLDC_NUM_ARGS=$1
case $SLDC_NUM_ARGS in
''|*[!0-9]*)
echo "--sldc needs a number"
exit 3 ;;
esac
shift
while [ $SLDC_NUM_ARGS -gt 0 ]; do
SLOAD_OPTS+=" $1"
shift
(( SLDC_NUM_ARGS-- ))
done
fi
if [ -z $SIZE ]; then
echo "Need size of filesystem"
exit 2
fi
# Size OUTPUT_FILE to SIZE bytes with truncate(1).
# No-op for sparse images: make_f2fs -S embeds the size itself.
function _truncate()
{
if [ "$SPARSE_IMG" = "true" ]; then
return
fi
TRUNCATE_CMD="truncate -s $SIZE $OUTPUT_FILE"
echo $TRUNCATE_CMD
$TRUNCATE_CMD
if [ $? -ne 0 ]; then
exit 3
fi
}
# Create and populate the filesystem image: make_f2fs lays down the fs,
# then sload_f2fs fills it from the source directory.  sload's
# "Max image size" line is parsed back into SIZE (plus 6 MiB slack) so a
# readonly second pass can shrink the image.
function _build()
{
MAKE_F2FS_CMD="make_f2fs -g android $MKFS_OPTS $OUTPUT_FILE"
echo $MAKE_F2FS_CMD
$MAKE_F2FS_CMD
if [ $? -ne 0 ]; then
if [ "$SPARSE_IMG" = "false" ]; then
rm -f $OUTPUT_FILE
fi
exit 4
fi
SLOAD_F2FS_CMD="sload_f2fs $SLOAD_OPTS $OUTPUT_FILE"
echo $SLOAD_F2FS_CMD
SLOAD_LOG=`$SLOAD_F2FS_CMD`
# allow 1: Filesystem errors corrected
ret=$?
if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
rm -f $OUTPUT_FILE
exit 4
fi
MB_SIZE=`echo "$SLOAD_LOG" | grep "Max image size" | awk '{print $5}'`
SIZE=$(((MB_SIZE + 6) * 1024 * 1024))
}
# First pass: allocate and populate the image.
_truncate
_build
# readonly can reduce the image
if [ "$READONLY" ]; then
if [ "$SPARSE_IMG" = "true" ]; then
MKFS_OPTS+=" -S $SIZE"
rm -f $OUTPUT_FILE && touch $OUTPUT_FILE
fi
# Second pass at the reduced SIZE computed by _build.
_truncate
_build
# build block map
if [ "$BLOCK_MAP_FILE" ]; then
fsck.f2fs $BLOCK_MAP_OPT $OUTPUT_FILE > $BLOCK_MAP_FILE
fi
fi
exit 0

@ -21,10 +21,11 @@ import avb.blob.AuxBlob
import avb.blob.Footer
import avb.blob.Header
import avb.desc.HashDescriptor
import avb.desc.HashTreeDescriptor
import cfig.helper.CryptoHelper
import cfig.helper.Dumpling
import cfig.helper.Helper
import cfig.helper.Helper.Companion.paddingWith
import cfig.helper.Dumpling
import com.fasterxml.jackson.core.type.TypeReference
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.commons.codec.binary.Hex
@ -198,38 +199,77 @@ class Avb {
return true
}
fun updateVbmeta(fileName: String) {
fun updateVbmeta(fileName: String, desc: Any = HashDescriptor::class) {
if (File("vbmeta.img").exists()) {
log.info("Updating vbmeta.img side by side ...")
val partitionName =
ObjectMapper().readValue(File(getJsonFileName(fileName)), AVBInfo::class.java).let {
it.auxBlob!!.hashDescriptors.get(0).partition_name
}
//read hashDescriptor from image
val readBackInfo = ObjectMapper().readValue(File(getJsonFileName(fileName)), AVBInfo::class.java)
val newHashDesc = AVBInfo.parseFrom(Dumpling("$fileName.signed"))
check(newHashDesc.auxBlob!!.hashDescriptors.size == 1)
var seq = -1 //means not found
//main vbmeta
ObjectMapper().readValue(File(getJsonFileName("vbmeta.img")), AVBInfo::class.java).apply {
val itr = this.auxBlob!!.hashDescriptors.iterator()
while (itr.hasNext()) {
val itrValue = itr.next()
if (itrValue.partition_name == partitionName) {
log.info("Found $partitionName in vbmeta, update it")
seq = itrValue.sequence
itr.remove()
break
when (desc) {
HashDescriptor::class -> {
val partitionName = readBackInfo.auxBlob!!.hashDescriptors.get(0).partition_name
log.warn("partitionName=$partitionName")
//read hashDescriptor from image
check(newHashDesc.auxBlob!!.hashDescriptors.size == 1)
var seq = -1 //means not found
//main vbmeta
ObjectMapper().readValue(File(getJsonFileName("vbmeta.img")), AVBInfo::class.java).apply {
val itr = this.auxBlob!!.hashDescriptors.iterator()
while (itr.hasNext()) {
val itrValue = itr.next()
if (itrValue.partition_name == partitionName) {
log.info("Found $partitionName in vbmeta, update it")
seq = itrValue.sequence
itr.remove()
break
}
}
if (-1 == seq) {
log.warn("main vbmeta doesn't have $partitionName hashDescriptor, won't update vbmeta.img")
} else {
//add hashDescriptor back to main vbmeta
val hd = newHashDesc.auxBlob!!.hashDescriptors.get(0).apply { this.sequence = seq }
this.auxBlob!!.hashDescriptors.add(hd)
log.info("Writing padded vbmeta to file: vbmeta.img.signed")
Files.write(Paths.get("vbmeta.img.signed"), encodePadded(), StandardOpenOption.CREATE)
log.info("Updating vbmeta.img side by side (partition=$partitionName, seq=$seq) done")
}
}
}
if (-1 == seq) {
log.warn("main vbmeta doesn't have $partitionName hashDescriptor, won't update vbmeta.img")
} else {
//add hashDescriptor back to main vbmeta
val hd = newHashDesc.auxBlob!!.hashDescriptors.get(0).apply { this.sequence = seq }
this.auxBlob!!.hashDescriptors.add(hd)
log.info("Writing padded vbmeta to file: vbmeta.img.signed")
Files.write(Paths.get("vbmeta.img.signed"), encodePadded(), StandardOpenOption.CREATE)
log.info("Updating vbmeta.img side by side (partition=$partitionName, seq=$seq) done")
HashTreeDescriptor::class -> {
val partitionName = readBackInfo.auxBlob!!.hashTreeDescriptors.get(0).partition_name
log.warn("partitionName=$partitionName")
//read hashTreeDescriptor from image
check(newHashDesc.auxBlob!!.hashTreeDescriptors.size == 1)
var seq = -1 //means not found
//main vbmeta
ObjectMapper().readValue(File(getJsonFileName("vbmeta.img")), AVBInfo::class.java).apply {
val itr = this.auxBlob!!.hashTreeDescriptors.iterator()
while (itr.hasNext()) {
val itrValue = itr.next()
if (itrValue.partition_name == partitionName) {
log.info("Found $partitionName (HashTreeDescriptor) in vbmeta, update it")
log.info("Original: " + itrValue.toString())
seq = itrValue.sequence
itr.remove()
break
}
}
if (-1 == seq) {
log.warn("main vbmeta doesn't have $partitionName hashTreeDescriptor, won't update vbmeta.img")
} else {
//add hashTreeDescriptor back to main vbmeta
val hd = newHashDesc.auxBlob!!.hashTreeDescriptors.get(0).apply { this.sequence = seq }
log.info("Updated: " + hd.toString())
this.auxBlob!!.hashTreeDescriptors.add(hd)
log.info("Writing padded vbmeta to file: vbmeta.img.signed")
Files.write(Paths.get("vbmeta.img.signed"), encodePadded(), StandardOpenOption.CREATE)
log.info("Updating vbmeta.img side by side (partition=$partitionName, seq=$seq) done")
}
}
}
else -> {
throw IllegalArgumentException("unknown descriptor type: $desc")
}
}
} else {

@ -14,7 +14,7 @@
package cfig.init
import miscimg.MiscImage
import rom.misc.MiscImage
import org.slf4j.LoggerFactory
import java.util.*

@ -25,6 +25,7 @@ import java.nio.file.Files
import java.nio.file.Paths
import java.nio.file.StandardOpenOption
import java.util.*
import kotlin.io.path.Path
import kotlin.system.exitProcess
@OptIn(ExperimentalUnsignedTypes::class)
@ -116,7 +117,7 @@ class PayloadGenerator {
if (fileHeader[0] as UInt == 0x3aff26ed.toUInt()) {
log.debug("$part is sparse, convert to raw image")
"simg2img $workDir/$part.img $workDir/tmp.img".check_call()
File("$workDir/tmp.img").renameTo(File("$workDir/$part.img"))
Files.move(Path("$workDir/tmp.img"), Path("$workDir/$part.img"))
}
}
}

@ -15,7 +15,7 @@
package cfig.packable
import cfig.helper.Helper
import miscimg.MiscImage
import rom.misc.MiscImage
import cfig.helper.Helper.Companion.deleteIfExists
import com.fasterxml.jackson.databind.ObjectMapper
import org.slf4j.LoggerFactory

@ -14,7 +14,7 @@
package cfig.packable
import cfig.utils.SparseImgParser
import rom.sparse.SparseImgParser
import org.slf4j.LoggerFactory
import packable.DeviceTreeParser
import java.io.File

@ -1,4 +1,4 @@
package miscimg
package rom.misc
import cc.cfig.io.Struct
import cfig.helper.Helper

@ -0,0 +1,169 @@
package rom.sparse
import cfig.helper.Helper
import org.slf4j.LoggerFactory
import java.io.BufferedReader
import java.io.InputStreamReader
/**
 * Shared machinery for rebuilding verified (AVB-footer'd) filesystem images.
 * Subclasses (ext4 / erofs) build the raw image; this class sizes the
 * partition via avbtool and appends the hash/hashtree footer.
 */
open class BaseGenerator(
    val partitionName: String = "NA",
    val footerType: String = AVB_HASHTREE_FOOTER
) {
    var partitionSize: Long = 0
    var imageSize: Long = 0
    var keyPath: String = ""
    var algorithm: String = ""
    var salt: String = ""
    // avbtool launcher resolved from project properties
    val avbtool = String.format(Helper.prop("avbtool"), "v1.2")
    // Default avbtool signing args; pack() implementations overwrite this
    // with values read back from the original image.
    var signingArgs = "--hash_algorithm sha256 " +
            "--prop com.android.build.the_partition_name.os_version:14 " +
            "--prop com.android.build.the_partition_name.fingerprint:anonymous/device/device:14/UD1A.230803.041/buildid:userdebug/test-keys"
    val workDir = Helper.prop("workDir")

    /**
     * Binary-search the smallest partition size whose max image size (per
     * avbtool --calc_max_image_size) is still >= [imageSize], on
     * BLOCK_SIZE-aligned candidates.  [sizeCalculator] defaults to
     * [calculateMaxImageSize].
     */
    fun calculateMinPartitionSize(imageSize: Long, sizeCalculator: ((Long) -> Long)? = null): Long {
        require(imageSize > 0) { "Invalid image size: $imageSize" } // guard div-by-zero below
        var sizeCalculatorCopy = sizeCalculator
        if (sizeCalculatorCopy == null) {
            sizeCalculatorCopy = this::calculateMaxImageSize
        }
        // Use image size as partition size to approximate final partition size.
        val calculated = sizeCalculatorCopy(imageSize)
        var imageRatio = calculated / imageSize.toDouble()
        log.info("image_ratio = $imageRatio, image_size = $imageSize, calc = $calculated")
        // Prepare a binary search for the optimal partition size.
        var lo = (imageSize / imageRatio).toLong() / ErofsGenerator.BLOCK_SIZE * ErofsGenerator.BLOCK_SIZE - ErofsGenerator.BLOCK_SIZE
        // Ensure lo is small enough: max_image_size should <= image_size.
        var delta = ErofsGenerator.BLOCK_SIZE
        var maxImageSize = sizeCalculatorCopy(lo)
        while (maxImageSize > imageSize) {
            imageRatio = maxImageSize / lo.toDouble()
            lo = (imageSize / imageRatio).toLong() / ErofsGenerator.BLOCK_SIZE * ErofsGenerator.BLOCK_SIZE - delta
            delta *= 2
            maxImageSize = sizeCalculatorCopy(lo)
        }
        var hi = lo + ErofsGenerator.BLOCK_SIZE
        // Ensure hi is large enough: max_image_size should >= image_size.
        delta = ErofsGenerator.BLOCK_SIZE
        maxImageSize = sizeCalculatorCopy(hi)
        while (maxImageSize < imageSize) {
            imageRatio = maxImageSize / hi.toDouble()
            hi = (imageSize / imageRatio).toLong() / ErofsGenerator.BLOCK_SIZE * ErofsGenerator.BLOCK_SIZE + delta
            delta *= 2
            maxImageSize = sizeCalculatorCopy(hi)
        }
        var partitionSize = hi
        // Start the binary search.
        while (lo < hi) {
            val mid = ((lo + hi) / (2 * ErofsGenerator.BLOCK_SIZE)) * ErofsGenerator.BLOCK_SIZE
            maxImageSize = sizeCalculatorCopy(mid)
            if (maxImageSize >= imageSize) {
                if (mid < partitionSize) {
                    partitionSize = mid
                }
                hi = mid
            } else {
                lo = mid + ErofsGenerator.BLOCK_SIZE
            }
        }
        log.info("CalculateMinPartitionSize($imageSize): partition_size $partitionSize.")
        return partitionSize
    }

    /**
     * Ask avbtool for the maximum raw image size that fits in
     * [partitionSize] (or this.partitionSize) once the footer is added.
     * Side effect: caches the result in this.imageSize.
     */
    fun calculateMaxImageSize(partitionSize: Long? = null): Long {
        val actualPartitionSize = partitionSize ?: this.partitionSize
        require(actualPartitionSize > 0) { "Invalid partition size: $actualPartitionSize" }
        val cmd = mutableListOf(
            avbtool,
            footerCommand(),
            "--partition_size",
            actualPartitionSize.toString(),
            "--calc_max_image_size"
        )
        cmd.addAll(signingArgs.split(" "))
        val (exitCode, output) = runAvbTool(cmd)
        if (exitCode != 0) {
            throw BuildVerityImageError("Failed to calculate max image size:\n$output")
        }
        val imageSize = output.trim().toLong()
        require(imageSize > 0) { "Invalid max image size: $imageSize" }
        this.imageSize = imageSize
        return imageSize
    }

    /** Compute and cache the minimal partition size for [imageSize]. */
    fun calculateDynamicPartitionSize(imageSize: Long): Long {
        log.info("verity_utils: CalculateDynamicPartitionSize")
        partitionSize = calculateMinPartitionSize(imageSize)
        return partitionSize
    }

    /** Prepend the bundled ("plugged") tool locations to PATH / LD_LIBRARY_PATH. */
    protected fun setProcessEnvironment(processBuilder: ProcessBuilder) {
        processBuilder.environment().apply {
            put("PATH", "aosp/plugged/bin:" + System.getenv("PATH"))
            put("LD_LIBRARY_PATH", "aosp/plugged/lib:" + System.getenv("LD_LIBRARY_PATH"))
        }
    }

    /** Subclass hook: pad a raw content size with fs-specific reserve space. */
    protected open fun calculateSizeAndReserved(size: Long): Long {
        throw IllegalAccessException("not implemented")
    }

    /** Append the AVB hash/hashtree footer (and optional signature) to [outFile]. */
    fun addFooter(outFile: String) {
        val cmd = mutableListOf(
            avbtool, footerCommand(),
            "--partition_size", partitionSize.toString(),
            "--partition_name", partitionName,
            "--image", outFile
        )
        if (keyPath.isNotEmpty() && algorithm.isNotEmpty()) {
            cmd.addAll(listOf("--key", keyPath, "--algorithm", algorithm))
        }
        if (salt.isNotEmpty()) {
            cmd.addAll(listOf("--salt", salt))
        }
        cmd.addAll(signingArgs.split(" "))
        val (exitCode, output) = runAvbTool(cmd)
        if (exitCode != 0) {
            throw BuildVerityImageError("Failed to add AVB footer: $output")
        }
    }

    /** avbtool subcommand matching the configured footer type. */
    private fun footerCommand() =
        if (footerType == AVB_HASH_FOOTER) "add_hash_footer" else "add_hashtree_footer"

    /**
     * Run [cmd] with the plugged-tool environment, stderr merged into
     * stdout.  Returns (exit code, combined output).  Extracted because the
     * same launch/collect sequence was duplicated in calculateMaxImageSize
     * and addFooter.
     */
    private fun runAvbTool(cmd: List<String>): Pair<Int, String> {
        val processBuilder = ProcessBuilder(cmd)
        log.info(cmd.joinToString(" "))
        setProcessEnvironment(processBuilder)
        processBuilder.redirectErrorStream(true)
        val proc = processBuilder.start()
        // Drain output before waitFor() to avoid pipe-buffer deadlock.
        val output = BufferedReader(InputStreamReader(proc.inputStream)).readText()
        return Pair(proc.waitFor(), output)
    }

    class BuildVerityImageError(message: String) : Exception(message)

    companion object {
        private val log = LoggerFactory.getLogger(BaseGenerator::class.java)
        const val AVB_HASH_FOOTER = "avb_hash_footer"
        const val AVB_HASHTREE_FOOTER = "avb_hashtree_footer"
    }
}

@ -0,0 +1,75 @@
package rom.sparse
import avb.AVBInfo
import cfig.helper.Helper
import org.apache.commons.exec.CommandLine
import org.apache.commons.exec.DefaultExecutor
import org.apache.commons.exec.environment.EnvironmentUtils
import org.slf4j.LoggerFactory
import java.io.File
/** Builds an erofs image from an unpacked directory and appends an AVB hashtree footer. */
class ErofsGenerator(inPartitionName: String) : BaseGenerator(inPartitionName, AVB_HASHTREE_FOOTER) {
    /**
     * Build [image_file] from [srcDir] with mkfs.erofs, then size the
     * partition via avbtool and append the hashtree footer.
     * Signing args are reconstructed from [ai] (the AVB info read back from
     * the original image) so hash algorithm and properties are preserved.
     */
    fun pack(
        ai: AVBInfo,
        mount_point: String,
        productOut: String = "dlkm/system",
        srcDir: String,
        image_file: String
    ) {
        log.warn("pack: $mount_point, $productOut, $srcDir")
        val newArgs =
            StringBuilder("--hash_algorithm " + ai.auxBlob!!.hashTreeDescriptors.get(0).hash_algorithm)
        ai.auxBlob!!.propertyDescriptors.forEach {
            newArgs.append(" ")
            newArgs.append("--prop ${it.key}:${it.value}")
        }
        log.info("newArgs: $newArgs")
        signingArgs = newArgs.toString()
        val mkfsBin = "aosp/plugged/bin/mkfs.erofs"
        val fc = "${workDir}file_contexts"
        val cmd = CommandLine.parse(mkfsBin).apply {
            addArguments("-z lz4hc,9")
            addArguments("--mount-point $mount_point")
            addArguments("--product-out $productOut")
            addArguments("--file-contexts $fc")
            addArgument(image_file)
            addArgument(srcDir)
        }
        val env = EnvironmentUtils.getProcEnvironment().apply {
            put("PATH", "aosp/plugged/bin:" + System.getenv("PATH"))
            put("LD_LIBRARY_PATH", "aosp/plugged/lib:" + System.getenv("LD_LIBRARY_PATH"))
        }
        DefaultExecutor().execute(cmd, env)
        // Size the partition from the produced image, rounded up to 4K.
        val ret2 = Helper.powerRun("du -b -k -s $image_file", null)
        var partition_size = String(ret2.get(0)).split("\\s".toRegex()).get(0).toLong() * 1024
        log.info("partition_size(raw): $partition_size")
        partition_size = Helper.round_to_multiple(partition_size, 4096)
        log.info("partition_size(round 4k): $partition_size")
        val verityImageBuilder = ErofsGenerator(mount_point)
        partition_size = verityImageBuilder.calculateSizeAndReserved(partition_size)
        log.info("partition_size(calc reserve): $partition_size")
        partition_size = verityImageBuilder.calculateDynamicPartitionSize(partition_size)
        log.info("partition_size(calc dynamic): $partition_size")
        log.info("Allocating $partition_size for $partitionName")
        partition_size = verityImageBuilder.calculateMaxImageSize(partition_size)
        log.info("partition_size(calc max): $partition_size")
        // fix: dropped an unused File(image_file).length() local
        verityImageBuilder.addFooter(image_file)
    }

    /** Reserve 0.3% on top of the content size, with a 256 KiB floor. */
    override fun calculateSizeAndReserved(size: Long): Long {
        return maxOf((size * 1003 / 1000), (256 * 1024))
    }

    companion object {
        private val log = LoggerFactory.getLogger(ErofsGenerator::class.java)
        const val BLOCK_SIZE = 4096 // Replace this with the actual block size.
    }
}

@ -0,0 +1,165 @@
package rom.sparse
import avb.AVBInfo
import cfig.helper.Helper
import org.apache.commons.exec.CommandLine
import org.apache.commons.exec.DefaultExecutor
import org.apache.commons.exec.PumpStreamHandler
import org.slf4j.LoggerFactory
import java.io.File
import java.nio.file.Files
import java.nio.file.Paths
import java.util.Properties
import kotlin.math.ceil
/**
 * Builds an ext4 image from an unpacked directory and appends an AVB
 * hashtree footer.  Sizing mirrors AOSP's build_image flow: build once to
 * measure free blocks/inodes, shrink, then build again at the final size.
 */
class Ext4Generator(inPartitionName: String = "NA") : BaseGenerator(partitionName = inPartitionName, footerType = AVB_HASHTREE_FOOTER) {
    // inode budget for the image, refined between the two build passes
    var theInodeCount = 0L
    // working image size in bytes, refined in several passes below
    var theSize = 0L

    /**
     * Two-pass ext4 build: measure content, make the filesystem, read back
     * its free space with tune2fs, shrink, rebuild, then add the AVB footer.
     * Signing args are reconstructed from [ai] so properties and hash
     * algorithm from the original image are preserved.
     */
    fun pack(
        ai: AVBInfo,
        mount_point: String, productOut: String = "dlkm/system", srcDir: String, outFile: String
    ) {
        log.warn("pack: $mount_point, $productOut, $srcDir")
        val newArgs = StringBuilder("--hash_algorithm " + ai.auxBlob!!.hashTreeDescriptors.get(0).hash_algorithm)
        ai.auxBlob!!.propertyDescriptors.forEach {
            newArgs.append(" ")
            newArgs.append("--prop ${it.key}:${it.value}")
        }
        log.info("newArgs: $newArgs")
        signingArgs = newArgs.toString()
        //XXXX: calc src dir
        val ret2 = Helper.powerRun("du -b -k -s $srcDir", null)
        theSize = String(ret2.get(0)).split("\\s".toRegex()).get(0).toLong() * 1024
        log.info("theSize(raw): $theSize")
        val verityImageBuilder = Ext4Generator(mount_point)
        theSize = verityImageBuilder.calculateSizeAndReserved(theSize)
        log.info("theSize(calc reserve): $theSize")
        theSize = Helper.round_to_multiple(theSize, 4096)
        log.info("theSize(round 4k): $theSize")
        theInodeCount = getInodeUsage(srcDir)
        log.info("extfs_inode_count: $theInodeCount")
        //build fs (first pass, only to measure real usage)
        verityImageBuilder.makeFileSystem(outFile, theSize, theInodeCount, mount_point, partitionName)
        //addFooter(image_file)
        val fs_dict = getFilesystemCharacteristics(outFile)
        log.warn("removing intermediate $outFile")
        File(outFile).delete()
        log.info("XX: free blocks=" + fs_dict.getProperty("Free blocks"))
        log.info("XX: free_size = " + fs_dict.getProperty("Free blocks").toLong() * BLOCK_SZ)
        // Shrink by the free space observed in the first pass.
        theSize -= fs_dict.getProperty("Free blocks", "0").toLong() * BLOCK_SZ
        val reservedSize = 0
        theSize += reservedSize
        if (reservedSize == 0) {
            // add 0.3% margin
            theSize = theSize * 1003 / 1000
        }
        // Use a minimum size, otherwise, we will fail to calculate an AVB footer
        // or fail to construct an ext4 image.
        theSize = maxOf(theSize, 256 * 1024)
        val blockSize = fs_dict.getProperty("Block size", "4096").toLong()
        if (blockSize <= 4096) {
            theSize = Helper.round_to_multiple(theSize, 4096)
        } else {
            theSize = ((theSize + blockSize - 1) / blockSize) * blockSize
        }
        var inodes = fs_dict.getProperty("Inode count", "-1").toLong()
        if (inodes == -1L) {
            inodes = theInodeCount
        }
        inodes -= fs_dict.getProperty("Free inodes", "0").toLong()
        // add 0.2% margin or 1 inode, whichever is greater
        val spareInodes = inodes * 2 / 1000
        val minSpareInodes = 1
        if (spareInodes < minSpareInodes) {
            inodes += minSpareInodes
        } else {
            inodes += spareInodes
        }
        theInodeCount = inodes
        log.info("Allocating $inodes Inodes for $outFile.")
        log.info("theSize = $theSize")
        theSize = verityImageBuilder.calculateDynamicPartitionSize(theSize)
        log.info("theSize(calc dynamic): $theSize")
        log.info("Allocating $theSize for $partitionName")
        theSize = verityImageBuilder.calculateMaxImageSize(theSize)
        log.info("theSize(calc max): $theSize")
        //build fs again (second pass, final size)
        verityImageBuilder.makeFileSystem(outFile, theSize, theInodeCount, mount_point, partitionName)
        val image_size = File(outFile).length()
        log.info("image size: $image_size")
        addFooter(outFile)
    }

    /**
     * Create an empty ext4 fs with mke2fs, then populate it from the work
     * directory with e2fsdroid (extended attrs + selinux contexts).
     */
    fun makeFileSystem(outFile: String, fs_size: Long, inodes: Long, mount_point: String, label: String) {
        DefaultExecutor().apply {
            streamHandler = PumpStreamHandler(System.out, System.err)
        }.execute(CommandLine.parse("mke2fs").apply {
            addArguments("-O ^has_journal")
            addArguments("-L $label")
            addArguments("-N $inodes")
            addArguments("-I 256")
            addArguments("-M $mount_point")
            addArguments("-m 0")
            addArguments("-t ext4")
            addArguments("-b $BLOCK_SZ")
            addArgument(outFile)
            addArgument((fs_size / BLOCK_SZ).toString())
        }.also { log.warn(it.toString()) })
        DefaultExecutor().apply {
            streamHandler = PumpStreamHandler(System.out, System.err)
        }.execute(CommandLine.parse("aosp/plugged/bin/e2fsdroid").apply {
            addArguments("-e")
            // NOTE(review): hard-coded device product path ("shiba") — this
            // should presumably come from configuration; confirm before
            // relying on it for other devices.
            addArguments("-p out/target/product/shiba/system")
            addArgument("-s")
            addArguments("-S ${workDir}file_contexts.bin")
            addArguments("-f " + Helper.prop("workDir") + "/$mount_point")
            addArguments("-a /$mount_point")
            addArgument(outFile)
        }.also { log.warn(it.toString()) })
    }

    /** Estimate the inode budget from the file/dir count under [path]. */
    fun getInodeUsage(path: String): Long {
        // Increase by > 6% as the number of files and directories is not the whole picture.
        val inodes = Files.walk(Paths.get(path)).count()
        val spareInodes = ceil(inodes * 0.06).toInt()
        val minSpareInodes = 12
        return inodes + maxOf(spareInodes, minSpareInodes)
    }

    /** Reserve a flat 16 MiB on top of the measured content size. */
    override fun calculateSizeAndReserved(size: Long): Long {
        return 16 * 1024 * 1024 + size
    }

    /** Parse `tune2fs -l` output into key/value Properties (e.g. "Free blocks"). */
    private fun getFilesystemCharacteristics(file: String): Properties {
        val fsDict = Helper.powerRun("tune2fs -l $file", null).get(0)
        return Properties().apply {
            String(fsDict).split("\n".toRegex()).forEach {
                //get the key and value separately in "key : value" format from line
                val keyValue = it.split(":".toRegex(), 2)
                if (keyValue.size == 2) {
                    val key = keyValue[0].trim { it <= ' ' }
                    val value = keyValue[1].trim { it <= ' ' }
                    log.debug("X: $key=$value")
                    setProperty(key, value)
                }
            }
        }
    }

    companion object {
        private val log = LoggerFactory.getLogger(Ext4Generator::class.java)
        private val BLOCK_SZ = 4096
    }
}

@ -0,0 +1,266 @@
package rom.sparse
import avb.AVBInfo
import avb.desc.HashTreeDescriptor
import cc.cfig.io.Struct
import cfig.Avb
import cfig.bootimg.Common.Companion.deleleIfExists
import cfig.helper.Dumpling
import cfig.helper.Helper
import cfig.helper.Helper.Companion.check_call
import cfig.helper.Helper.Companion.check_output
import cfig.packable.VBMetaParser
import cfig.utils.EnvironmentVerifier
import com.fasterxml.jackson.databind.ObjectMapper
import de.vandermeer.asciitable.AsciiTable
import org.apache.commons.exec.CommandLine
import org.apache.commons.exec.DefaultExecutor
import org.apache.commons.exec.PumpStreamHandler
import org.apache.commons.exec.environment.EnvironmentUtils
import org.slf4j.LoggerFactory
import java.io.File
import java.nio.file.Files
import java.util.*
import kotlin.io.path.Path
/**
 * Represents a sparse/raw disk image and the state needed to unpack and
 * re-pack it. The outer container ("sparse" or raw) and the inner
 * filesystem ("ext4", "erofs" or raw) are detected by [parse] and recorded
 * in [SparseInfo], which is persisted as JSON under workDir.
 *
 * Fixes vs. previous revision: "unsuported" log typos, unused `val ai`
 * binding in extractVBMeta, Helper.Companion qualifier, and double-slash
 * path strings ("${workDir}/x" vs the "${workDir}x" form used everywhere
 * else, including the existence checks in printSummary).
 */
data class SparseImage(var info: SparseInfo = SparseInfo()) {
    /**
     * Re-packs the previously unpacked content and signs it, dispatching on
     * the inner filesystem type detected during [parse].
     */
    fun pack(): SparseImage {
        val readBackAi = ObjectMapper().readValue(File(Avb.getJsonFileName(info.pulp)), AVBInfo::class.java)
        val partName = readBackAi.auxBlob!!.hashTreeDescriptors.get(0).partition_name
        when (info.innerFsType) {
            "ext4" -> {
                Ext4Generator(partName).pack(
                    readBackAi,
                    partName,
                    workDir,
                    workDir + File(info.output).nameWithoutExtension,
                    workDir + File(info.pulp).name + ".signed"
                )
            }
            "erofs" -> {
                // NOTE(review): erofs signs to "${info.output}.signed" while ext4 signs
                // into workDir — confirm this asymmetry is intentional.
                ErofsGenerator(partName).pack(
                    readBackAi,
                    partName,
                    workDir,
                    workDir + File(info.output).nameWithoutExtension,
                    "${info.output}.signed"
                )
            }
            else -> {
                log.warn("unsupported image type: ${info.innerFsType}")
            }
        }
        return this
    }

    /**
     * Logs an ASCII table of the artifacts produced by unpacking [fileName]:
     * the original image, any converted raw/ext4/erofs image, extracted
     * content, logs, and (for erofs) the suggested mount command.
     */
    fun printSummary(fileName: String) {
        val stem = File(fileName).nameWithoutExtension
        val tail = AsciiTable().apply {
            addRule()
            addRow("To view erofs contents:")
        }
        val tab = AsciiTable().apply {
            addRule()
            addRow("What", "Where")
            addRule()
            addRow("image (${info.outerFsType})", fileName)
            ("${workDir}$stem.ext4").let { ext4 ->
                if (File(ext4).exists()) {
                    addRule()
                    addRow("converted image (ext4)", ext4)
                }
            }
            ("${workDir}$stem.erofs").let {
                if (File(it).exists()) {
                    addRule()
                    addRow("converted image (erofs)", it)
                    tail.addRule()
                    tail.addRow("sudo mount $it -o loop -t erofs ${workDir}mount")
                    tail.addRule()
                } else if (info.innerFsType == "erofs") {
                    tail.addRule()
                    tail.addRow("sudo mount $fileName -o loop -t erofs ${workDir}mount")
                    tail.addRule()
                }
            }
            ("${workDir}$stem").let {
                if (File(it).exists()) {
                    addRule()
                    if (File(it).isFile) {
                        addRow("converted image (raw)", it)
                    } else {
                        addRow("extracted content", it)
                    }
                }
            }
            ("${workDir}$stem.log").let {
                if (File(it).exists()) {
                    addRule()
                    addRow("log", it)
                }
            }
            if (info.innerFsType == "erofs") {
                addRule()
                addRow("mount point", "${workDir}mount")
            }
            addRule()
        }
        log.info("\n" + tab.render() + "\n" + if (info.innerFsType == "erofs") tail.render() else "")
    }

    // Refreshes vbmeta.img's hashtree descriptor to match the re-signed image.
    fun updateVbmeta(): SparseImage {
        Avb.updateVbmeta(info.pulp, HashTreeDescriptor::class)
        return this
    }

    /**
     * Converts the signed raw image back to the original outer container:
     * re-sparses it when the input was sparse, otherwise moves the signed
     * raw image next to the original file name.
     */
    fun unwrap(): SparseImage {
        if (info.outerFsType == "sparse") {
            img2simg(workDir + File(info.pulp).name + ".signed", File(info.output).name + ".signed")
        } else {
            val s = info.pulp + ".signed"
            val t = info.output + ".signed"
            log.info("Moving $s -> $t")
            Files.move(Path(s), Path(t), java.nio.file.StandardCopyOption.REPLACE_EXISTING)
            File(info.pulp).deleleIfExists()
        }
        return this
    }

    /**
     * Serializable unpack state.
     * @param output      original image file name as given by the user
     * @param pulp        working copy (raw image) under workDir
     * @param json        name of the persisted state file
     * @param outerFsType container format: "sparse", "ext4", "erofs" or "raw"
     * @param innerFsType filesystem inside the container: "ext4", "erofs" or "raw"
     */
    data class SparseInfo(
        var output: String = "",
        var pulp: String = "",
        var json: String = "",
        var outerFsType: String = "raw",
        var innerFsType: String = "raw"
    )

    companion object {
        // Android sparse image magic (file starts with 0x3AFF26ED, see libsparse).
        private val SPARSE_MAGIC: UInt = 0x3aff26edu
        private val log = LoggerFactory.getLogger(SparseImage::class.java)
        private val workDir = Helper.prop("workDir")
        private val simg2imgBin = "simg2img"
        private val img2simgBin = "img2simg"

        /**
         * Detects the image format of [fileName], converts/copies it into
         * workDir, extracts its contents where possible, persists the
         * resulting [SparseInfo] as JSON and parses embedded AVB metadata.
         */
        fun parse(fileName: String): SparseImage {
            val ret = SparseImage()
            ret.info.json = File(fileName).name.removeSuffix(".img") + ".json"
            ret.info.output = fileName
            ret.info.pulp = workDir + fileName
            if (isSparse(fileName)) {
                val tempFile = UUID.randomUUID().toString()
                ret.info.outerFsType = "sparse"
                val rawFile = "${workDir}${File(fileName).nameWithoutExtension}"
                simg2img(fileName, tempFile)
                // Pick the pulp name by the filesystem found inside the unsparsed image.
                ret.info.pulp = if (isExt4(tempFile)) {
                    ret.info.innerFsType = "ext4"
                    "$rawFile.ext4"
                } else if (isErofs(tempFile)) {
                    ret.info.innerFsType = "erofs"
                    "$rawFile.erofs"
                } else {
                    "$rawFile.raw"
                }
                Files.move(Path(tempFile), Path(ret.info.pulp))
            } else if (isExt4(fileName)) {
                ret.info.outerFsType = "ext4"
                ret.info.innerFsType = "ext4"
                File(fileName).copyTo(File(ret.info.pulp))
            } else if (isErofs(fileName)) {
                ret.info.outerFsType = "erofs"
                ret.info.innerFsType = "erofs"
                File(fileName).copyTo(File(ret.info.pulp))
            }
            when (ret.info.innerFsType) {
                "ext4" -> {
                    extractExt4(ret.info.pulp)
                }
                "erofs" -> {
                    extractErofs(ret.info.pulp)
                }
                else -> {
                    log.warn("unsupported image type: ${ret.info.innerFsType}")
                }
            }
            ObjectMapper().writerWithDefaultPrettyPrinter().writeValue(File("${workDir}${ret.info.json}"), ret)
            File("${workDir}mount").mkdir()
            extractVBMeta(ret.info.pulp)
            generateFileContexts()
            return ret
        }

        private fun isSparse(fileName: String): Boolean {
            val magic = Helper.readFully(fileName, 0, 4)
            return Struct(">I").pack(SPARSE_MAGIC).contentEquals(magic)
        }

        // ext4 super block lives at offset 1024; its little-endian magic 0xEF53
        // sits at bytes 0x38-0x39, hence the byte-swapped ">h" constant.
        private fun isExt4(fileName: String): Boolean {
            val superBlock = Helper.readFully(fileName, 1024, 64)
            val magic = byteArrayOf(superBlock[0x38], superBlock[0x39])
            return Struct(">h").pack(0x53ef).contentEquals(magic)
        }

        // https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/magic.h#L23
        // EROFS_SUPER_MAGIC_V1 = 0xE0F5E1E2 (little-endian on disk), so the
        // big-endian pack of the byte-swapped constant matches the raw bytes.
        private fun isErofs(fileName: String): Boolean {
            val magic = Helper.readFully(fileName, 1024, 4)
            return Struct(">I").pack(0xe2e1f5e0).contentEquals(magic)
        }

        // Extracts ext4 content via 7z into workDir/<stem>, logging 7z output to <stem>.log.
        private fun extractExt4(fileName: String) {
            if (EnvironmentVerifier().has7z) {
                val stem = File(fileName).nameWithoutExtension
                val outStr = "7z x $fileName -y -o${workDir}$stem".check_output()
                File("${workDir}$stem.log").writeText(outStr)
            } else {
                log.warn("Please install 7z for ext4 extraction")
            }
        }

        // erofs cannot be extracted without root; only print the mount hint.
        private fun extractErofs(fileName: String) {
            log.info("sudo mount $fileName -o loop -t erofs ${workDir}mount")
        }

        private fun simg2img(sparseIn: String, flatOut: String) {
            log.info("parsing Android sparse image $sparseIn ...")
            "$simg2imgBin $sparseIn $flatOut".check_call()
            log.info("parsed Android sparse image $sparseIn -> $flatOut")
        }

        private fun img2simg(flatIn: String, sparseOut: String) {
            log.info("transforming image to Android sparse format: $flatIn ...")
            "$img2simgBin $flatIn $sparseOut".check_call()
            log.info("transformed Android sparse image: $flatIn -> $sparseOut")
        }

        /**
         * Dumps AVB info embedded in [fileName]; additionally parses a
         * top-level vbmeta.img if one exists in the working directory.
         */
        fun extractVBMeta(fileName: String) {
            // vbmeta in image
            try {
                AVBInfo.parseFrom(Dumpling(fileName)).dumpDefault(fileName)
                if (File("vbmeta.img").exists()) {
                    log.warn("Found vbmeta.img, parsing ...")
                    VBMetaParser().unpack("vbmeta.img")
                }
            } catch (e: IllegalArgumentException) {
                log.warn(e.message)
                log.warn("failed to parse vbmeta info")
            }
        }

        // Compiles the plugged-in file_contexts into workDir/file_contexts.bin
        // using the bundled sefcontext_compile with its own lib path.
        fun generateFileContexts() {
            val env = EnvironmentUtils.getProcEnvironment().apply {
                put("PATH", "aosp/plugged/bin:" + System.getenv("PATH"))
                put("LD_LIBRARY_PATH", "aosp/plugged/lib:" + System.getenv("LD_LIBRARY_PATH"))
            }
            DefaultExecutor().apply {
                streamHandler = PumpStreamHandler(System.out, System.err)
            }.execute(CommandLine.parse("aosp/plugged/bin/sefcontext_compile").apply {
                addArguments("-o " + Helper.prop("workDir") + "file_contexts.bin")
                addArgument("aosp/plugged/res/file_contexts.concat")
            }.also { log.warn(it.toString()) }, env)
        }
    }
}

@ -0,0 +1,101 @@
// Copyright 2021 yuyezhong@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rom.sparse
import avb.AVBInfo
import avb.blob.Footer
import cfig.Avb
import cfig.bootimg.Common.Companion.deleleIfExists
import cfig.helper.Dumpling
import cfig.helper.Helper
import cfig.helper.Helper.Companion.deleteIfExists
import cfig.packable.IPackable
import cfig.packable.VBMetaParser
import com.fasterxml.jackson.databind.ObjectMapper
import de.vandermeer.asciitable.AsciiTable
import org.slf4j.LoggerFactory
import java.io.File
import java.io.FileInputStream
/**
 * IPackable entry point for sparse/raw ext4 and erofs partition images
 * (system, vendor, product, ...). Delegates format detection and
 * unpack/pack state to [SparseImage].
 *
 * Cleanup vs. previous revision: the companion object carried leftover,
 * never-referenced state from the old implementation (workDir,
 * outerFsType, innerFsType — now tracked by SparseImage.SparseInfo);
 * removed along with dead commented-out code.
 */
class SparseImgParser : IPackable {
    override val loopNo: Int
        get() = 0
    private val log = LoggerFactory.getLogger(SparseImgParser::class.java)

    // File-name patterns this parser volunteers to handle.
    override fun capabilities(): List<String> {
        return listOf(
            "^(system|system_ext|system_other|system_dlkm)\\.img$",
            "^(vendor|vendor_dlkm|product|cache|userdata|super|oem|odm|odm_dlkm)\\.img$"
        )
    }

    override fun unpack(fileName: String) {
        clear()
        SparseImage
            .parse(fileName)
            .printSummary(fileName)
    }

    // Re-pack from the JSON state written by unpack(), re-sign, refresh
    // vbmeta, and restore the original outer container format.
    override fun pack(fileName: String) {
        val cfgFile = outDir + fileName.removeSuffix(".img") + ".json"
        val readBackSi = ObjectMapper().readValue(File(cfgFile), SparseImage::class.java)
        readBackSi
            .pack()
            .updateVbmeta()
            .unwrap()
    }

    // invoked solely by reflection: dump the AVB footer of an image, if any
    fun `@footer`(fileName: String) {
        FileInputStream(fileName).use { fis ->
            fis.skip(File(fileName).length() - Footer.SIZE)
            try {
                val footer = Footer(fis)
                log.info("\n" + ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(footer))
            } catch (e: IllegalArgumentException) {
                log.info("image $fileName has no AVB Footer")
            }
        }
    }

    override fun `@verify`(fileName: String) {
        super.`@verify`(fileName)
    }

    override fun flash(fileName: String, deviceName: String) {
        TODO("not implemented")
    }

    // Removes stale artifacts of a previous run for [fileName].
    fun clear(fileName: String) {
        super.clear()
        listOf("", ".clear", ".signed").forEach {
            "$fileName$it".deleteIfExists()
        }
        VBMetaParser().clear("vbmeta.img")
    }
}

@ -1,221 +0,0 @@
// Copyright 2021 yuyezhong@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cfig.utils
import avb.blob.Footer
import cc.cfig.io.Struct
import cfig.bootimg.Common.Companion.deleleIfExists
import cfig.helper.Helper
import cfig.helper.Helper.Companion.check_call
import cfig.helper.Helper.Companion.check_output
import cfig.packable.IPackable
import com.fasterxml.jackson.databind.ObjectMapper
import de.vandermeer.asciitable.AsciiTable
import org.slf4j.LoggerFactory
import java.io.File
import java.io.FileInputStream
import java.util.*
/**
 * Legacy unpack-only handler for sparse/raw ext4 and erofs partition
 * images. Detects the outer container and inner filesystem, converts
 * sparse input to a raw image under workDir, and extracts/summarizes it.
 * pack() is not implemented in this revision.
 *
 * Fixes: typo'd private method name extraceErofs -> extractErofs,
 * "unsuported" log typo, redundant Helper.Companion qualifier, and the
 * double-slash "$workDir/$stem.log" path (printSummary checks the
 * single-slash form).
 */
class SparseImgParser : IPackable {
    override val loopNo: Int
        get() = 0
    private val log = LoggerFactory.getLogger(SparseImgParser::class.java)
    private val simg2imgBin = "simg2img"
    private val img2simgBin = "img2simg"

    override fun capabilities(): List<String> {
        return listOf(
            "^(system|system_ext|system_other|system_dlkm)\\.img$",
            "^(vendor|vendor_dlkm|product|cache|userdata|super|oem|odm|odm_dlkm)\\.img$"
        )
    }

    override fun unpack(fileName: String) {
        clear()
        var target = fileName
        if (isSparse(fileName)) {
            // Unsparse into a temp file first, then rename by detected filesystem.
            val tempFile = UUID.randomUUID().toString()
            outerFsType = "sparse"
            val rawFile = "$workDir${File(fileName).nameWithoutExtension}"
            simg2img(fileName, tempFile)
            target = if (isExt4(tempFile)) {
                innerFsType = "ext4"
                "$rawFile.ext4"
            } else if (isErofs(tempFile)) {
                innerFsType = "erofs"
                "$rawFile.erofs"
            } else {
                "$rawFile.raw"
            }
            File(tempFile).renameTo(File(target))
        } else if (isExt4(fileName)) {
            outerFsType = "ext4"
            innerFsType = "ext4"
        } else if (isErofs(fileName)) {
            outerFsType = "erofs"
            innerFsType = "erofs"
        }
        when (innerFsType) {
            "ext4" -> {
                extractExt4(target)
            }
            "erofs" -> {
                extractErofs(target)
            }
            else -> {
                log.warn("unsupported image type: $innerFsType")
            }
        }
        File("${workDir}mount").mkdir()
        printSummary(fileName)
    }

    override fun pack(fileName: String) {
        TODO("not implemented")
    }

    // invoked solely by reflection: dump the AVB footer of an image, if any
    fun `@footer`(fileName: String) {
        FileInputStream(fileName).use { fis ->
            fis.skip(File(fileName).length() - Footer.SIZE)
            try {
                val footer = Footer(fis)
                log.info("\n" + ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(footer))
            } catch (e: IllegalArgumentException) {
                log.info("image $fileName has no AVB Footer")
            }
        }
    }

    override fun `@verify`(fileName: String) {
        super.`@verify`(fileName)
    }

    private fun simg2img(sparseIn: String, flatOut: String) {
        log.info("parsing Android sparse image $sparseIn ...")
        "$simg2imgBin $sparseIn $flatOut".check_call()
        log.info("parsed Android sparse image $sparseIn -> $flatOut")
    }

    private fun img2simg(flatIn: String, sparseOut: String) {
        log.info("transforming image to Android sparse format: $flatIn ...")
        "$img2simgBin $flatIn $sparseOut".check_call()
        log.info("transformed Android sparse image: $flatIn -> $sparseOut")
    }

    override fun flash(fileName: String, deviceName: String) {
        TODO("not implemented")
    }

    fun clear(fileName: String) {
        super.clear()
        File(fileName).deleleIfExists()
    }

    private fun isSparse(fileName: String): Boolean {
        val magic = Helper.readFully(fileName, 0, 4)
        return Struct(">I").pack(SPARSE_MAGIC).contentEquals(magic)
    }

    // ext4: little-endian magic 0xEF53 at bytes 0x38-0x39 of the 1024-offset super block.
    private fun isExt4(fileName: String): Boolean {
        val superBlock = Helper.readFully(fileName, 1024, 64)
        val magic = byteArrayOf(superBlock[0x38], superBlock[0x39])
        return Struct(">h").pack(0x53ef).contentEquals(magic)
    }

    // https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/magic.h#L23
    private fun isErofs(fileName: String): Boolean {
        val magic = Helper.readFully(fileName, 1024, 4)
        return Struct(">I").pack(0xe2e1f5e0).contentEquals(magic)
    }

    private fun extractExt4(fileName: String) {
        if (EnvironmentVerifier().has7z) {
            val stem = File(fileName).nameWithoutExtension
            val outStr = "7z x $fileName -y -o$workDir$stem".check_output()
            File("$workDir$stem.log").writeText(outStr)
        } else {
            log.warn("Please install 7z for ext4 extraction")
        }
    }

    // erofs cannot be extracted without root; only print the mount hint.
    private fun extractErofs(fileName: String) {
        log.info("sudo mount $fileName -o loop -t erofs ${workDir}mount")
    }

    private fun printSummary(fileName: String) {
        val stem = File(fileName).nameWithoutExtension
        val tail = AsciiTable().apply {
            addRule()
            addRow("To view erofs contents:")
        }
        val tab = AsciiTable().apply {
            addRule()
            addRow("What", "Where")
            addRule()
            addRow("image ($outerFsType)", fileName)
            ("$workDir$stem.ext4").let { ext4 ->
                if (File(ext4).exists()) {
                    addRule()
                    addRow("converted image (ext4)", ext4)
                }
            }
            ("$workDir$stem.erofs").let {
                if (File(it).exists()) {
                    addRule()
                    addRow("converted image (erofs)", it)
                    tail.addRule()
                    tail.addRow("sudo mount $it -o loop -t erofs ${workDir}mount")
                    tail.addRule()
                } else if (innerFsType == "erofs") {
                    tail.addRule()
                    tail.addRow("sudo mount $fileName -o loop -t erofs ${workDir}mount")
                    tail.addRule()
                }
            }
            ("$workDir$stem").let {
                if (File(it).exists()) {
                    addRule()
                    if (File(it).isFile) {
                        addRow("converted image (raw)", it)
                    } else {
                        addRow("extracted content", it)
                    }
                }
            }
            ("$workDir$stem.log").let {
                if (File(it).exists()) {
                    addRule()
                    addRow("log", it)
                }
            }
            if (innerFsType == "erofs") {
                addRule()
                addRow("mount point", "${workDir}mount")
            }
            addRule()
        }
        log.info("\n" + tab.render() + "\n" + if (innerFsType == "erofs") tail.render() else "")
    }

    companion object {
        // Android sparse image magic (file starts with 0x3AFF26ED, see libsparse).
        private val SPARSE_MAGIC: UInt = 0x3aff26edu
        private val workDir = Helper.prop("workDir")
        private var outerFsType = "raw"
        private var innerFsType = "raw"
    }
}

@ -14,7 +14,7 @@
package init
import miscimg.MiscImage
import rom.misc.MiscImage
import org.junit.Test
import org.junit.After
import java.util.*

Binary file not shown.

After

Width:  |  Height:  |  Size: 80 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 10 KiB

@ -0,0 +1,14 @@
@startuml
'https://plantuml.com/activity-diagram-beta
start
#lightblue:directory \nsystem_dlkm/;
-> mkfs.erofs;
#lightgreen :system_dlkm.img;
:calculate image size|
-> avbtool;
#lightgreen:(avb enabled)\nsystem_dlkm.img;
stop
@enduml

@ -185,6 +185,11 @@ def main():
verifySingleDir(resDir2, "issue_123_dtb")
# Issue 125: cmdline trim
verifySingleDir(resDir3, "issue_125_trim_cmdline")
# Issue 133: repack ext4
if platform.system() == "Linux":
verifySingleDir(resDir3, "issue_133_repack_ext4")
else:
log.info("sparse image is not supported on OS other than Linux, skip testing")
log.info(successLogo)

@ -0,0 +1,4 @@
package cfig.lazybox
// NOTE(review): empty placeholder class with no members — purpose not
// evident from this file; confirm it is intentionally stubbed out.
class CompileCommand {
}

@ -1 +1 @@
Subproject commit 1cd001925f550d82c5dc1b8b6f69611bf4c6490b
Subproject commit 631af3d1c11343c0a2df1d044cabd44e78fdf865
Loading…
Cancel
Save