/*
 * Code for reading blobs and resources, including compressed WIM resources.
 *
 * Copyright (C) 2012, 2013, 2015 Eric Biggers
 *
 * This file is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option) any
 * later version.
 *
 * This file is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this file; if not, see https://www.gnu.org/licenses/.
 */
#include "wimlib/alloca.h"
#include "wimlib/assert.h"
#include "wimlib/bitops.h"
#include "wimlib/blob_table.h"
#include "wimlib/endianness.h"
#include "wimlib/error.h"
#include "wimlib/file_io.h"
#include "wimlib/ntfs_3g.h"
#include "wimlib/resource.h"
#include "wimlib/sha1.h"
#include "wimlib/wim.h"
#include "wimlib/win32.h"
/*
 * Compressed WIM resources
 *
 * A compressed resource in a WIM consists of a sequence of chunks.  Each chunk
 * decompresses to the same size except possibly for the last, which
 * decompresses to the remaining size.  Chunks that did not compress to less
 * than their original size are stored uncompressed.
 *
 * We support three variations on this resource format, independently of the
 * compression type and chunk size, which can vary as well:
 *
 * - Original resource format: immediately before the compressed chunks, the
 *   "chunk table" provides the offset, in bytes relative to the end of the
 *   chunk table, of the start of each compressed chunk, except for the first
 *   chunk, which is omitted as it always has an offset of 0.  Chunk table
 *   entries are 32-bit for resources < 4 GiB uncompressed and 64-bit for
 *   resources >= 4 GiB uncompressed.
 *
 * - Solid resource format (distinguished by the use of WIM_RESHDR_FLAG_SOLID
 *   instead of WIM_RESHDR_FLAG_COMPRESSED): similar to the original format, but
 *   the resource begins with a 16-byte header which specifies the uncompressed
 *   size of the resource, the compression type, and the chunk size.  (In the
 *   original format, these values were instead determined from outside the
 *   resource itself, from the blob table and the WIM file header.)  In
 *   addition, in this format the entries in the chunk table contain compressed
 *   chunk sizes rather than offsets.  As a consequence of this, the chunk table
 *   entries are always 32-bit and there is an entry for chunk 0.
 *
 * - Pipable resource format (wimlib extension; all resources in a pipable WIM
 *   have this format): similar to the original format, but the chunk table is
 *   at the end of the resource rather than the beginning, and each compressed
 *   chunk is prefixed with its compressed size as a 32-bit integer.  This
 *   format allows a resource to be written without rewinding.
 */
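/*
 * For illustration only (a minimal sketch, not part of wimlib's API): the
 * entry-size rule described above, mirroring what the get_chunk_entry_size()
 * helper used later in this file computes.  The name and exact form of this
 * function are assumptions made for the example.
 */
static inline u64
example_chunk_entry_size(u64 res_usize, bool is_alt_format)
{
	/* Solid ("alternate") format: entries are always 32-bit compressed
	 * chunk sizes, one per chunk including chunk 0. */
	if (is_alt_format)
		return 4;
	/* Original format: 32-bit offsets below 4 GiB uncompressed,
	 * 64-bit offsets at or above 4 GiB. */
	return (res_usize < ((u64)1 << 32)) ? 4 : 8;
}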
static int
decompress_chunk(const void *cbuf, u32 chunk_csize, u8 *ubuf, u32 chunk_usize,
		 struct wimlib_decompressor *decompressor, bool recover_data)
{
	int res = wimlib_decompress(cbuf, chunk_csize, ubuf, chunk_usize,
				    decompressor);

	if (likely(res == 0))
		return 0;

	if (recover_data) {
		WARNING("Failed to decompress data!  Continuing anyway since data recovery mode is enabled.");

		/* Continue on with *something*.  In the worst case just use a
		 * zeroed buffer.  But, try to fill as much of it with
		 * decompressed data as we can.  This works because if the
		 * corruption isn't located right at the beginning of the
		 * compressed chunk, wimlib_decompress() may write some correct
		 * output at the beginning even if it fails later.  */
		memset(ubuf, 0, chunk_usize);
		(void)wimlib_decompress(cbuf, chunk_csize, ubuf,
					chunk_usize, decompressor);
		return 0;
	}
	ERROR("Failed to decompress data!");
	return WIMLIB_ERR_DECOMPRESSION;
}
/*
 * Read data from a compressed WIM resource.
 *
 * @rdesc
 *	Description of the compressed WIM resource to read from.
 * @ranges
 *	Nonoverlapping, nonempty ranges of the uncompressed resource data to
 *	read, sorted by increasing offset.
 * @num_ranges
 *	Number of ranges in @ranges; must be at least 1.
 * @cb
 *	Structure which provides the consume_chunk callback into which to feed
 *	the data being read.  Each call provides the next chunk of the requested
 *	data, uncompressed.  Each chunk will be nonempty and will not cross
 *	range boundaries but otherwise will be of unspecified size.
 * @recover_data
 *	If a chunk can't be fully decompressed due to being corrupted, continue
 *	with whatever data can be recovered rather than return an error.
 *
 * Possible return values:
 *
 *	WIMLIB_ERR_SUCCESS (0)
 *	WIMLIB_ERR_READ			  (errno set)
 *	WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to EINVAL)
 *	WIMLIB_ERR_NOMEM		  (errno set to ENOMEM)
 *	WIMLIB_ERR_DECOMPRESSION	  (errno set to EINVAL)
 *	WIMLIB_ERR_INVALID_CHUNK_SIZE	  (errno set to EINVAL)
 *
 * or other error code returned by the callback function.
 */
static int
read_compressed_wim_resource(const struct wim_resource_descriptor * const rdesc,
			     const struct data_range * const ranges,
			     const size_t num_ranges,
			     const struct consume_chunk_callback *cb,
			     bool recover_data)
{
	int ret;
	u64 *chunk_offsets = NULL;
	u8 *ubuf = NULL;
	void *cbuf = NULL;
	bool chunk_offsets_malloced = false;
	bool ubuf_malloced = false;
	bool cbuf_malloced = false;
	struct wimlib_decompressor *decompressor = NULL;

	/* Sanity checks  */
	wimlib_assert(num_ranges != 0);
	for (size_t i = 0; i < num_ranges; i++) {
		wimlib_assert(ranges[i].offset + ranges[i].size > ranges[i].offset &&
			      ranges[i].offset + ranges[i].size <= rdesc->uncompressed_size);
	}
	for (size_t i = 0; i < num_ranges - 1; i++)
		wimlib_assert(ranges[i].offset + ranges[i].size <= ranges[i + 1].offset);
	/* Get the offsets of the first and last bytes of the read.  */
	const u64 first_offset = ranges[0].offset;
	const u64 last_offset = ranges[num_ranges - 1].offset + ranges[num_ranges - 1].size - 1;

	/* Get the file descriptor for the WIM.  */
	struct filedes * const in_fd = &rdesc->wim->in_fd;

	/* Determine if we're reading a pipable resource from a pipe or not.  */
	const bool is_pipe_read = (rdesc->is_pipable && !filedes_is_seekable(in_fd));

	/* Determine if the chunk table is in an alternate format.  */
	const bool alt_chunk_table = (rdesc->flags & WIM_RESHDR_FLAG_SOLID)
				     && !rdesc->is_pipable;

	/* Get the maximum size of uncompressed chunks in this resource, which
	 * we require be a power of 2.  */
	u64 cur_read_offset = rdesc->offset_in_wim;
	int ctype = rdesc->compression_type;
	u32 chunk_size = rdesc->chunk_size;
	if (alt_chunk_table) {
		/* Alternate chunk table format.  Its header specifies the chunk
		 * size and compression format.  Note: it could be read here;
		 * however, the relevant data was already loaded into @rdesc by
		 * read_blob_table().  */
		cur_read_offset += sizeof(struct alt_chunk_table_header_disk);
	}
	if (unlikely(!is_power_of_2(chunk_size))) {
		ERROR("Invalid compressed resource: "
		      "expected power-of-2 chunk size (got %"PRIu32")",
		      chunk_size);
		ret = WIMLIB_ERR_INVALID_CHUNK_SIZE;
		errno = EINVAL;
		goto out_cleanup;
	}
	/* Get valid decompressor.  */
	if (likely(ctype == rdesc->wim->decompressor_ctype &&
		   chunk_size == rdesc->wim->decompressor_max_block_size))
	{
		/* Cached decompressor.  */
		decompressor = rdesc->wim->decompressor;
		rdesc->wim->decompressor_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
		rdesc->wim->decompressor = NULL;
	} else {
		ret = wimlib_create_decompressor(ctype, chunk_size,
						 &decompressor);
		if (unlikely(ret)) {
			if (ret != WIMLIB_ERR_NOMEM)
				errno = EINVAL;
			goto out_cleanup;
		}
	}

	const u32 chunk_order = bsr32(chunk_size);
	/* Calculate the total number of chunks the resource is divided into.  */
	const u64 num_chunks = (rdesc->uncompressed_size + chunk_size - 1) >> chunk_order;
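	/* (Illustrative arithmetic, not from the original source: with
	 * chunk_size == 32768, so chunk_order == 15, a 100000-byte resource has
	 * (100000 + 32767) >> 15 == 4 chunks, and the last chunk decompresses
	 * to 100000 - 3 * 32768 == 1696 bytes.) */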
	/* Calculate the 0-based indices of the first and last chunks containing
	 * data that needs to be passed to the callback.  */
	const u64 first_needed_chunk = first_offset >> chunk_order;
	const u64 last_needed_chunk = last_offset >> chunk_order;

	/* Calculate the 0-based index of the first chunk that actually needs to
	 * be read.  This is normally first_needed_chunk, but for pipe reads we
	 * must always start from the 0th chunk.  */
	const u64 read_start_chunk = (is_pipe_read ? 0 : first_needed_chunk);

	/* Calculate the number of chunk offsets that are needed for the chunks
	 * being read.  */
	const u64 num_needed_chunk_offsets =
		last_needed_chunk - read_start_chunk + 1 +
		(last_needed_chunk < num_chunks - 1);
	/* Calculate the number of entries in the chunk table.  Normally, it's
	 * one less than the number of chunks, since the first chunk has no
	 * entry.  But in the alternate chunk table format, the chunk entries
	 * contain chunk sizes, not offsets, and there is one per chunk.  */
	const u64 num_chunk_entries = (alt_chunk_table ? num_chunks : num_chunks - 1);

	/* Set the size of each chunk table entry based on the resource's
	 * uncompressed size.  */
	const u64 chunk_entry_size = get_chunk_entry_size(rdesc->uncompressed_size,
							  alt_chunk_table);

	/* Calculate the size of the chunk table in bytes.  */
	const u64 chunk_table_size = num_chunk_entries * chunk_entry_size;

	/* Calculate the size of the chunk table in bytes, including the header
	 * in the case of the alternate chunk table format.  */
	const u64 chunk_table_full_size =
		(alt_chunk_table) ? chunk_table_size + sizeof(struct alt_chunk_table_header_disk)
				  : chunk_table_size;
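	/* (Illustrative arithmetic: a 4-chunk original-format resource under
	 * 4 GiB uncompressed has 3 entries of 4 bytes each, so chunk_table_size
	 * == chunk_table_full_size == 12; the solid format would instead have
	 * 4 entries plus its 16-byte header, i.e. 16 + 16 == 32 bytes.) */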
	if (!is_pipe_read) {
		/* Read the needed chunk table entries into memory and use them
		 * to initialize the chunk_offsets array.  */

		u64 first_chunk_entry_to_read;
		u64 num_chunk_entries_to_read;

		if (alt_chunk_table) {
			/* The alternate chunk table contains chunk sizes, not
			 * offsets, so we always must read all preceding entries
			 * in order to determine offsets.  */
			first_chunk_entry_to_read = 0;
			num_chunk_entries_to_read = last_needed_chunk + 1;
		} else {
			num_chunk_entries_to_read = last_needed_chunk - read_start_chunk + 1;

			/* The first chunk has no explicit chunk table entry.  */
			if (read_start_chunk == 0) {
				num_chunk_entries_to_read--;
				first_chunk_entry_to_read = 0;
			} else {
				first_chunk_entry_to_read = read_start_chunk - 1;
			}

			/* Unless we're reading the final chunk of the resource,
			 * we need the offset of the chunk following the last
			 * needed chunk so that the compressed size of the last
			 * needed chunk can be computed.  */
			if (last_needed_chunk < num_chunks - 1)
				num_chunk_entries_to_read++;
		}
		const u64 chunk_offsets_alloc_size =
			max(num_chunk_entries_to_read,
			    num_needed_chunk_offsets) * sizeof(chunk_offsets[0]);

		if (unlikely((size_t)chunk_offsets_alloc_size != chunk_offsets_alloc_size)) {
			ret = WIMLIB_ERR_NOMEM;
			goto oom;
		}

		if (likely(chunk_offsets_alloc_size <= STACK_MAX)) {
			chunk_offsets = alloca(chunk_offsets_alloc_size);
		} else {
			chunk_offsets = MALLOC(chunk_offsets_alloc_size);
			if (unlikely(!chunk_offsets))
				goto oom;
			chunk_offsets_malloced = true;
		}

		const size_t chunk_table_size_to_read =
			num_chunk_entries_to_read * chunk_entry_size;

		const u64 file_offset_of_needed_chunk_entries =
			cur_read_offset
			+ (first_chunk_entry_to_read * chunk_entry_size)
			+ (rdesc->is_pipable ? (rdesc->size_in_wim - chunk_table_size) : 0);

		void * const chunk_table_data =
			(u8 *)chunk_offsets +
			chunk_offsets_alloc_size -
			chunk_table_size_to_read;

		ret = full_pread(in_fd, chunk_table_data, chunk_table_size_to_read,
				 file_offset_of_needed_chunk_entries);
		if (unlikely(ret))
			goto read_error;
		/* Now fill in chunk_offsets from the entries we have read in
		 * chunk_table_data.  We break aliasing rules here to avoid
		 * having to allocate yet another array.  */
		typedef le64 __attribute__((may_alias)) aliased_le64_t;
		typedef le32 __attribute__((may_alias)) aliased_le32_t;
		u64 *chunk_offsets_p = chunk_offsets;

		if (alt_chunk_table) {
			u64 cur_offset = 0;
			aliased_le32_t *raw_entries = chunk_table_data;

			for (size_t i = 0; i < num_chunk_entries_to_read; i++) {
				u32 entry = le32_to_cpu(raw_entries[i]);
				if (i >= read_start_chunk)
					*chunk_offsets_p++ = cur_offset;
				cur_offset += entry;
			}
			if (last_needed_chunk < num_chunks - 1)
				*chunk_offsets_p = cur_offset;
		} else {
			if (read_start_chunk == 0)
				*chunk_offsets_p++ = 0;

			if (chunk_entry_size == 4) {
				aliased_le32_t *raw_entries = chunk_table_data;
				for (size_t i = 0; i < num_chunk_entries_to_read; i++)
					*chunk_offsets_p++ = le32_to_cpu(raw_entries[i]);
			} else {
				aliased_le64_t *raw_entries = chunk_table_data;
				for (size_t i = 0; i < num_chunk_entries_to_read; i++)
					*chunk_offsets_p++ = le64_to_cpu(raw_entries[i]);
			}
		}

		/* Set offset to beginning of first chunk to read.  */
		cur_read_offset += chunk_offsets[0];
		if (rdesc->is_pipable)
			cur_read_offset += read_start_chunk * sizeof(struct pwm_chunk_hdr);
		else
			cur_read_offset += chunk_table_size;
	}
	/* Allocate buffer for holding the uncompressed data of each chunk.  */
	if (chunk_size <= STACK_MAX) {
		ubuf = alloca(chunk_size);
	} else {
		ubuf = MALLOC(chunk_size);
		if (unlikely(!ubuf))
			goto oom;
		ubuf_malloced = true;
	}

	/* Allocate a temporary buffer for reading compressed chunks, each of
	 * which can be at most @chunk_size - 1 bytes.  This excludes compressed
	 * chunks that are a full @chunk_size bytes, which are actually stored
	 * uncompressed.  */
	if (chunk_size - 1 <= STACK_MAX) {
		cbuf = alloca(chunk_size - 1);
	} else {
		cbuf = MALLOC(chunk_size - 1);
		if (unlikely(!cbuf))
			goto oom;
		cbuf_malloced = true;
	}
	/* Set current data range.  */
	const struct data_range *cur_range = ranges;
	const struct data_range * const end_range = &ranges[num_ranges];
	u64 cur_range_pos = cur_range->offset;
	u64 cur_range_end = cur_range->offset + cur_range->size;
	/* Read and process each needed chunk.  */
	for (u64 i = read_start_chunk; i <= last_needed_chunk; i++) {

		/* Calculate uncompressed size of next chunk.  */
		u32 chunk_usize;
		if ((i == num_chunks - 1) && (rdesc->uncompressed_size & (chunk_size - 1)))
			chunk_usize = (rdesc->uncompressed_size & (chunk_size - 1));
		else
			chunk_usize = chunk_size;

		/* Calculate compressed size of next chunk.  */
		u32 chunk_csize;
		if (is_pipe_read) {
			struct pwm_chunk_hdr chunk_hdr;

			ret = full_pread(in_fd, &chunk_hdr,
					 sizeof(chunk_hdr), cur_read_offset);
			if (unlikely(ret))
				goto read_error;
			chunk_csize = le32_to_cpu(chunk_hdr.compressed_size);
		} else {
			if (i == num_chunks - 1) {
				chunk_csize = rdesc->size_in_wim -
					      chunk_table_full_size -
					      chunk_offsets[i - read_start_chunk];
				if (rdesc->is_pipable)
					chunk_csize -= num_chunks * sizeof(struct pwm_chunk_hdr);
			} else {
				chunk_csize = chunk_offsets[i + 1 - read_start_chunk] -
					      chunk_offsets[i - read_start_chunk];
			}
		}
		if (unlikely(chunk_csize == 0 || chunk_csize > chunk_usize)) {
			ERROR("Invalid chunk size in compressed resource!");
			errno = EINVAL;
			ret = WIMLIB_ERR_DECOMPRESSION;
			goto out_cleanup;
		}
		if (rdesc->is_pipable)
			cur_read_offset += sizeof(struct pwm_chunk_hdr);
		/* Offsets in the uncompressed resource at which this chunk
		 * starts and ends.  */
		const u64 chunk_start_offset = i << chunk_order;
		const u64 chunk_end_offset = chunk_start_offset + chunk_usize;

		if (chunk_end_offset <= cur_range_pos) {

			/* The next range does not require data in this chunk,
			 * so skip it.  */
			cur_read_offset += chunk_csize;
			if (is_pipe_read) {
				u8 dummy;

				ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
				if (unlikely(ret))
					goto read_error;
			}
		} else {
			/* Read the chunk and feed data to the callback
			 * function.  */
			u8 *read_buf;

			if (chunk_csize == chunk_usize)
				read_buf = ubuf;
			else
				read_buf = cbuf;

			ret = full_pread(in_fd,
					 read_buf,
					 chunk_csize,
					 cur_read_offset);
			if (unlikely(ret))
				goto read_error;

			if (read_buf == cbuf) {
				ret = decompress_chunk(cbuf, chunk_csize,
						       ubuf, chunk_usize,
						       decompressor,
						       recover_data);
				if (unlikely(ret))
					goto out_cleanup;
			}
			cur_read_offset += chunk_csize;

			/* At least one range requires data in this chunk.  */
			do {
				size_t start, end, size;

				/* Calculate how many bytes of data should be
				 * sent to the callback function, taking into
				 * account that data sent to the callback
				 * function must not overlap range boundaries.  */
				start = cur_range_pos - chunk_start_offset;
				end = min(cur_range_end, chunk_end_offset) - chunk_start_offset;
				size = end - start;

				ret = consume_chunk(cb, &ubuf[start], size);
				if (unlikely(ret))
					goto out_cleanup;

				cur_range_pos += size;
				if (cur_range_pos == cur_range_end) {
					/* Advance to next range.  */
					if (++cur_range == end_range) {
						cur_range_pos = ~0ULL;
					} else {
						cur_range_pos = cur_range->offset;
						cur_range_end = cur_range->offset + cur_range->size;
					}
				}
			} while (cur_range_pos < chunk_end_offset);
		}
	}
	if (is_pipe_read &&
	    last_offset == rdesc->uncompressed_size - 1 &&
	    chunk_table_size)
	{
		u8 dummy;

		/* If reading a pipable resource from a pipe and the full data
		 * was requested, skip the chunk table at the end so that the
		 * file descriptor is fully clear of the resource after this
		 * returns.  */
		cur_read_offset += chunk_table_size;
		ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
		if (unlikely(ret))
			goto read_error;
	}
	ret = 0;
out_cleanup:
	if (decompressor) {
		wimlib_free_decompressor(rdesc->wim->decompressor);
		rdesc->wim->decompressor = decompressor;
		rdesc->wim->decompressor_ctype = ctype;
		rdesc->wim->decompressor_max_block_size = chunk_size;
	}
	if (chunk_offsets_malloced)
		FREE(chunk_offsets);
	if (ubuf_malloced)
		FREE(ubuf);
	if (cbuf_malloced)
		FREE(cbuf);
	return ret;

oom:
	ERROR("Out of memory while reading compressed WIM resource");
	ret = WIMLIB_ERR_NOMEM;
	errno = ENOMEM;
	goto out_cleanup;

read_error:
	ERROR_WITH_ERRNO("Error reading data from WIM file");
	ret = WIMLIB_ERR_READ;
	goto out_cleanup;
}
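/*
 * Illustrative usage sketch (hypothetical caller, kept as a comment so it is
 * not compiled; my_consume and my_ctx are assumptions): read bytes [100, 200)
 * and [500, 600) of the uncompressed view of a compressed resource, feeding
 * the data to my_consume() in chunks.
 *
 *	struct data_range ranges[] = {
 *		{ .offset = 100, .size = 100 },
 *		{ .offset = 500, .size = 100 },
 *	};
 *	struct consume_chunk_callback cb = {
 *		.func = my_consume,	// int my_consume(const void *, size_t, void *)
 *		.ctx = my_ctx,
 *	};
 *	ret = read_compressed_wim_resource(rdesc, ranges, ARRAY_LEN(ranges),
 *					   &cb, false);
 *
 * Note the ranges satisfy the documented contract: nonempty, nonoverlapping,
 * and sorted by increasing offset.
 */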
/* Read raw data from a file descriptor at the specified offset, feeding the
 * data in nonempty chunks into the specified callback function.  */
static int
read_raw_file_data(struct filedes *in_fd, u64 offset, u64 size,
		   const struct consume_chunk_callback *cb,
		   const tchar *filename)
{
	u8 buf[BUFFER_SIZE];
	size_t bytes_to_read;
	int ret;

	while (size) {
		bytes_to_read = min(sizeof(buf), size);
		ret = full_pread(in_fd, buf, bytes_to_read, offset);
		if (unlikely(ret))
			goto read_error;
		ret = consume_chunk(cb, buf, bytes_to_read);
		if (unlikely(ret))
			return ret;
		size -= bytes_to_read;
		offset += bytes_to_read;
	}
	return 0;

read_error:
	if (!filename) {
		ERROR_WITH_ERRNO("Error reading data from WIM file");
	} else if (ret == WIMLIB_ERR_UNEXPECTED_END_OF_FILE) {
		ERROR("\"%"TS"\": File was concurrently truncated", filename);
		ret = WIMLIB_ERR_CONCURRENT_MODIFICATION_DETECTED;
	} else {
		ERROR_WITH_ERRNO("\"%"TS"\": Error reading data", filename);
	}
	return ret;
}
/* A consume_chunk implementation which simply concatenates all chunks into an
 * in-memory buffer.  */
static int
bufferer_cb(const void *chunk, size_t size, void *_ctx)
{
	void **buf_p = _ctx;

	*buf_p = mempcpy(*buf_p, chunk, size);
	return 0;
}
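/*
 * (Illustration only: mempcpy() copies like memcpy() but returns the end of
 * the destination, so after feeding bufferer_cb chunks of 10 then 20 bytes
 * starting at p, *buf_p == p + 30 and successive chunks land back-to-back.
 * See read_blob_into_buf() below for the actual usage pattern.)
 */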
/*
 * Read @size bytes at @offset in the WIM resource described by @rdesc and feed
 * the data into the @cb callback function.
 *
 * @offset and @size are assumed to have already been validated against the
 * resource's uncompressed size.
 *
 * Returns 0 on success; or the first nonzero value returned by the callback
 * function; or a nonzero wimlib error code with errno set as well.
 */
static int
read_partial_wim_resource(const struct wim_resource_descriptor *rdesc,
			  const u64 offset, const u64 size,
			  const struct consume_chunk_callback *cb,
			  bool recover_data)
{
	if (rdesc->flags & (WIM_RESHDR_FLAG_COMPRESSED |
			    WIM_RESHDR_FLAG_SOLID)) {
		/* Compressed resource  */
		if (unlikely(!size))
			return 0;
		struct data_range range = {
			.offset = offset,
			.size = size,
		};
		return read_compressed_wim_resource(rdesc, &range, 1, cb,
						    recover_data);
	}

	/* Uncompressed resource  */
	return read_raw_file_data(&rdesc->wim->in_fd,
				  rdesc->offset_in_wim + offset,
				  size, cb, rdesc->wim->filename);
}
/* Read the specified range of uncompressed data from the specified blob, which
 * must be located in a WIM file, into the specified buffer.  */
int
read_partial_wim_blob_into_buf(const struct blob_descriptor *blob,
			       u64 offset, size_t size, void *buf)
{
	struct consume_chunk_callback cb = {
		.func = bufferer_cb,
		.ctx = &buf,
	};
	return read_partial_wim_resource(blob->rdesc,
					 blob->offset_in_res + offset,
					 size, &cb, false);
}
static int
noop_cb(const void *chunk, size_t size, void *_ctx)
{
	return 0;
}

/* Skip over the data of the specified WIM resource.  */
int
skip_wim_resource(const struct wim_resource_descriptor *rdesc)
{
	static const struct consume_chunk_callback cb = {
		.func = noop_cb,
	};
	return read_partial_wim_resource(rdesc, 0,
					 rdesc->uncompressed_size, &cb, false);
}
static int
read_wim_blob_prefix(const struct blob_descriptor *blob, u64 size,
		     const struct consume_chunk_callback *cb, bool recover_data)
{
	return read_partial_wim_resource(blob->rdesc, blob->offset_in_res,
					 size, cb, recover_data);
}
/* This function handles reading blob data that is located in an external file,
 * such as a file that has been added to the WIM image through execution of a
 * wimlib_add_command.
 *
 * This assumes the file can be accessed using the standard POSIX open(),
 * read(), and close() functions.  On Windows this will not necessarily be the
 * case (since the file may need FILE_FLAG_BACKUP_SEMANTICS to be opened, or the
 * file may be encrypted), so Windows uses its own code for its equivalent
 * case.  */
static int
read_file_on_disk_prefix(const struct blob_descriptor *blob, u64 size,
			 const struct consume_chunk_callback *cb,
			 bool recover_data)
{
	int ret;
	int raw_fd;
	struct filedes fd;

	raw_fd = topen(blob->file_on_disk, O_BINARY | O_RDONLY);
	if (unlikely(raw_fd < 0)) {
		ERROR_WITH_ERRNO("Can't open \"%"TS"\"", blob->file_on_disk);
		return WIMLIB_ERR_OPEN;
	}
	filedes_init(&fd, raw_fd);
	ret = read_raw_file_data(&fd, 0, size, cb, blob->file_on_disk);
	filedes_close(&fd);
	return ret;
}
#ifdef WITH_FUSE
static int
read_staging_file_prefix(const struct blob_descriptor *blob, u64 size,
			 const struct consume_chunk_callback *cb,
			 bool recover_data)
{
	int raw_fd;
	struct filedes fd;
	int ret;

	raw_fd = openat(blob->staging_dir_fd, blob->staging_file_name,
			O_RDONLY | O_NOFOLLOW);
	if (unlikely(raw_fd < 0)) {
		ERROR_WITH_ERRNO("Can't open staging file \"%s\"",
				 blob->staging_file_name);
		return WIMLIB_ERR_OPEN;
	}
	filedes_init(&fd, raw_fd);
	ret = read_raw_file_data(&fd, 0, size, cb, blob->staging_file_name);
	filedes_close(&fd);
	return ret;
}
#endif /* WITH_FUSE */
/* This function handles the trivial case of reading blob data that is, in fact,
 * already located in an in-memory buffer.  */
static int
read_buffer_prefix(const struct blob_descriptor *blob,
		   u64 size, const struct consume_chunk_callback *cb,
		   bool recover_data)
{
	if (unlikely(!size))
		return 0;
	return consume_chunk(cb, blob->attached_buffer, size);
}
typedef int (*read_blob_prefix_handler_t)(const struct blob_descriptor *blob,
					  u64 size,
					  const struct consume_chunk_callback *cb,
					  bool recover_data);

/*
 * Read the first @size bytes from a generic "blob", which may be located in any
 * one of several locations, such as in a WIM resource (possibly compressed), in
 * an external file, or directly in an in-memory buffer.  The blob data will be
 * fed to @cb in chunks that are nonempty but otherwise are of unspecified size.
 *
 * Returns 0 on success; nonzero on error.  A nonzero value will be returned if
 * the blob data cannot be successfully read (for a number of different reasons,
 * depending on the blob location), or if @cb returned nonzero, in which case
 * that error code will be returned.  If @recover_data is true, then errors
 * decompressing chunks in WIM resources will be ignored.
 */
static int
read_blob_prefix(const struct blob_descriptor *blob, u64 size,
		 const struct consume_chunk_callback *cb, bool recover_data)
{
	static const read_blob_prefix_handler_t handlers[] = {
		[BLOB_IN_WIM] = read_wim_blob_prefix,
		[BLOB_IN_FILE_ON_DISK] = read_file_on_disk_prefix,
		[BLOB_IN_ATTACHED_BUFFER] = read_buffer_prefix,
	#ifdef WITH_FUSE
		[BLOB_IN_STAGING_FILE] = read_staging_file_prefix,
	#endif
	#ifdef WITH_NTFS_3G
		[BLOB_IN_NTFS_VOLUME] = read_ntfs_attribute_prefix,
	#endif
	#ifdef _WIN32
		[BLOB_IN_WINDOWS_FILE] = read_windows_file_prefix,
	#endif
	};
	wimlib_assert(blob->blob_location < ARRAY_LEN(handlers)
		      && handlers[blob->blob_location] != NULL);
	wimlib_assert(size <= blob->size);
	return handlers[blob->blob_location](blob, size, cb, recover_data);
}
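/*
 * Illustrative usage sketch (kept as a comment; count_cb is a hypothetical
 * helper, not used elsewhere in this file): drive read_blob_prefix() with a
 * trivial callback that just totals the bytes fed to it, e.g. to confirm that
 * a blob's data is readable from wherever it lives.
 *
 *	static int count_cb(const void *chunk, size_t size, void *_total)
 *	{
 *		*(u64 *)_total += size;
 *		return 0;
 *	}
 *
 *	u64 total = 0;
 *	struct consume_chunk_callback cb = { .func = count_cb, .ctx = &total };
 *	ret = read_blob_prefix(blob, blob->size, &cb, false);
 *	// On success, total == blob->size.
 */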
struct blob_chunk_ctx {
	const struct blob_descriptor *blob;
	const struct read_blob_callbacks *cbs;
	u64 offset;
};

static int
consume_blob_chunk(const void *chunk, size_t size, void *_ctx)
{
	struct blob_chunk_ctx *ctx = _ctx;
	int ret;

	ret = call_continue_blob(ctx->blob, ctx->offset, chunk, size, ctx->cbs);
	ctx->offset += size;
	return ret;
}
/* Read the full data of the specified blob, passing the data into the specified
 * callbacks (all of which are optional).  */
static int
read_blob_with_cbs(struct blob_descriptor *blob,
		   const struct read_blob_callbacks *cbs, bool recover_data)
{
	int ret;
	struct blob_chunk_ctx ctx = {
		.blob = blob,
		.cbs = cbs,
		.offset = 0,
	};
	struct consume_chunk_callback cb = {
		.func = consume_blob_chunk,
		.ctx = &ctx,
	};

	ret = call_begin_blob(blob, cbs);
	if (unlikely(ret))
		return ret;

	ret = read_blob_prefix(blob, blob->size, &cb, recover_data);

	return call_end_blob(blob, ret, cbs);
}
/* Read the full uncompressed data of the specified blob into the specified
 * buffer, which must have space for at least blob->size bytes.  The SHA-1
 * message digest is *not* checked.  */
int
read_blob_into_buf(const struct blob_descriptor *blob, void *buf)
{
	struct consume_chunk_callback cb = {
		.func = bufferer_cb,
		.ctx = &buf,
	};
	return read_blob_prefix(blob, blob->size, &cb, false);
}
/* Retrieve the full uncompressed data of the specified blob.  A buffer large
 * enough to hold the data is allocated and returned in @buf_ret.  The SHA-1
 * message digest is *not* checked.  */
int
read_blob_into_alloc_buf(const struct blob_descriptor *blob, void **buf_ret)
{
	int ret;
	void *buf;

	if (unlikely((size_t)blob->size != blob->size)) {
		ERROR("Can't read %"PRIu64" byte blob into memory", blob->size);
		return WIMLIB_ERR_NOMEM;
	}

	buf = MALLOC(blob->size);
	if (unlikely(!buf))
		return WIMLIB_ERR_NOMEM;

	ret = read_blob_into_buf(blob, buf);
	if (unlikely(ret)) {
		FREE(buf);
		return ret;
	}

	*buf_ret = buf;
	return 0;
}
/* Retrieve the full uncompressed data of a WIM resource specified as a raw
 * `wim_reshdr' and the corresponding WIM file.  A buffer large enough to hold
 * the data is allocated and returned in @buf_ret.  */
int
wim_reshdr_to_data(const struct wim_reshdr *reshdr, WIMStruct *wim,
		   void **buf_ret)
{
	struct wim_resource_descriptor rdesc;
	struct blob_descriptor blob;

	wim_reshdr_to_desc_and_blob(reshdr, wim, &rdesc, &blob);

	return read_blob_into_alloc_buf(&blob, buf_ret);
}
/* Calculate the SHA-1 message digest of the uncompressed data of the specified
 * WIM resource.  */
int
wim_reshdr_to_hash(const struct wim_reshdr *reshdr, WIMStruct *wim,
		   u8 hash[SHA1_HASH_SIZE])
{
	struct wim_resource_descriptor rdesc;
	struct blob_descriptor blob;
	int ret;

	wim_reshdr_to_desc_and_blob(reshdr, wim, &rdesc, &blob);
	blob.unhashed = 1;

	ret = sha1_blob(&blob);
	if (unlikely(ret))
		return ret;

	copy_hash(hash, blob.hash);
	return 0;
}
struct blobifier_context {
	struct read_blob_callbacks cbs;
	struct blob_descriptor *cur_blob;
	struct blob_descriptor *next_blob;
	u64 cur_blob_offset;
	struct blob_descriptor *final_blob;
	size_t list_head_offset;
};

static struct blob_descriptor *
next_blob(struct blob_descriptor *blob, size_t list_head_offset)
{
	struct list_head *cur;

	cur = (struct list_head *)((u8 *)blob + list_head_offset);

	return (struct blob_descriptor *)((u8 *)cur->next - list_head_offset);
}
/*
 * A consume_chunk implementation that translates raw resource data into blobs,
 * calling the begin_blob, continue_blob, and end_blob callbacks as appropriate.
 */
static int
blobifier_cb(const void *chunk, size_t size, void *_ctx)
{
	struct blobifier_context *ctx = _ctx;
	int ret;

	wimlib_assert(ctx->cur_blob != NULL);
	wimlib_assert(size <= ctx->cur_blob->size - ctx->cur_blob_offset);

	if (ctx->cur_blob_offset == 0) {
		/* Starting a new blob.  */
		ret = call_begin_blob(ctx->cur_blob, &ctx->cbs);
		if (ret)
			return ret;
	}

	ret = call_continue_blob(ctx->cur_blob, ctx->cur_blob_offset,
				 chunk, size, &ctx->cbs);
	ctx->cur_blob_offset += size;
	if (ret)
		return ret;

	if (ctx->cur_blob_offset == ctx->cur_blob->size) {
		/* Finished reading all the data for a blob.  */

		ctx->cur_blob_offset = 0;

		ret = call_end_blob(ctx->cur_blob, 0, &ctx->cbs);
		if (ret)
			return ret;

		/* Advance to next blob.  */
		ctx->cur_blob = ctx->next_blob;
		if (ctx->cur_blob != NULL) {
			if (ctx->cur_blob != ctx->final_blob)
				ctx->next_blob = next_blob(ctx->cur_blob,
							   ctx->list_head_offset);
			else
				ctx->next_blob = NULL;
		}
	}
	return 0;
}
struct hasher_context {
	struct sha1_ctx sha_ctx;
	int flags;
	struct read_blob_callbacks cbs;
};

/* Callback for starting to read a blob while calculating its SHA-1 message
 * digest.  */
static int
hasher_begin_blob(struct blob_descriptor *blob, void *_ctx)
{
	struct hasher_context *ctx = _ctx;

	sha1_init(&ctx->sha_ctx);

	return call_begin_blob(blob, &ctx->cbs);
}
/*
 * A continue_blob() implementation that continues calculating the SHA-1
 * message digest of the blob being read, then optionally passes the data on to
 * another continue_blob() implementation.  This allows checking the SHA-1
 * message digest of a blob being extracted, for example.
 */
static int
hasher_continue_blob(const struct blob_descriptor *blob, u64 offset,
		     const void *chunk, size_t size, void *_ctx)
{
	struct hasher_context *ctx = _ctx;

	sha1_update(&ctx->sha_ctx, chunk, size);

	return call_continue_blob(blob, offset, chunk, size, &ctx->cbs);
}
static int
report_sha1_mismatch(struct blob_descriptor *blob,
		     const u8 actual_hash[SHA1_HASH_SIZE], bool recover_data)
{
	tchar expected_hashstr[SHA1_HASH_STRING_LEN];
	tchar actual_hashstr[SHA1_HASH_STRING_LEN];

	wimlib_assert(blob->blob_location != BLOB_NONEXISTENT);
	wimlib_assert(blob->blob_location != BLOB_IN_ATTACHED_BUFFER);

	sprint_hash(blob->hash, expected_hashstr);
	sprint_hash(actual_hash, actual_hashstr);

	blob->corrupted = 1;

	if (blob_is_in_file(blob)) {
		ERROR("A file was concurrently modified!\n"
		      "        Path: \"%"TS"\"\n"
		      "        Expected SHA-1: %"TS"\n"
		      "        Actual SHA-1: %"TS"\n",
		      blob_file_path(blob), expected_hashstr, actual_hashstr);
		return WIMLIB_ERR_CONCURRENT_MODIFICATION_DETECTED;
	} else if (blob->blob_location == BLOB_IN_WIM) {
		const struct wim_resource_descriptor *rdesc = blob->rdesc;

		(recover_data ? wimlib_warning : wimlib_error)(
			T("A WIM resource is corrupted!\n"
			  "        WIM file: \"%"TS"\"\n"
			  "        Blob uncompressed size: %"PRIu64"\n"
			  "        Resource offset in WIM: %"PRIu64"\n"
			  "        Resource uncompressed size: %"PRIu64"\n"
			  "        Resource size in WIM: %"PRIu64"\n"
			  "        Resource flags: 0x%x%"TS"\n"
			  "        Resource compression type: %"TS"\n"
			  "        Resource compression chunk size: %"PRIu32"\n"
			  "        Expected SHA-1: %"TS"\n"
			  "        Actual SHA-1: %"TS"\n"),
			rdesc->wim->filename,
			blob->size,
			rdesc->offset_in_wim,
			rdesc->uncompressed_size,
			rdesc->size_in_wim,
			(unsigned int)rdesc->flags,
			(rdesc->is_pipable ? T(", pipable") : T("")),
			wimlib_get_compression_type_string(
				rdesc->compression_type),
			rdesc->chunk_size,
			expected_hashstr, actual_hashstr);
		if (recover_data)
			return 0;
		return WIMLIB_ERR_INVALID_RESOURCE_HASH;
	} else {
		ERROR("File data was concurrently modified!\n"
		      "        Location ID: %d\n"
		      "        Expected SHA-1: %"TS"\n"
		      "        Actual SHA-1: %"TS"\n",
		      (int)blob->blob_location,
		      expected_hashstr, actual_hashstr);
		return WIMLIB_ERR_CONCURRENT_MODIFICATION_DETECTED;
	}
}
/* Callback for finishing reading a blob while calculating its SHA-1 message
 * digest.  */
static int
hasher_end_blob(struct blob_descriptor *blob, int status, void *_ctx)
{
	struct hasher_context *ctx = _ctx;
	u8 hash[SHA1_HASH_SIZE];
	int ret;

	if (unlikely(status)) {
		/* Error occurred; the full blob may not have been read.  */
		ret = status;
		goto out_next_cb;
	}

	/* Retrieve the final SHA-1 message digest.  */
	sha1_final(&ctx->sha_ctx, hash);

	/* Set the SHA-1 message digest of the blob, or compare the calculated
	 * value with the stored value.  */
	if (blob->unhashed) {
		if (ctx->flags & COMPUTE_MISSING_BLOB_HASHES)
			copy_hash(blob->hash, hash);
	} else if ((ctx->flags & VERIFY_BLOB_HASHES) &&
		   unlikely(!hashes_equal(hash, blob->hash)))
	{
		ret = report_sha1_mismatch(blob, hash,
					   ctx->flags & RECOVER_DATA);
		goto out_next_cb;
	}
	ret = 0;
out_next_cb:
	return call_end_blob(blob, ret, &ctx->cbs);
}
/* Read the full data of the specified blob, passing the data into the specified
 * callbacks (all of which are optional) and either checking or computing the
 * SHA-1 message digest of the blob.  */
static int
read_blob_with_sha1(struct blob_descriptor *blob,
		    const struct read_blob_callbacks *cbs, bool recover_data)
{
	struct hasher_context hasher_ctx = {
		.flags = VERIFY_BLOB_HASHES | COMPUTE_MISSING_BLOB_HASHES |
			 (recover_data ? RECOVER_DATA : 0),
		.cbs = *cbs,
	};
	struct read_blob_callbacks hasher_cbs = {
		.begin_blob = hasher_begin_blob,
		.continue_blob = hasher_continue_blob,
		.end_blob = hasher_end_blob,
		.ctx = &hasher_ctx,
	};
	return read_blob_with_cbs(blob, &hasher_cbs, recover_data);
}
static int
read_blobs_in_solid_resource(struct blob_descriptor *first_blob,
			     struct blob_descriptor *last_blob,
			     size_t blob_count,
			     size_t list_head_offset,
			     const struct read_blob_callbacks *sink_cbs,
			     bool recover_data)
{
	struct data_range *ranges;
	bool ranges_malloced;
	struct blob_descriptor *cur_blob;
	size_t i;
	int ret;
	u64 ranges_alloc_size;

	/* Setup data ranges array (one range per blob to read); this way
	 * read_compressed_wim_resource() does not need to be aware of blobs.
	 */

	ranges_alloc_size = (u64)blob_count * sizeof(ranges[0]);

	if (unlikely((size_t)ranges_alloc_size != ranges_alloc_size))
		goto oom;

	if (ranges_alloc_size <= STACK_MAX) {
		ranges = alloca(ranges_alloc_size);
		ranges_malloced = false;
	} else {
		ranges = MALLOC(ranges_alloc_size);
		if (unlikely(!ranges))
			goto oom;
		ranges_malloced = true;
	}

	for (i = 0, cur_blob = first_blob;
	     i < blob_count;
	     i++, cur_blob = next_blob(cur_blob, list_head_offset))
	{
		ranges[i].offset = cur_blob->offset_in_res;
		ranges[i].size = cur_blob->size;
	}

	struct blobifier_context blobifier_ctx = {
		.cbs = *sink_cbs,
		.cur_blob = first_blob,
		.next_blob = next_blob(first_blob, list_head_offset),
		.cur_blob_offset = 0,
		.final_blob = last_blob,
		.list_head_offset = list_head_offset,
	};
	struct consume_chunk_callback cb = {
		.func = blobifier_cb,
		.ctx = &blobifier_ctx,
	};

	ret = read_compressed_wim_resource(first_blob->rdesc, ranges,
					   blob_count, &cb, recover_data);

	if (ranges_malloced)
		FREE(ranges);

	if (unlikely(ret && blobifier_ctx.cur_blob_offset != 0)) {
		ret = call_end_blob(blobifier_ctx.cur_blob, ret,
				    &blobifier_ctx.cbs);
	}
	return ret;

oom:
	ERROR("Too many blobs in one resource!");
	return WIMLIB_ERR_NOMEM;
}
/*
 * Read a list of blobs, each of which may be in any supported location (e.g.
 * in a WIM or in an external file).  This function optimizes the case where
 * multiple blobs are combined into a single solid compressed WIM resource by
 * reading the blobs in sequential order, only decompressing the solid resource
 * once.
 *
 * @blob_list
 *	List of blobs to read.
 * @list_head_offset
 *	Offset of the `struct list_head' within each `struct blob_descriptor'
 *	that makes up the @blob_list.
 * @cbs
 *	Callback functions to accept the blob data.
 * @flags
 *	Bitwise OR of zero or more of the following flags:
 *
 *	VERIFY_BLOB_HASHES:
 *		For all blobs being read that have already had SHA-1 message
 *		digests computed, calculate the SHA-1 message digest of the read
 *		data and compare it with the previously computed value.  If they
 *		do not match, return WIMLIB_ERR_INVALID_RESOURCE_HASH (unless
 *		RECOVER_DATA is also set, in which case just issue a warning).
 *
 *	COMPUTE_MISSING_BLOB_HASHES:
 *		For all blobs being read that have not yet had their SHA-1
 *		message digests computed, calculate and save their SHA-1 message
 *		digests.
 *
 *	BLOB_LIST_ALREADY_SORTED:
 *		@blob_list is already sorted in sequential order for reading.
 *
 *	RECOVER_DATA:
 *		Don't consider corrupted blob data to be an error.
 *
 * The callback functions are allowed to delete the current blob from the list
 * if necessary.
 *
 * Returns 0 on success; a nonzero error code on failure.  Failure can occur due
 * to an error reading the data or due to an error status being returned by any
 * of the callback functions.
 */
int
read_blob_list(struct list_head *blob_list, size_t list_head_offset,
	       const struct read_blob_callbacks *cbs, int flags)
{
	int ret;
	struct list_head *cur, *next;
	struct blob_descriptor *blob;
	struct hasher_context *hasher_ctx;
	struct read_blob_callbacks *sink_cbs;

	if (!(flags & BLOB_LIST_ALREADY_SORTED)) {
		ret = sort_blob_list_by_sequential_order(blob_list,
							 list_head_offset);
		if (ret)
			return ret;
	}

	if (flags & (VERIFY_BLOB_HASHES | COMPUTE_MISSING_BLOB_HASHES)) {
		hasher_ctx = alloca(sizeof(*hasher_ctx));
		*hasher_ctx = (struct hasher_context) {
			.flags = flags,
			.cbs = *cbs,
		};
		sink_cbs = alloca(sizeof(*sink_cbs));
		*sink_cbs = (struct read_blob_callbacks) {
			.begin_blob = hasher_begin_blob,
			.continue_blob = hasher_continue_blob,
			.end_blob = hasher_end_blob,
			.ctx = hasher_ctx,
		};
	} else {
		sink_cbs = (struct read_blob_callbacks *)cbs;
	}

	for (cur = blob_list->next, next = cur->next;
	     cur != blob_list;
	     cur = next, next = cur->next)
	{
		blob = (struct blob_descriptor *)((u8 *)cur - list_head_offset);

		if (blob->blob_location == BLOB_IN_WIM &&
		    blob->size != blob->rdesc->uncompressed_size)
		{
			struct blob_descriptor *blob_next, *blob_last;
			struct list_head *next2;
			size_t blob_count;

			/* The next blob is a proper sub-sequence of a WIM
			 * resource.  See if there are other blobs in the same
			 * resource that need to be read.  Since
			 * sort_blob_list_by_sequential_order() sorted the blobs
			 * by offset in the WIM, this can be determined by
			 * simply scanning forward in the list.  */
			blob_last = blob;
			blob_count = 1;
			for (next2 = next;
			     next2 != blob_list
			     && (blob_next = (struct blob_descriptor *)
					((u8 *)next2 - list_head_offset),
				 blob_next->blob_location == BLOB_IN_WIM
				 && blob_next->rdesc == blob->rdesc);
			     next2 = next2->next)
			{
				blob_last = blob_next;
				blob_count++;
			}
			if (blob_count > 1) {
				/* Reading multiple blobs combined into a single
				 * WIM resource.  They are in the blob list,
				 * sorted by offset; @blob specifies the first
				 * blob in the resource that needs to be read
				 * and @blob_last specifies the last blob in the
				 * resource that needs to be read.  */
				next = next2;
				ret = read_blobs_in_solid_resource(blob, blob_last,
								   blob_count,
								   list_head_offset,
								   sink_cbs,
								   flags & RECOVER_DATA);
				if (ret)
					return ret;
				continue;
			}
		}

		ret = read_blob_with_cbs(blob, sink_cbs, flags & RECOVER_DATA);
		if (unlikely(ret && ret != BEGIN_BLOB_STATUS_SKIP_BLOB))
			return ret;
	}
	return 0;
}
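/*
 * Illustrative usage sketch (kept as a comment; the list_head member name used
 * with offsetof() is an assumption made for the example — any `struct
 * list_head' linking the descriptors works): verify the SHA-1 digest of every
 * blob on a list without otherwise consuming the data.
 *
 *	struct read_blob_callbacks cbs = { };	// no data callbacks needed
 *	ret = read_blob_list(&blob_list,
 *			     offsetof(struct blob_descriptor, extraction_list),
 *			     &cbs, VERIFY_BLOB_HASHES);
 */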
static int
extract_chunk_to_fd(const void *chunk, size_t size, void *_fd)
{
	struct filedes *fd = _fd;
	int ret = full_write(fd, chunk, size);
	if (unlikely(ret))
		ERROR_WITH_ERRNO("Error writing to file descriptor");
	return ret;
}

static int
extract_blob_chunk_to_fd(const struct blob_descriptor *blob, u64 offset,
			 const void *chunk, size_t size, void *_fd)
{
	return extract_chunk_to_fd(chunk, size, _fd);
}
/* Extract the first @size bytes of the specified blob to the specified file
 * descriptor.  This does *not* check the SHA-1 message digest.  */
int
extract_blob_prefix_to_fd(struct blob_descriptor *blob, u64 size,
			  struct filedes *fd)
{
	struct consume_chunk_callback cb = {
		.func = extract_chunk_to_fd,
		.ctx = fd,
	};
	return read_blob_prefix(blob, size, &cb, false);
}
/* Extract the full uncompressed contents of the specified blob to the specified
 * file descriptor.  This checks the SHA-1 message digest.  */
int
extract_blob_to_fd(struct blob_descriptor *blob, struct filedes *fd,
		   bool recover_data)
{
	struct read_blob_callbacks cbs = {
		.continue_blob = extract_blob_chunk_to_fd,
		.ctx = fd,
	};
	return read_blob_with_sha1(blob, &cbs, recover_data);
}
/* Calculate the SHA-1 message digest of a blob and store it in @blob->hash.  */
int
sha1_blob(struct blob_descriptor *blob)
{
	static const struct read_blob_callbacks cbs = {
	};
	return read_blob_with_sha1(blob, &cbs, false);
}
/*
 * Convert a short WIM resource header to a stand-alone WIM resource descriptor.
 *
 * Note: for solid resources some fields still need to be overridden.
 */
void
wim_reshdr_to_desc(const struct wim_reshdr *reshdr, WIMStruct *wim,
		   struct wim_resource_descriptor *rdesc)
{
	rdesc->wim = wim;
	rdesc->offset_in_wim = reshdr->offset_in_wim;
	rdesc->size_in_wim = reshdr->size_in_wim;
	rdesc->uncompressed_size = reshdr->uncompressed_size;
	INIT_LIST_HEAD(&rdesc->blob_list);
	rdesc->flags = reshdr->flags;
	rdesc->is_pipable = wim_is_pipable(wim);
	if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) {
		rdesc->compression_type = wim->compression_type;
		rdesc->chunk_size = wim->chunk_size;
	} else {
		rdesc->compression_type = WIMLIB_COMPRESSION_TYPE_NONE;
		rdesc->chunk_size = 0;
	}
}
/*
 * Convert the short WIM resource header @reshdr to a stand-alone WIM resource
 * descriptor @rdesc, then set @blob to consist of that entire resource.  This
 * should only be used for non-solid resources!
 */
void
wim_reshdr_to_desc_and_blob(const struct wim_reshdr *reshdr, WIMStruct *wim,
			    struct wim_resource_descriptor *rdesc,
			    struct blob_descriptor *blob)
{
	wim_reshdr_to_desc(reshdr, wim, rdesc);
	blob->size = rdesc->uncompressed_size;
	blob_set_is_located_in_wim_resource(blob, rdesc, 0);
}
/* Import a WIM resource header from the on-disk format.  */
void
get_wim_reshdr(const struct wim_reshdr_disk *disk_reshdr,
	       struct wim_reshdr *reshdr)
{
	reshdr->offset_in_wim = le64_to_cpu(disk_reshdr->offset_in_wim);
	reshdr->size_in_wim = (((u64)disk_reshdr->size_in_wim[0] <<  0) |
			       ((u64)disk_reshdr->size_in_wim[1] <<  8) |
			       ((u64)disk_reshdr->size_in_wim[2] << 16) |
			       ((u64)disk_reshdr->size_in_wim[3] << 24) |
			       ((u64)disk_reshdr->size_in_wim[4] << 32) |
			       ((u64)disk_reshdr->size_in_wim[5] << 40) |
			       ((u64)disk_reshdr->size_in_wim[6] << 48));
	reshdr->uncompressed_size = le64_to_cpu(disk_reshdr->uncompressed_size);
	reshdr->flags = disk_reshdr->flags;
}
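/*
 * (Worked example: size_in_wim is a 56-bit little-endian integer packed into 7
 * bytes, leaving the 8th byte of the on-disk field for the flags.  Bytes
 * {0x78, 0x56, 0x34, 0x12, 0x00, 0x00, 0x00} assemble to
 * 0x78 | 0x56<<8 | 0x34<<16 | 0x12<<24 == 0x12345678.)
 */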
/* Export a WIM resource header to the on-disk format.  */
void
put_wim_reshdr(const struct wim_reshdr *reshdr,
	       struct wim_reshdr_disk *disk_reshdr)
{
	disk_reshdr->size_in_wim[0] = reshdr->size_in_wim >>  0;
	disk_reshdr->size_in_wim[1] = reshdr->size_in_wim >>  8;
	disk_reshdr->size_in_wim[2] = reshdr->size_in_wim >> 16;
	disk_reshdr->size_in_wim[3] = reshdr->size_in_wim >> 24;
	disk_reshdr->size_in_wim[4] = reshdr->size_in_wim >> 32;
	disk_reshdr->size_in_wim[5] = reshdr->size_in_wim >> 40;
	disk_reshdr->size_in_wim[6] = reshdr->size_in_wim >> 48;
	disk_reshdr->flags = reshdr->flags;
	disk_reshdr->offset_in_wim = cpu_to_le64(reshdr->offset_in_wim);
	disk_reshdr->uncompressed_size = cpu_to_le64(reshdr->uncompressed_size);
}
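/*
 * Illustrative round-trip sketch (hypothetical test, kept as a comment): any
 * size below 2^56 should survive put_wim_reshdr() followed by get_wim_reshdr().
 *
 *	struct wim_reshdr in = { .size_in_wim = 0x123456789A, .flags = 0 }, out;
 *	struct wim_reshdr_disk disk;
 *
 *	put_wim_reshdr(&in, &disk);
 *	get_wim_reshdr(&disk, &out);
 *	wimlib_assert(out.size_in_wim == in.size_in_wim);
 */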