4 * Code for reading blobs and resources, including compressed WIM resources.
8 * Copyright (C) 2012, 2013, 2015 Eric Biggers
10 * This file is free software; you can redistribute it and/or modify it under
11 * the terms of the GNU Lesser General Public License as published by the Free
12 * Software Foundation; either version 3 of the License, or (at your option) any
15 * This file is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this file; if not, see http://www.gnu.org/licenses/.
32 #include "wimlib/alloca.h"
33 #include "wimlib/assert.h"
34 #include "wimlib/bitops.h"
35 #include "wimlib/blob_table.h"
36 #include "wimlib/endianness.h"
37 #include "wimlib/error.h"
38 #include "wimlib/file_io.h"
39 #include "wimlib/resource.h"
40 #include "wimlib/sha1.h"
41 #include "wimlib/wim.h"
44 /* for read_winnt_stream_prefix(), read_win32_encrypted_file_prefix() */
45 # include "wimlib/win32.h"
49 /* for read_ntfs_attribute_prefix() */
50 # include "wimlib/ntfs_3g.h"
55 * Compressed WIM resources
57 * A compressed resource in a WIM consists of a number of compressed chunks,
58 * each of which decompresses to a fixed chunk size (given in the WIM header;
59 * usually 32768), except possibly the last, which decompresses to whatever
60 * bytes remain. In addition, immediately before the chunks, a table (the
61 * "chunk table") provides the offset, in bytes relative to the end of the chunk
62 * table, of the start of each compressed chunk. The entry for the first chunk
63 * is omitted, since its offset is always 0. Therefore, a compressed
64 * resource with N chunks will have a chunk table with N - 1 entries.
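 *
 * As a rough illustration of the layout described above (hypothetical variable
 * names, not wimlib code), the chunk count and chunk table entry count implied
 * by a resource's sizes could be computed as:
 *
 *        u64 num_chunks  = (uncompressed_size + chunk_size - 1) / chunk_size;
 *        u64 num_entries = num_chunks - 1;
 *
 * For example, a 100000-byte resource with a 32768-byte chunk size has 4
 * chunks (the last only 1696 bytes long) and a 3-entry chunk table.
 *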
66 * Additional information:
68 * - Entries in the chunk table are 4 bytes each, except if the uncompressed
69 * size of the resource is greater than 4 GiB, in which case the entries in
70 * the chunk table are 8 bytes each. In either case, the entries are unsigned
71 * little-endian integers.
73 * - The chunk table is included in the compressed size of the resource provided
74 * in the corresponding entry in the WIM's blob table.
76 * - The compressed size of a chunk is never greater than the uncompressed size.
77 * From the compressor's point of view, chunks that would have compressed to a
78 * size greater than or equal to their original size are in fact stored
79 * uncompressed. From the decompressor's point of view, chunks with
80 * compressed size equal to their uncompressed size are in fact uncompressed.
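 *
 * A minimal sketch of how a reader might apply the entry-size rule and the
 * stored-uncompressed rule above (hypothetical helper names; the real logic
 * appears in the code further below):
 *
 *        u32 entry_size = (uncompressed_size > UINT32_MAX) ? 8 : 4;
 *
 *        if (chunk_csize == chunk_usize)
 *                use_chunk_as_is(cbuf, chunk_usize);          // stored raw
 *        else
 *                decompress_chunk(cbuf, chunk_csize, ubuf, chunk_usize);
 *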
82 * Furthermore, wimlib supports its own "pipable" WIM format, and for this the
83 * structure of compressed resources was modified to allow piped reading and
84 * writing. To make sequential writing possible, the chunk table is placed
85 * after the chunks rather than before the chunks, and to make sequential
86 * reading possible, each chunk is prefixed with a 4-byte header giving its
87 * compressed size as a 32-bit, unsigned, little-endian integer. Otherwise the
88 * details are the same.
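 *
 * The per-chunk header used by the pipable layout is tiny; judging from its
 * use later in this file it amounts to the following (shown here only to
 * illustrate the description above):
 *
 *        struct pwm_chunk_hdr {
 *                le32 compressed_size;  // little-endian size of the chunk
 *        };                             // that immediately follows
 *
 * A sequential reader therefore consumes one such header, then that many
 * compressed bytes, for every chunk, and finds the chunk table after the
 * final chunk.
 *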
98 * read_compressed_wim_resource() -
100 * Read data from a compressed WIM resource.
103 * Description of the compressed WIM resource to read from.
105 * Nonoverlapping, nonempty ranges of the uncompressed resource data to
106 * read, sorted by increasing offset.
108 * Number of ranges in @ranges; must be at least 1.
110 * Callback function to feed the data being read. Each call provides the
111 * next chunk of the requested data, uncompressed. Each chunk will be of
112 * nonzero size and will not cross range boundaries, but otherwise will be
113 * of unspecified size.
115 * Parameter to pass to @cb.
117 * Possible return values:
119 * WIMLIB_ERR_SUCCESS (0)
120 * WIMLIB_ERR_READ (errno set)
121 * WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to 0)
122 * WIMLIB_ERR_NOMEM (errno set to ENOMEM)
123 * WIMLIB_ERR_DECOMPRESSION (errno set to EINVAL)
125 * or other error code returned by the @cb function.
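 *
 * Illustrative usage only (a hypothetical caller, not code from wimlib):
 *
 *        static int
 *        count_bytes_cb(const void *chunk, size_t size, void *_ctx)
 *        {
 *                *(u64 *)_ctx += size;   // just tally the bytes received
 *                return 0;
 *        }
 *
 *        u64 count = 0;
 *        struct data_range ranges[] = {
 *                { .offset = 0,     .size = 4096 },
 *                { .offset = 65536, .size = 4096 },
 *        };
 *        int ret = read_compressed_wim_resource(rdesc, ranges, 2,
 *                                               count_bytes_cb, &count);
 *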
128 read_compressed_wim_resource(const struct wim_resource_descriptor * const rdesc,
129 const struct data_range * const ranges,
130 const size_t num_ranges,
131 const consume_data_callback_t cb,
137 u64 *chunk_offsets = NULL;
140 bool chunk_offsets_malloced = false;
141 bool ubuf_malloced = false;
142 bool cbuf_malloced = false;
143 struct wimlib_decompressor *decompressor = NULL;
146 wimlib_assert(rdesc != NULL);
147 wimlib_assert(resource_is_compressed(rdesc));
148 wimlib_assert(cb != NULL);
149 wimlib_assert(num_ranges != 0);
150 for (size_t i = 0; i < num_ranges; i++) {
151 DEBUG("Range %zu/%zu: %"PRIu64"@+%"PRIu64" / %"PRIu64,
152 i + 1, num_ranges, ranges[i].size, ranges[i].offset,
153 rdesc->uncompressed_size);
154 wimlib_assert(ranges[i].size != 0);
155 wimlib_assert(ranges[i].offset + ranges[i].size >= ranges[i].size);
156 wimlib_assert(ranges[i].offset + ranges[i].size <= rdesc->uncompressed_size);
158 for (size_t i = 0; i < num_ranges - 1; i++)
159 wimlib_assert(ranges[i].offset + ranges[i].size <= ranges[i + 1].offset);
161 /* Get the offsets of the first and last bytes of the read. */
162 const u64 first_offset = ranges[0].offset;
163 const u64 last_offset = ranges[num_ranges - 1].offset + ranges[num_ranges - 1].size - 1;
165 /* Get the file descriptor for the WIM. */
166 struct filedes * const in_fd = &rdesc->wim->in_fd;
168 /* Determine if we're reading a pipable resource from a pipe or not. */
169 const bool is_pipe_read = (rdesc->is_pipable && !filedes_is_seekable(in_fd));
171 /* Determine if the chunk table is in an alternate format. */
172 const bool alt_chunk_table = (rdesc->flags & WIM_RESHDR_FLAG_SOLID)
175 /* Get the maximum size of uncompressed chunks in this resource, which
176 * we require to be a power of 2. */
177 u64 cur_read_offset = rdesc->offset_in_wim;
178 int ctype = rdesc->compression_type;
179 u32 chunk_size = rdesc->chunk_size;
180 if (alt_chunk_table) {
181 /* Alternate chunk table format. Its header specifies the chunk
182 * size and compression format. Note: it could be read here;
183 * however, the relevant data was already loaded into @rdesc by
184 * read_blob_table(). */
185 cur_read_offset += sizeof(struct alt_chunk_table_header_disk);
188 if (!is_power_of_2(chunk_size)) {
189 ERROR("Invalid compressed resource: "
190 "expected power-of-2 chunk size (got %"PRIu32")",
192 ret = WIMLIB_ERR_INVALID_CHUNK_SIZE;
194 goto out_free_memory;
197 /* Get valid decompressor. */
198 if (ctype == rdesc->wim->decompressor_ctype &&
199 chunk_size == rdesc->wim->decompressor_max_block_size)
201 /* Cached decompressor. */
202 decompressor = rdesc->wim->decompressor;
203 rdesc->wim->decompressor_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
204 rdesc->wim->decompressor = NULL;
206 ret = wimlib_create_decompressor(ctype, chunk_size,
209 if (ret != WIMLIB_ERR_NOMEM)
211 goto out_free_memory;
215 const u32 chunk_order = fls32(chunk_size);
217 /* Calculate the total number of chunks the resource is divided into. */
218 const u64 num_chunks = (rdesc->uncompressed_size + chunk_size - 1) >> chunk_order;
220 /* Calculate the 0-based indices of the first and last chunks containing
221 * data that needs to be passed to the callback. */
222 const u64 first_needed_chunk = first_offset >> chunk_order;
223 const u64 last_needed_chunk = last_offset >> chunk_order;
225 /* Calculate the 0-based index of the first chunk that actually needs to
226 * be read. This is normally first_needed_chunk, but for pipe reads we
227 * must always start from the 0th chunk. */
228 const u64 read_start_chunk = (is_pipe_read ? 0 : first_needed_chunk);
230 /* Calculate the number of chunk offsets that are needed for the chunks
232 const u64 num_needed_chunk_offsets =
233 last_needed_chunk - read_start_chunk + 1 +
234 (last_needed_chunk < num_chunks - 1);
236 /* Calculate the number of entries in the chunk table. Normally, it's
237 * one less than the number of chunks, since the first chunk has no
238 * entry. But in the alternate chunk table format, the chunk entries
239 * contain chunk sizes, not offsets, and there is one per chunk. */
240 const u64 num_chunk_entries = (alt_chunk_table ? num_chunks : num_chunks - 1);
242 /* Set the size of each chunk table entry based on the resource's
243 * uncompressed size. */
244 const u64 chunk_entry_size = get_chunk_entry_size(rdesc->uncompressed_size,
247 /* Calculate the size of the chunk table in bytes. */
248 const u64 chunk_table_size = num_chunk_entries * chunk_entry_size;
250 /* Calculate the size of the chunk table in bytes, including the header
251 * in the case of the alternate chunk table format. */
252 const u64 chunk_table_full_size =
253 (alt_chunk_table) ? chunk_table_size + sizeof(struct alt_chunk_table_header_disk)
257 /* Read the needed chunk table entries into memory and use them
258 * to initialize the chunk_offsets array. */
260 u64 first_chunk_entry_to_read;
261 u64 last_chunk_entry_to_read;
263 if (alt_chunk_table) {
264 /* The alternate chunk table contains chunk sizes, not
265 * offsets, so we must always read all preceding entries
266 * in order to determine offsets. */
267 first_chunk_entry_to_read = 0;
268 last_chunk_entry_to_read = last_needed_chunk;
270 /* Here we must account for the fact that the first
271 * chunk has no explicit chunk table entry. */
273 if (read_start_chunk == 0)
274 first_chunk_entry_to_read = 0;
276 first_chunk_entry_to_read = read_start_chunk - 1;
278 if (last_needed_chunk == 0)
279 last_chunk_entry_to_read = 0;
281 last_chunk_entry_to_read = last_needed_chunk - 1;
283 if (last_needed_chunk < num_chunks - 1)
284 last_chunk_entry_to_read++;
287 const u64 num_chunk_entries_to_read =
288 last_chunk_entry_to_read - first_chunk_entry_to_read + 1;
290 const u64 chunk_offsets_alloc_size =
291 max(num_chunk_entries_to_read,
292 num_needed_chunk_offsets) * sizeof(chunk_offsets[0]);
294 if ((size_t)chunk_offsets_alloc_size != chunk_offsets_alloc_size)
297 if (chunk_offsets_alloc_size <= STACK_MAX) {
298 chunk_offsets = alloca(chunk_offsets_alloc_size);
300 chunk_offsets = MALLOC(chunk_offsets_alloc_size);
301 if (chunk_offsets == NULL)
303 chunk_offsets_malloced = true;
306 const size_t chunk_table_size_to_read =
307 num_chunk_entries_to_read * chunk_entry_size;
309 const u64 file_offset_of_needed_chunk_entries =
311 + (first_chunk_entry_to_read * chunk_entry_size)
312 + (rdesc->is_pipable ? (rdesc->size_in_wim - chunk_table_size) : 0);
314 void * const chunk_table_data =
316 chunk_offsets_alloc_size -
317 chunk_table_size_to_read;
319 ret = full_pread(in_fd, chunk_table_data, chunk_table_size_to_read,
320 file_offset_of_needed_chunk_entries);
324 /* Now fill in chunk_offsets from the entries we have read in
325 * chunk_table_data. We break aliasing rules here to avoid having
326 * to allocate yet another array. */
327 typedef le64 _may_alias_attribute aliased_le64_t;
328 typedef le32 _may_alias_attribute aliased_le32_t;
329 u64 * chunk_offsets_p = chunk_offsets;
331 if (alt_chunk_table) {
333 aliased_le32_t *raw_entries = chunk_table_data;
335 for (size_t i = 0; i < num_chunk_entries_to_read; i++) {
336 u32 entry = le32_to_cpu(raw_entries[i]);
337 if (i >= read_start_chunk)
338 *chunk_offsets_p++ = cur_offset;
341 if (last_needed_chunk < num_chunks - 1)
342 *chunk_offsets_p = cur_offset;
344 if (read_start_chunk == 0)
345 *chunk_offsets_p++ = 0;
347 if (chunk_entry_size == 4) {
348 aliased_le32_t *raw_entries = chunk_table_data;
349 for (size_t i = 0; i < num_chunk_entries_to_read; i++)
350 *chunk_offsets_p++ = le32_to_cpu(raw_entries[i]);
352 aliased_le64_t *raw_entries = chunk_table_data;
353 for (size_t i = 0; i < num_chunk_entries_to_read; i++)
354 *chunk_offsets_p++ = le64_to_cpu(raw_entries[i]);
358 /* Set offset to beginning of first chunk to read. */
359 cur_read_offset += chunk_offsets[0];
360 if (rdesc->is_pipable)
361 cur_read_offset += read_start_chunk * sizeof(struct pwm_chunk_hdr);
363 cur_read_offset += chunk_table_size;
366 /* Allocate buffer for holding the uncompressed data of each chunk. */
367 if (chunk_size <= STACK_MAX) {
368 ubuf = alloca(chunk_size);
370 ubuf = MALLOC(chunk_size);
373 ubuf_malloced = true;
376 /* Allocate a temporary buffer for reading compressed chunks, each of
377 * which can be at most @chunk_size - 1 bytes. This excludes compressed
378 * chunks that are a full @chunk_size bytes, which are actually stored
380 if (chunk_size - 1 <= STACK_MAX) {
381 cbuf = alloca(chunk_size - 1);
383 cbuf = MALLOC(chunk_size - 1);
386 cbuf_malloced = true;
389 /* Set current data range. */
390 const struct data_range *cur_range = ranges;
391 const struct data_range * const end_range = &ranges[num_ranges];
392 u64 cur_range_pos = cur_range->offset;
393 u64 cur_range_end = cur_range->offset + cur_range->size;
395 /* Read and process each needed chunk. */
396 for (u64 i = read_start_chunk; i <= last_needed_chunk; i++) {
398 /* Calculate uncompressed size of next chunk. */
400 if ((i == num_chunks - 1) && (rdesc->uncompressed_size & (chunk_size - 1)))
401 chunk_usize = (rdesc->uncompressed_size & (chunk_size - 1));
403 chunk_usize = chunk_size;
405 /* Calculate compressed size of next chunk. */
408 struct pwm_chunk_hdr chunk_hdr;
410 ret = full_pread(in_fd, &chunk_hdr,
411 sizeof(chunk_hdr), cur_read_offset);
414 chunk_csize = le32_to_cpu(chunk_hdr.compressed_size);
416 if (i == num_chunks - 1) {
417 chunk_csize = rdesc->size_in_wim -
418 chunk_table_full_size -
419 chunk_offsets[i - read_start_chunk];
420 if (rdesc->is_pipable)
421 chunk_csize -= num_chunks * sizeof(struct pwm_chunk_hdr);
423 chunk_csize = chunk_offsets[i + 1 - read_start_chunk] -
424 chunk_offsets[i - read_start_chunk];
427 if (chunk_csize == 0 || chunk_csize > chunk_usize) {
428 ERROR("Invalid chunk size in compressed resource!");
430 ret = WIMLIB_ERR_DECOMPRESSION;
431 goto out_free_memory;
433 if (rdesc->is_pipable)
434 cur_read_offset += sizeof(struct pwm_chunk_hdr);
436 /* Offsets in the uncompressed resource at which this chunk
437 * starts and ends. */
438 const u64 chunk_start_offset = i << chunk_order;
439 const u64 chunk_end_offset = chunk_start_offset + chunk_usize;
441 if (chunk_end_offset <= cur_range_pos) {
443 /* The next range does not require data in this chunk,
445 cur_read_offset += chunk_csize;
449 ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
455 /* Read the chunk and feed data to the callback
459 if (chunk_csize == chunk_usize)
464 ret = full_pread(in_fd,
471 if (read_buf == cbuf) {
472 DEBUG("Decompressing chunk %"PRIu64" "
473 "(csize=%"PRIu32" usize=%"PRIu32")",
474 i, chunk_csize, chunk_usize);
475 ret = wimlib_decompress(cbuf,
481 ERROR("Failed to decompress data!");
482 ret = WIMLIB_ERR_DECOMPRESSION;
484 goto out_free_memory;
487 cur_read_offset += chunk_csize;
489 /* At least one range requires data in this chunk. */
491 size_t start, end, size;
493 /* Calculate how many bytes of data should be
494 * sent to the callback function, taking into
495 * account that data sent to the callback
496 * function must not overlap range boundaries.
498 start = cur_range_pos - chunk_start_offset;
499 end = min(cur_range_end, chunk_end_offset) - chunk_start_offset;
502 ret = (*cb)(&ubuf[start], size, cb_ctx);
505 goto out_free_memory;
507 cur_range_pos += size;
508 if (cur_range_pos == cur_range_end) {
509 /* Advance to next range. */
510 if (++cur_range == end_range) {
511 cur_range_pos = ~0ULL;
513 cur_range_pos = cur_range->offset;
514 cur_range_end = cur_range->offset + cur_range->size;
517 } while (cur_range_pos < chunk_end_offset);
522 last_offset == rdesc->uncompressed_size - 1 &&
526 /* If reading a pipable resource from a pipe and the full data
527 * was requested, skip the chunk table at the end so that the
528 * file descriptor is fully clear of the resource after this
530 cur_read_offset += chunk_table_size;
531 ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
540 wimlib_free_decompressor(rdesc->wim->decompressor);
541 rdesc->wim->decompressor = decompressor;
542 rdesc->wim->decompressor_ctype = ctype;
543 rdesc->wim->decompressor_max_block_size = chunk_size;
545 if (chunk_offsets_malloced)
555 ERROR("Not enough memory available to read size=%"PRIu64" bytes "
556 "from compressed WIM resource!", last_offset - first_offset + 1);
558 ret = WIMLIB_ERR_NOMEM;
559 goto out_free_memory;
562 ERROR_WITH_ERRNO("Error reading compressed WIM resource!");
563 goto out_free_memory;
567 fill_zeroes(u64 size, consume_data_callback_t cb, void *cb_ctx)
569 if (unlikely(size)) {
570 u8 buf[min(size, BUFFER_SIZE)];
572 memset(buf, 0, sizeof(buf));
578 len = min(size, BUFFER_SIZE);
579 ret = cb(buf, len, cb_ctx);
588 /* Read raw data from a file descriptor at the specified offset, feeding the
589 * data in chunks into the specified callback function. */
591 read_raw_file_data(struct filedes *in_fd, u64 offset, u64 size,
592 consume_data_callback_t cb, void *cb_ctx)
595 size_t bytes_to_read;
599 bytes_to_read = min(sizeof(buf), size);
600 ret = full_pread(in_fd, buf, bytes_to_read, offset);
602 ERROR_WITH_ERRNO("Read error");
605 ret = cb(buf, bytes_to_read, cb_ctx);
608 size -= bytes_to_read;
609 offset += bytes_to_read;
614 /* A consume_data_callback_t implementation that simply concatenates all chunks
617 bufferer_cb(const void *chunk, size_t size, void *_ctx)
621 *buf_p = mempcpy(*buf_p, chunk, size);
626 * read_partial_wim_resource() -
628 * Read a range of data from an uncompressed or compressed resource in a WIM
632 * Description of the WIM resource to read from.
634 * Offset within the uncompressed resource at which to start reading.
636 * Number of bytes to read.
638 * Callback function to feed the data being read. Each call provides the
639 * next chunk of the requested data, uncompressed. Each chunk will be of
640 * nonzero size and will not cross range boundaries, but otherwise will be
641 * of unspecified size.
643 * Parameter to pass to @cb.
646 * WIMLIB_ERR_SUCCESS (0)
647 * WIMLIB_ERR_READ (errno set)
648 * WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to 0)
649 * WIMLIB_ERR_NOMEM (errno set to ENOMEM)
650 * WIMLIB_ERR_DECOMPRESSION (errno set to EINVAL)
652 * or other error code returned by the @cb function.
655 read_partial_wim_resource(const struct wim_resource_descriptor *rdesc,
656 u64 offset, u64 size,
657 consume_data_callback_t cb, void *cb_ctx)
660 wimlib_assert(offset + size >= offset);
661 wimlib_assert(offset + size <= rdesc->uncompressed_size);
663 DEBUG("Reading %"PRIu64" @ %"PRIu64" from WIM resource "
664 "%"PRIu64" => %"PRIu64" @ %"PRIu64,
665 size, offset, rdesc->uncompressed_size,
666 rdesc->size_in_wim, rdesc->offset_in_wim);
672 if (resource_is_compressed(rdesc)) {
673 struct data_range range = {
677 return read_compressed_wim_resource(rdesc, &range, 1,
680 /* Reading uncompressed resource. For completeness, handle the
681 * weird case where size_in_wim < uncompressed_size. */
687 if (likely(offset + size <= rdesc->size_in_wim) ||
693 if (offset >= rdesc->size_in_wim) {
697 read_size = rdesc->size_in_wim - offset;
698 zeroes_size = offset + size - rdesc->size_in_wim;
702 ret = read_raw_file_data(&rdesc->wim->in_fd,
703 rdesc->offset_in_wim + offset,
710 return fill_zeroes(zeroes_size, cb, cb_ctx);
714 /* Read the specified range of uncompressed data from the specified blob, which
715 * must be located in a WIM file, into the specified buffer. */
717 read_partial_wim_blob_into_buf(const struct blob_descriptor *blob,
718 size_t size, u64 offset, void *_buf)
722 wimlib_assert(blob->blob_location == BLOB_IN_WIM);
724 return read_partial_wim_resource(blob->rdesc,
725 blob->offset_in_res + offset,
731 /* A consume_data_callback_t implementation that simply ignores the data
734 skip_chunk_cb(const void *chunk, size_t size, void *_ctx)
739 /* Skip over the data of the specified WIM resource. */
741 skip_wim_resource(struct wim_resource_descriptor *rdesc)
743 DEBUG("Skipping resource (size=%"PRIu64")", rdesc->uncompressed_size);
744 return read_partial_wim_resource(rdesc, 0, rdesc->uncompressed_size,
745 skip_chunk_cb, NULL);
749 read_wim_blob_prefix(const struct blob_descriptor *blob, u64 size,
750 consume_data_callback_t cb, void *cb_ctx)
752 return read_partial_wim_resource(blob->rdesc, blob->offset_in_res, size,
756 /* This function handles reading blob data that is located in an external file,
757 * such as a file that has been added to the WIM image through execution of a
758 * wimlib_add_command.
760 * This assumes the file can be accessed using the standard POSIX open(),
761 * read(), and close() calls. On Windows this will not necessarily be the case (since
762 * the file may need FILE_FLAG_BACKUP_SEMANTICS to be opened, or the file may be
763 * encrypted), so Windows uses its own code for its equivalent case. */
765 read_file_on_disk_prefix(const struct blob_descriptor *blob, u64 size,
766 consume_data_callback_t cb, void *cb_ctx)
772 wimlib_assert(size <= blob->size);
774 DEBUG("Reading %"PRIu64" bytes from \"%"TS"\"", size, blob->file_on_disk);
776 raw_fd = topen(blob->file_on_disk, O_BINARY | O_RDONLY);
778 ERROR_WITH_ERRNO("Can't open \"%"TS"\"", blob->file_on_disk);
779 return WIMLIB_ERR_OPEN;
781 filedes_init(&fd, raw_fd);
782 ret = read_raw_file_data(&fd, 0, size, cb, cb_ctx);
789 read_staging_file_prefix(const struct blob_descriptor *blob, u64 size,
790 consume_data_callback_t cb, void *cb_ctx)
796 wimlib_assert(size <= blob->size);
798 DEBUG("Reading %"PRIu64" bytes from staging file \"%s\"",
799 size, blob->staging_file_name);
801 raw_fd = openat(blob->staging_dir_fd, blob->staging_file_name,
802 O_RDONLY | O_NOFOLLOW);
804 ERROR_WITH_ERRNO("Can't open staging file \"%s\"",
805 blob->staging_file_name);
806 return WIMLIB_ERR_OPEN;
808 filedes_init(&fd, raw_fd);
809 ret = read_raw_file_data(&fd, 0, size, cb, cb_ctx);
815 /* This function handles the trivial case of reading blob data that is, in fact,
816 * already located in an in-memory buffer. */
818 read_buffer_prefix(const struct blob_descriptor *blob,
819 u64 size, consume_data_callback_t cb, void *cb_ctx)
821 wimlib_assert(size <= blob->size);
822 return (*cb)(blob->attached_buffer, size, cb_ctx);
825 typedef int (*read_blob_prefix_handler_t)(const struct blob_descriptor *blob,
827 consume_data_callback_t cb,
831 * read_blob_prefix() -
833 * Reads the first @size bytes from a generic "blob", which may be located in
834 * any one of several locations, such as in a WIM file (compressed or
835 * uncompressed), in an external file, or directly in an in-memory buffer.
837 * This function feeds the data to a callback function @cb in chunks of
840 * Returns 0 on success; nonzero on error. A nonzero value will be returned if
841 * the blob data cannot be successfully read (for a number of different reasons,
842 * depending on the blob location), or if @cb returned nonzero, in which case
843 * that error code will be returned.
846 read_blob_prefix(const struct blob_descriptor *blob, u64 size,
847 consume_data_callback_t cb, void *cb_ctx)
849 static const read_blob_prefix_handler_t handlers[] = {
850 [BLOB_IN_WIM] = read_wim_blob_prefix,
851 [BLOB_IN_FILE_ON_DISK] = read_file_on_disk_prefix,
852 [BLOB_IN_ATTACHED_BUFFER] = read_buffer_prefix,
854 [BLOB_IN_STAGING_FILE] = read_staging_file_prefix,
857 [BLOB_IN_NTFS_VOLUME] = read_ntfs_attribute_prefix,
860 [BLOB_IN_WINNT_FILE_ON_DISK] = read_winnt_stream_prefix,
861 [BLOB_WIN32_ENCRYPTED] = read_win32_encrypted_file_prefix,
864 wimlib_assert(blob->blob_location < ARRAY_LEN(handlers)
865 && handlers[blob->blob_location] != NULL);
866 return handlers[blob->blob_location](blob, size, cb, cb_ctx);
869 /* Read the full uncompressed data of the specified blob into the specified
870 * buffer, which must have space for at least blob->size bytes. */
872 read_full_blob_into_buf(const struct blob_descriptor *blob, void *_buf)
875 return read_blob_prefix(blob, blob->size, bufferer_cb, &buf);
878 /* Retrieve the full uncompressed data of the specified blob. A buffer large
879 * enough to hold the data is allocated and returned in @buf_ret. */
881 read_full_blob_into_alloc_buf(const struct blob_descriptor *blob, void **buf_ret)
886 if ((size_t)blob->size != blob->size) {
887 ERROR("Can't read %"PRIu64" byte blob into memory", blob->size);
888 return WIMLIB_ERR_NOMEM;
891 buf = MALLOC(blob->size);
893 return WIMLIB_ERR_NOMEM;
895 ret = read_full_blob_into_buf(blob, buf);
905 /* Retrieve the full uncompressed data of a WIM resource specified as a raw
906 * `wim_reshdr' and the corresponding WIM file. A buffer large enough to hold the
907 * data is allocated and returned in @buf_ret. */
909 wim_reshdr_to_data(const struct wim_reshdr *reshdr, WIMStruct *wim, void **buf_ret)
911 struct wim_resource_descriptor rdesc;
912 struct blob_descriptor blob;
914 wim_res_hdr_to_desc(reshdr, wim, &rdesc);
915 blob_set_is_located_in_wim_resource(&blob, &rdesc);
917 blob.size = rdesc.uncompressed_size;
918 blob.offset_in_res = 0;
920 return read_full_blob_into_alloc_buf(&blob, buf_ret);
924 wim_reshdr_to_hash(const struct wim_reshdr *reshdr, WIMStruct *wim,
925 u8 hash[SHA1_HASH_SIZE])
927 struct wim_resource_descriptor rdesc;
928 struct blob_descriptor blob;
931 wim_res_hdr_to_desc(reshdr, wim, &rdesc);
932 blob_set_is_located_in_wim_resource(&blob, &rdesc);
934 blob.size = rdesc.uncompressed_size;
935 blob.offset_in_res = 0;
938 ret = sha1_blob(&blob);
941 copy_hash(hash, blob.hash);
945 struct blobifier_context {
946 struct read_blob_list_callbacks cbs;
947 struct blob_descriptor *cur_blob;
948 struct blob_descriptor *next_blob;
950 struct blob_descriptor *final_blob;
951 size_t list_head_offset;
954 static struct blob_descriptor *
955 next_blob(struct blob_descriptor *blob, size_t list_head_offset)
957 struct list_head *cur;
959 cur = (struct list_head*)((u8*)blob + list_head_offset);
961 return (struct blob_descriptor*)((u8*)cur->next - list_head_offset);
964 /* A consume_data_callback_t implementation that translates raw resource data
965 * into blobs, calling the begin_blob, consume_chunk, and end_blob callback
966 * functions as appropriate. */
968 blobifier_cb(const void *chunk, size_t size, void *_ctx)
970 struct blobifier_context *ctx = _ctx;
973 DEBUG("%zu bytes passed to blobifier", size);
975 wimlib_assert(ctx->cur_blob != NULL);
976 wimlib_assert(size <= ctx->cur_blob->size - ctx->cur_blob_offset);
978 if (ctx->cur_blob_offset == 0) {
980 /* Starting a new blob. */
981 DEBUG("Begin new blob (size=%"PRIu64").", ctx->cur_blob->size);
983 ret = (*ctx->cbs.begin_blob)(ctx->cur_blob,
984 ctx->cbs.begin_blob_ctx);
989 /* Consume the chunk. */
990 ret = (*ctx->cbs.consume_chunk)(chunk, size,
991 ctx->cbs.consume_chunk_ctx);
992 ctx->cur_blob_offset += size;
996 if (ctx->cur_blob_offset == ctx->cur_blob->size) {
997 /* Finished reading all the data for a blob. */
999 ctx->cur_blob_offset = 0;
1001 DEBUG("End blob (size=%"PRIu64").", ctx->cur_blob->size);
1002 ret = (*ctx->cbs.end_blob)(ctx->cur_blob, 0,
1003 ctx->cbs.end_blob_ctx);
1007 /* Advance to next blob. */
1008 ctx->cur_blob = ctx->next_blob;
1009 if (ctx->cur_blob != NULL) {
1010 if (ctx->cur_blob != ctx->final_blob)
1011 ctx->next_blob = next_blob(ctx->cur_blob,
1012 ctx->list_head_offset);
1014 ctx->next_blob = NULL;
1020 struct hasher_context {
1023 struct read_blob_list_callbacks cbs;
1026 /* Callback for starting to read a blob while calculating its SHA-1 message
1029 hasher_begin_blob(struct blob_descriptor *blob, void *_ctx)
1031 struct hasher_context *ctx = _ctx;
1033 sha1_init(&ctx->sha_ctx);
1035 if (ctx->cbs.begin_blob == NULL)
1038 return (*ctx->cbs.begin_blob)(blob, ctx->cbs.begin_blob_ctx);
1041 /* A consume_data_callback_t implementation that continues calculating the SHA-1
1042 * message digest of the blob being read, then optionally passes the data on to
1043 * another consume_data_callback_t implementation. This allows checking the
1044 * SHA-1 message digest of a blob being extracted, for example. */
1046 hasher_consume_chunk(const void *chunk, size_t size, void *_ctx)
1048 struct hasher_context *ctx = _ctx;
1050 sha1_update(&ctx->sha_ctx, chunk, size);
1051 if (ctx->cbs.consume_chunk == NULL)
1054 return (*ctx->cbs.consume_chunk)(chunk, size, ctx->cbs.consume_chunk_ctx);
1057 /* Callback for finishing reading a blob while calculating its SHA-1 message
1060 hasher_end_blob(struct blob_descriptor *blob, int status, void *_ctx)
1062 struct hasher_context *ctx = _ctx;
1063 u8 hash[SHA1_HASH_SIZE];
1067 /* Error occurred; the full blob may not have been read. */
1072 /* Retrieve the final SHA-1 message digest. */
1073 sha1_final(hash, &ctx->sha_ctx);
1075 if (blob->unhashed) {
1076 if (ctx->flags & COMPUTE_MISSING_BLOB_HASHES) {
1077 /* No SHA-1 message digest was previously present for the
1078 * blob. Set it to the one just calculated. */
1079 DEBUG("Set SHA-1 message digest for blob "
1080 "(size=%"PRIu64").", blob->size);
1081 copy_hash(blob->hash, hash);
1084 if (ctx->flags & VERIFY_BLOB_HASHES) {
1085 /* The blob already had a SHA-1 message digest present.
1086 * Verify that it is the same as the calculated value.
1088 if (!hashes_equal(hash, blob->hash)) {
1089 if (wimlib_print_errors) {
1090 tchar expected_hashstr[SHA1_HASH_SIZE * 2 + 1];
1091 tchar actual_hashstr[SHA1_HASH_SIZE * 2 + 1];
1092 sprint_hash(blob->hash, expected_hashstr);
1093 sprint_hash(hash, actual_hashstr);
1094 ERROR("The blob is corrupted!\n"
1095 " (Expected SHA-1=%"TS",\n"
1096 " got SHA-1=%"TS")",
1097 expected_hashstr, actual_hashstr);
1099 ret = WIMLIB_ERR_INVALID_RESOURCE_HASH;
1103 DEBUG("SHA-1 message digest okay for "
1104 "blob (size=%"PRIu64").", blob->size);
1109 if (ctx->cbs.end_blob == NULL)
1112 return (*ctx->cbs.end_blob)(blob, ret, ctx->cbs.end_blob_ctx);
1116 read_full_blob_with_cbs(struct blob_descriptor *blob,
1117 const struct read_blob_list_callbacks *cbs)
1121 ret = (*cbs->begin_blob)(blob, cbs->begin_blob_ctx);
1125 ret = read_blob_prefix(blob, blob->size, cbs->consume_chunk,
1126 cbs->consume_chunk_ctx);
1128 return (*cbs->end_blob)(blob, ret, cbs->end_blob_ctx);
1131 /* Read the full data of the specified blob, passing the data into the specified
1132 * callbacks (all of which are optional) and either checking or computing the
1133 * SHA-1 message digest of the blob. */
1135 read_full_blob_with_sha1(struct blob_descriptor *blob,
1136 const struct read_blob_list_callbacks *cbs)
1138 struct hasher_context hasher_ctx = {
1139 .flags = VERIFY_BLOB_HASHES | COMPUTE_MISSING_BLOB_HASHES,
1142 struct read_blob_list_callbacks hasher_cbs = {
1143 .begin_blob = hasher_begin_blob,
1144 .begin_blob_ctx = &hasher_ctx,
1145 .consume_chunk = hasher_consume_chunk,
1146 .consume_chunk_ctx = &hasher_ctx,
1147 .end_blob = hasher_end_blob,
1148 .end_blob_ctx = &hasher_ctx,
1150 return read_full_blob_with_cbs(blob, &hasher_cbs);
1154 read_blobs_in_solid_resource(struct blob_descriptor *first_blob,
1155 struct blob_descriptor *last_blob,
1157 size_t list_head_offset,
1158 const struct read_blob_list_callbacks *sink_cbs)
1160 struct data_range *ranges;
1161 bool ranges_malloced;
1162 struct blob_descriptor *cur_blob;
1165 u64 ranges_alloc_size;
1167 DEBUG("Reading %"PRIu64" blobs combined in same WIM resource",
1170 /* Set up the data ranges array (one range per blob to read); this way
1171 * read_compressed_wim_resource() does not need to be aware of blobs.
1174 ranges_alloc_size = blob_count * sizeof(ranges[0]);
1176 if (unlikely((size_t)ranges_alloc_size != ranges_alloc_size)) {
1177 ERROR("Too many blobs in one resource!");
1178 return WIMLIB_ERR_NOMEM;
1180 if (likely(ranges_alloc_size <= STACK_MAX)) {
1181 ranges = alloca(ranges_alloc_size);
1182 ranges_malloced = false;
1184 ranges = MALLOC(ranges_alloc_size);
1185 if (ranges == NULL) {
1186 ERROR("Too many blobs in one resource!");
1187 return WIMLIB_ERR_NOMEM;
1189 ranges_malloced = true;
1192 for (i = 0, cur_blob = first_blob;
1194 i++, cur_blob = next_blob(cur_blob, list_head_offset))
1196 ranges[i].offset = cur_blob->offset_in_res;
1197 ranges[i].size = cur_blob->size;
1200 struct blobifier_context blobifier_ctx = {
1202 .cur_blob = first_blob,
1203 .next_blob = next_blob(first_blob, list_head_offset),
1204 .cur_blob_offset = 0,
1205 .final_blob = last_blob,
1206 .list_head_offset = list_head_offset,
1209 ret = read_compressed_wim_resource(first_blob->rdesc,
1215 if (ranges_malloced)
1219 if (blobifier_ctx.cur_blob_offset != 0) {
1220 ret = (*blobifier_ctx.cbs.end_blob)
1221 (blobifier_ctx.cur_blob,
1223 blobifier_ctx.cbs.end_blob_ctx);
1230 * Read a list of blobs, each of which may be in any supported location (e.g.
1231 * in a WIM or in an external file). This function optimizes the case where
1232 * multiple blobs are combined into a single solid compressed WIM resource by
1233 * reading the blobs in sequential order, only decompressing the solid resource
1237 * List of blobs to read.
1239 * Offset of the `struct list_head' within each `struct blob_descriptor' that makes up
1242 * Callback functions to accept the blob data.
1244 * Bitwise OR of zero or more of the following flags:
1246 * VERIFY_BLOB_HASHES:
1247 * For all blobs being read that have already had SHA-1 message
1248 * digests computed, calculate the SHA-1 message digest of the read
1249 * data and compare it with the previously computed value. If they
1250 * do not match, return WIMLIB_ERR_INVALID_RESOURCE_HASH.
1252 * COMPUTE_MISSING_BLOB_HASHES:
1253 * For all blobs being read that have not yet had their SHA-1
1254 * message digests computed, calculate and save their SHA-1 message
1257 * BLOB_LIST_ALREADY_SORTED:
1258 * @blob_list is already sorted in sequential order for reading.
1260 * The callback functions are allowed to delete the current blob from the list
1263 * Returns 0 on success; a nonzero error code on failure. Failure can occur due
1264 * to an error reading the data or due to an error status being returned by any
1265 * of the callback functions.
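 *
 * Illustrative usage only (hypothetical callback and list-field names; with a
 * hashing flag set, begin_blob and end_blob may be left NULL as shown, since
 * the hasher wrappers above tolerate that):
 *
 *        static int
 *        my_consume_chunk(const void *chunk, size_t size, void *_ctx)
 *        {
 *                // ...do something with the uncompressed data...
 *                return 0;
 *        }
 *
 *        struct read_blob_list_callbacks cbs = {
 *                .consume_chunk     = my_consume_chunk,
 *                .consume_chunk_ctx = &my_ctx,
 *        };
 *        int ret = read_blob_list(&blob_list,
 *                                 offsetof(struct blob_descriptor, my_list_node),
 *                                 &cbs, VERIFY_BLOB_HASHES);
 *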
1268 read_blob_list(struct list_head *blob_list,
1269 size_t list_head_offset,
1270 const struct read_blob_list_callbacks *cbs,
1274 struct list_head *cur, *next;
1275 struct blob_descriptor *blob;
1276 struct hasher_context *hasher_ctx;
1277 struct read_blob_list_callbacks *sink_cbs;
1279 if (!(flags & BLOB_LIST_ALREADY_SORTED)) {
1280 ret = sort_blob_list_by_sequential_order(blob_list, list_head_offset);
1285 if (flags & (VERIFY_BLOB_HASHES | COMPUTE_MISSING_BLOB_HASHES)) {
1286 hasher_ctx = alloca(sizeof(*hasher_ctx));
1287 *hasher_ctx = (struct hasher_context) {
1291 sink_cbs = alloca(sizeof(*sink_cbs));
1292 *sink_cbs = (struct read_blob_list_callbacks) {
1293 .begin_blob = hasher_begin_blob,
1294 .begin_blob_ctx = hasher_ctx,
1295 .consume_chunk = hasher_consume_chunk,
1296 .consume_chunk_ctx = hasher_ctx,
1297 .end_blob = hasher_end_blob,
1298 .end_blob_ctx = hasher_ctx,
1301 sink_cbs = (struct read_blob_list_callbacks*)cbs;
1304 for (cur = blob_list->next, next = cur->next;
1306 cur = next, next = cur->next)
1308 blob = (struct blob_descriptor*)((u8*)cur - list_head_offset);
1310 if (blob->blob_location == BLOB_IN_WIM &&
1311 blob->size != blob->rdesc->uncompressed_size)
1313 struct blob_descriptor *blob_next, *blob_last;
1314 struct list_head *next2;
1317 /* The next blob is a proper sub-sequence of a WIM
1318 * resource. See if there are other blobs in the same
1319 * resource that need to be read. Since
1320 * sort_blob_list_by_sequential_order() sorted the blobs
1321 * by offset in the WIM, this can be determined by
1322 * simply scanning forward in the list. */
1328 && (blob_next = (struct blob_descriptor*)
1329 ((u8*)next2 - list_head_offset),
1330 blob_next->blob_location == BLOB_IN_WIM
1331 && blob_next->rdesc == blob->rdesc);
1332 next2 = next2->next)
1334 blob_last = blob_next;
1337 if (blob_count > 1) {
1338 /* Reading multiple blobs combined into a single
1339 * WIM resource. They are in the blob list,
1340 * sorted by offset; @blob specifies the first
1341 * blob in the resource that needs to be read
1342 * and @blob_last specifies the last blob in the
1343 * resource that needs to be read. */
1345 ret = read_blobs_in_solid_resource(blob, blob_last,
1355 ret = read_full_blob_with_cbs(blob, sink_cbs);
1356 if (ret && ret != BEGIN_BLOB_STATUS_SKIP_BLOB)
1363 * Extract the first @size bytes of the specified blob.
1365 * If @size specifies the full uncompressed size of the blob, then the SHA-1
1366 * message digest of the uncompressed blob is checked while being extracted.
1368 * The uncompressed data of the blob is passed in chunks of unspecified size to
1369 * the @extract_chunk function, passing it @extract_chunk_arg.
1372 extract_blob(struct blob_descriptor *blob, u64 size,
1373 consume_data_callback_t extract_chunk, void *extract_chunk_arg)
1375 wimlib_assert(size <= blob->size);
1376 if (size == blob->size) {
1378 struct read_blob_list_callbacks cbs = {
1379 .consume_chunk = extract_chunk,
1380 .consume_chunk_ctx = extract_chunk_arg,
1382 return read_full_blob_with_sha1(blob, &cbs);
1384 /* Don't do SHA-1. */
1385 return read_blob_prefix(blob, size, extract_chunk,
1390 /* A consume_data_callback_t implementation that writes the chunk of data to a
1391 * file descriptor. */
1393 extract_chunk_to_fd(const void *chunk, size_t size, void *_fd_p)
1395 struct filedes *fd = _fd_p;
1397 int ret = full_write(fd, chunk, size);
1399 ERROR_WITH_ERRNO("Error writing to file descriptor");
1405 /* Extract the first @size bytes of the specified blob to the specified file
1408 extract_blob_to_fd(struct blob_descriptor *blob, struct filedes *fd, u64 size)
1410 return extract_blob(blob, size, extract_chunk_to_fd, fd);
1413 /* Extract the full uncompressed contents of the specified blob to the specified
1414 * file descriptor. */
1416 extract_full_blob_to_fd(struct blob_descriptor *blob, struct filedes *fd)
1418 return extract_blob_to_fd(blob, fd, blob->size);
1421 /* Calculate the SHA-1 message digest of a blob and store it in @blob->hash. */
1423 sha1_blob(struct blob_descriptor *blob)
1425 wimlib_assert(blob->unhashed);
1426 struct read_blob_list_callbacks cbs = {
1428 return read_full_blob_with_sha1(blob, &cbs);
1432 * Convert a short WIM resource header to a stand-alone WIM resource descriptor.
1434 * Note: for solid resources some fields still need to be overridden.
1437 wim_res_hdr_to_desc(const struct wim_reshdr *reshdr, WIMStruct *wim,
1438 struct wim_resource_descriptor *rdesc)
1441 rdesc->offset_in_wim = reshdr->offset_in_wim;
1442 rdesc->size_in_wim = reshdr->size_in_wim;
1443 rdesc->uncompressed_size = reshdr->uncompressed_size;
1444 INIT_LIST_HEAD(&rdesc->blob_list);
1445 rdesc->flags = reshdr->flags;
1446 rdesc->is_pipable = wim_is_pipable(wim);
1447 if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) {
1448 rdesc->compression_type = wim->compression_type;
1449 rdesc->chunk_size = wim->chunk_size;
1451 rdesc->compression_type = WIMLIB_COMPRESSION_TYPE_NONE;
1452 rdesc->chunk_size = 0;
1456 /* Convert a stand-alone resource descriptor to a WIM resource header. */
1458 wim_res_desc_to_hdr(const struct wim_resource_descriptor *rdesc,
1459 struct wim_reshdr *reshdr)
1461 reshdr->offset_in_wim = rdesc->offset_in_wim;
1462 reshdr->size_in_wim = rdesc->size_in_wim;
1463 reshdr->flags = rdesc->flags;
1464 reshdr->uncompressed_size = rdesc->uncompressed_size;
1467 /* Translates a WIM resource header from the on-disk format into an in-memory
1470 get_wim_reshdr(const struct wim_reshdr_disk *disk_reshdr,
1471 struct wim_reshdr *reshdr)
1473 reshdr->offset_in_wim = le64_to_cpu(disk_reshdr->offset_in_wim);
1474 reshdr->size_in_wim = (((u64)disk_reshdr->size_in_wim[0] << 0) |
1475 ((u64)disk_reshdr->size_in_wim[1] << 8) |
1476 ((u64)disk_reshdr->size_in_wim[2] << 16) |
1477 ((u64)disk_reshdr->size_in_wim[3] << 24) |
1478 ((u64)disk_reshdr->size_in_wim[4] << 32) |
1479 ((u64)disk_reshdr->size_in_wim[5] << 40) |
1480 ((u64)disk_reshdr->size_in_wim[6] << 48));
1481 reshdr->uncompressed_size = le64_to_cpu(disk_reshdr->uncompressed_size);
1482 reshdr->flags = disk_reshdr->flags;
1485 /* Translates a WIM resource header from an in-memory format into the on-disk
1488 put_wim_reshdr(const struct wim_reshdr *reshdr,
1489 struct wim_reshdr_disk *disk_reshdr)
1491 disk_reshdr->size_in_wim[0] = reshdr->size_in_wim >> 0;
1492 disk_reshdr->size_in_wim[1] = reshdr->size_in_wim >> 8;
1493 disk_reshdr->size_in_wim[2] = reshdr->size_in_wim >> 16;
1494 disk_reshdr->size_in_wim[3] = reshdr->size_in_wim >> 24;
1495 disk_reshdr->size_in_wim[4] = reshdr->size_in_wim >> 32;
1496 disk_reshdr->size_in_wim[5] = reshdr->size_in_wim >> 40;
1497 disk_reshdr->size_in_wim[6] = reshdr->size_in_wim >> 48;
1498 disk_reshdr->flags = reshdr->flags;
1499 disk_reshdr->offset_in_wim = cpu_to_le64(reshdr->offset_in_wim);
1500 disk_reshdr->uncompressed_size = cpu_to_le64(reshdr->uncompressed_size);