 * Code for reading blobs and resources, including compressed WIM resources.
 *
 * Copyright (C) 2012, 2013, 2015 Eric Biggers
 *
 * This file is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option) any
 * later version.
 *
 * This file is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this file; if not, see http://www.gnu.org/licenses/.
#include "wimlib/alloca.h"
#include "wimlib/assert.h"
#include "wimlib/bitops.h"
#include "wimlib/blob_table.h"
#include "wimlib/endianness.h"
#include "wimlib/error.h"
#include "wimlib/file_io.h"
#include "wimlib/ntfs_3g.h" /* for read_ntfs_attribute_prefix() */
#include "wimlib/resource.h"
#include "wimlib/sha1.h"
#include "wimlib/wim.h"

#ifdef __WIN32__
/* for read_winnt_stream_prefix(), read_win32_encrypted_file_prefix() */
#  include "wimlib/win32.h"
#endif
/*
 * Compressed WIM resources
 *
 * A compressed resource in a WIM consists of a number of compressed chunks,
 * each of which decompresses to a fixed chunk size (given in the WIM header;
 * usually 32768) except possibly the last, which decompresses to whatever
 * bytes remain.  In addition, immediately before the chunks, a table (the
 * "chunk table") provides the offset, in bytes relative to the end of the
 * chunk table, of the start of each compressed chunk, except for the first
 * chunk which is omitted as it always has an offset of 0.  Therefore, a
 * compressed resource with N chunks will have a chunk table with N - 1
 * entries.
 *
 * Additional information:
 *
 * - Entries in the chunk table are 4 bytes each, except if the uncompressed
 *   size of the resource is greater than 4 GiB, in which case the entries in
 *   the chunk table are 8 bytes each.  In either case, the entries are
 *   unsigned little-endian integers.
 *
 * - The chunk table is included in the compressed size of the resource
 *   provided in the corresponding entry in the WIM's blob table.
 *
 * - The compressed size of a chunk is never greater than the uncompressed
 *   size.  From the compressor's point of view, chunks that would have
 *   compressed to a size greater than or equal to their original size are in
 *   fact stored uncompressed.  From the decompressor's point of view, chunks
 *   with compressed size equal to their uncompressed size are in fact
 *   uncompressed.
 *
 * Furthermore, wimlib supports its own "pipable" WIM format, and for this the
 * structure of compressed resources was modified to allow piped reading and
 * writing.  To make sequential writing possible, the chunk table is placed
 * after the chunks rather than before the chunks, and to make sequential
 * reading possible, each chunk is prefixed with a 4-byte header giving its
 * compressed size as a 32-bit, unsigned, little-endian integer.  Otherwise the
 * details are the same.
 */
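/*
 * Illustrative example (hypothetical numbers): a non-solid resource with an
 * uncompressed size of 100000 bytes and a chunk size of 32768 consists of 4
 * chunks.  Chunks 0-2 each decompress to 32768 bytes, and chunk 3 decompresses
 * to the remaining 100000 - 3 * 32768 = 1696 bytes.  Since 100000 bytes is
 * less than 4 GiB, the chunk table has 3 entries of 4 bytes each, giving the
 * offsets of chunks 1, 2, and 3 (chunk 0's offset of 0 is implicit):
 *
 *	[chunk table: e1 e2 e3][chunk 0][chunk 1][chunk 2][chunk 3]
 *
 * where each entry counts bytes from the end of the chunk table.  In the
 * pipable layout, the chunk table would instead follow chunk 3, and each chunk
 * would be preceded by its own 4-byte compressed-size header.
 */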
/*
 * read_compressed_wim_resource() -
 *
 * Read data from a compressed WIM resource.
 *
 * @rdesc
 *	Description of the compressed WIM resource to read from.
 * @ranges
 *	Nonoverlapping, nonempty ranges of the uncompressed resource data to
 *	read, sorted by increasing offset.
 * @num_ranges
 *	Number of ranges in @ranges; must be at least 1.
 * @cb
 *	Callback function to feed the data being read.  Each call provides the
 *	next chunk of the requested data, uncompressed.  Each chunk will be of
 *	nonzero size and will not cross range boundaries, but otherwise will be
 *	of unspecified size.
 * @cb_ctx
 *	Parameter to pass to @cb.
 *
 * Possible return values:
 *
 *	WIMLIB_ERR_SUCCESS (0)
 *	WIMLIB_ERR_READ			  (errno set)
 *	WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to 0)
 *	WIMLIB_ERR_NOMEM		  (errno set to ENOMEM)
 *	WIMLIB_ERR_DECOMPRESSION	  (errno set to EINVAL)
 *
 *	or other error code returned by the @cb function.
 */
static int
read_compressed_wim_resource(const struct wim_resource_descriptor * const rdesc,
			     const struct data_range * const ranges,
			     const size_t num_ranges,
			     const consume_data_callback_t cb,
			     void * const cb_ctx)
{
	int ret;
	u64 *chunk_offsets = NULL;
	u8 *ubuf = NULL;
	u8 *cbuf = NULL;
	bool chunk_offsets_malloced = false;
	bool ubuf_malloced = false;
	bool cbuf_malloced = false;
	struct wimlib_decompressor *decompressor = NULL;
	wimlib_assert(rdesc != NULL);
	wimlib_assert(resource_is_compressed(rdesc));
	wimlib_assert(cb != NULL);
	wimlib_assert(num_ranges != 0);
	for (size_t i = 0; i < num_ranges; i++) {
		DEBUG("Range %zu/%zu: %"PRIu64"@+%"PRIu64" / %"PRIu64,
		      i + 1, num_ranges, ranges[i].size, ranges[i].offset,
		      rdesc->uncompressed_size);
		wimlib_assert(ranges[i].size != 0);
		wimlib_assert(ranges[i].offset + ranges[i].size >= ranges[i].size);
		wimlib_assert(ranges[i].offset + ranges[i].size <= rdesc->uncompressed_size);
	}
	for (size_t i = 0; i < num_ranges - 1; i++)
		wimlib_assert(ranges[i].offset + ranges[i].size <= ranges[i + 1].offset);
	/* Get the offsets of the first and last bytes of the read. */
	const u64 first_offset = ranges[0].offset;
	const u64 last_offset = ranges[num_ranges - 1].offset + ranges[num_ranges - 1].size - 1;

	/* Get the file descriptor for the WIM. */
	struct filedes * const in_fd = &rdesc->wim->in_fd;

	/* Determine if we're reading a pipable resource from a pipe or not. */
	const bool is_pipe_read = (rdesc->is_pipable && !filedes_is_seekable(in_fd));

	/* Determine if the chunk table is in an alternate format. */
	const bool alt_chunk_table = (rdesc->flags & WIM_RESHDR_FLAG_SOLID)
	/* Get the maximum size of uncompressed chunks in this resource, which
	 * we require be a power of 2. */
	u64 cur_read_offset = rdesc->offset_in_wim;
	int ctype = rdesc->compression_type;
	u32 chunk_size = rdesc->chunk_size;
	if (alt_chunk_table) {
		/* Alternate chunk table format.  Its header specifies the chunk
		 * size and compression format.  Note: it could be read here;
		 * however, the relevant data was already loaded into @rdesc by
		 * read_blob_table(). */
		cur_read_offset += sizeof(struct alt_chunk_table_header_disk);
	}
	if (!is_power_of_2(chunk_size)) {
		ERROR("Invalid compressed resource: "
		      "expected power-of-2 chunk size (got %"PRIu32")",
		      chunk_size);
		ret = WIMLIB_ERR_INVALID_CHUNK_SIZE;
		errno = EINVAL;
		goto out_free_memory;
	}
	/* Get valid decompressor. */
	if (ctype == rdesc->wim->decompressor_ctype &&
	    chunk_size == rdesc->wim->decompressor_max_block_size)
	{
		/* Cached decompressor. */
		decompressor = rdesc->wim->decompressor;
		rdesc->wim->decompressor_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
		rdesc->wim->decompressor = NULL;
	} else {
		ret = wimlib_create_decompressor(ctype, chunk_size,
						 &decompressor);
		if (ret) {
			if (ret != WIMLIB_ERR_NOMEM)
				errno = EINVAL;
			goto out_free_memory;
		}
	}
	const u32 chunk_order = fls32(chunk_size);

	/* Calculate the total number of chunks the resource is divided into. */
	const u64 num_chunks = (rdesc->uncompressed_size + chunk_size - 1) >> chunk_order;

	/* Calculate the 0-based indices of the first and last chunks containing
	 * data that needs to be passed to the callback. */
	const u64 first_needed_chunk = first_offset >> chunk_order;
	const u64 last_needed_chunk = last_offset >> chunk_order;
	/* Calculate the 0-based index of the first chunk that actually needs to
	 * be read.  This is normally first_needed_chunk, but for pipe reads we
	 * must always start from the 0th chunk. */
	const u64 read_start_chunk = (is_pipe_read ? 0 : first_needed_chunk);

	/* Calculate the number of chunk offsets that are needed for the chunks
	 * being read. */
	const u64 num_needed_chunk_offsets =
		last_needed_chunk - read_start_chunk + 1 +
		(last_needed_chunk < num_chunks - 1);
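	/* Illustrative example (hypothetical numbers): with num_chunks = 10,
	 * read_start_chunk = 3, and last_needed_chunk = 5, the offsets of
	 * chunks 3, 4, and 5 are needed, plus the offset of chunk 6 so that
	 * chunk 5's compressed size can be computed as a difference of two
	 * offsets: 5 - 3 + 1 + 1 = 4 offsets in total.  The extra offset is
	 * omitted only when last_needed_chunk is the final chunk, whose
	 * compressed size is instead computed from the end of the resource. */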
	/* Calculate the number of entries in the chunk table.  Normally, it's
	 * one less than the number of chunks, since the first chunk has no
	 * entry.  But in the alternate chunk table format, the chunk entries
	 * contain chunk sizes, not offsets, and there is one per chunk. */
	const u64 num_chunk_entries = (alt_chunk_table ? num_chunks : num_chunks - 1);

	/* Set the size of each chunk table entry based on the resource's
	 * uncompressed size. */
	const u64 chunk_entry_size = get_chunk_entry_size(rdesc->uncompressed_size,
							  alt_chunk_table);

	/* Calculate the size of the chunk table in bytes. */
	const u64 chunk_table_size = num_chunk_entries * chunk_entry_size;

	/* Calculate the size of the chunk table in bytes, including the header
	 * in the case of the alternate chunk table format. */
	const u64 chunk_table_full_size =
		(alt_chunk_table) ? chunk_table_size + sizeof(struct alt_chunk_table_header_disk)
				  : chunk_table_size;
	/* Read the needed chunk table entries into memory and use them
	 * to initialize the chunk_offsets array. */

	u64 first_chunk_entry_to_read;
	u64 last_chunk_entry_to_read;

	if (alt_chunk_table) {
		/* The alternate chunk table contains chunk sizes, not
		 * offsets, so we must always read all preceding entries
		 * in order to determine offsets. */
		first_chunk_entry_to_read = 0;
		last_chunk_entry_to_read = last_needed_chunk;
	} else {
		/* Here we must account for the fact that the first
		 * chunk has no explicit chunk table entry. */

		if (read_start_chunk == 0)
			first_chunk_entry_to_read = 0;
		else
			first_chunk_entry_to_read = read_start_chunk - 1;

		if (last_needed_chunk == 0)
			last_chunk_entry_to_read = 0;
		else
			last_chunk_entry_to_read = last_needed_chunk - 1;

		if (last_needed_chunk < num_chunks - 1)
			last_chunk_entry_to_read++;
	}
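	/* Continuing the example above (hypothetical numbers): in a standard
	 * chunk table, entry i holds the offset of chunk i + 1, so with
	 * read_start_chunk = 3 and last_needed_chunk = 5 (out of 10 chunks)
	 * we read entries 2 through 5: entries 2-4 give the offsets of chunks
	 * 3-5, and entry 5 gives the offset of chunk 6. */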
	const u64 num_chunk_entries_to_read =
		last_chunk_entry_to_read - first_chunk_entry_to_read + 1;

	const u64 chunk_offsets_alloc_size =
		max(num_chunk_entries_to_read,
		    num_needed_chunk_offsets) * sizeof(chunk_offsets[0]);

	if ((size_t)chunk_offsets_alloc_size != chunk_offsets_alloc_size)
		goto oom;

	if (chunk_offsets_alloc_size <= STACK_MAX) {
		chunk_offsets = alloca(chunk_offsets_alloc_size);
	} else {
		chunk_offsets = MALLOC(chunk_offsets_alloc_size);
		if (chunk_offsets == NULL)
			goto oom;
		chunk_offsets_malloced = true;
	}
	const size_t chunk_table_size_to_read =
		num_chunk_entries_to_read * chunk_entry_size;

	const u64 file_offset_of_needed_chunk_entries =
		cur_read_offset
		+ (first_chunk_entry_to_read * chunk_entry_size)
		+ (rdesc->is_pipable ? (rdesc->size_in_wim - chunk_table_size) : 0);

	void * const chunk_table_data =
		(u8 *)chunk_offsets +
		chunk_offsets_alloc_size -
		chunk_table_size_to_read;

	ret = full_pread(in_fd, chunk_table_data, chunk_table_size_to_read,
			 file_offset_of_needed_chunk_entries);
	if (ret)
		goto read_error;
	/* Now fill in chunk_offsets from the entries we have read in
	 * chunk_table_data.  We break aliasing rules here to avoid having
	 * to allocate yet another array. */
	typedef le64 _may_alias_attribute aliased_le64_t;
	typedef le32 _may_alias_attribute aliased_le32_t;
	u64 *chunk_offsets_p = chunk_offsets;
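	/* (_may_alias_attribute is expected to expand to the compiler's
	 * may-alias type attribute, e.g. __attribute__((may_alias)) on gcc and
	 * clang, which makes these typed loads from the raw entry buffer
	 * well-defined.) */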
	if (alt_chunk_table) {
		/* The alternate chunk table format stores chunk sizes;
		 * accumulate them into offsets. */
		aliased_le32_t *raw_entries = chunk_table_data;
		u64 cur_offset = 0;

		for (size_t i = 0; i < num_chunk_entries_to_read; i++) {
			u32 entry = le32_to_cpu(raw_entries[i]);
			if (i >= read_start_chunk)
				*chunk_offsets_p++ = cur_offset;
			cur_offset += entry;
		}
		if (last_needed_chunk < num_chunks - 1)
			*chunk_offsets_p = cur_offset;
	} else {
		if (read_start_chunk == 0)
			*chunk_offsets_p++ = 0;
		if (chunk_entry_size == 4) {
			aliased_le32_t *raw_entries = chunk_table_data;
			for (size_t i = 0; i < num_chunk_entries_to_read; i++)
				*chunk_offsets_p++ = le32_to_cpu(raw_entries[i]);
		} else {
			aliased_le64_t *raw_entries = chunk_table_data;
			for (size_t i = 0; i < num_chunk_entries_to_read; i++)
				*chunk_offsets_p++ = le64_to_cpu(raw_entries[i]);
		}
	}
	/* Set offset to beginning of first chunk to read. */
	cur_read_offset += chunk_offsets[0];
	if (rdesc->is_pipable)
		cur_read_offset += read_start_chunk * sizeof(struct pwm_chunk_hdr);
	else
		cur_read_offset += chunk_table_size;
	/* Allocate buffer for holding the uncompressed data of each chunk. */
	if (chunk_size <= STACK_MAX) {
		ubuf = alloca(chunk_size);
	} else {
		ubuf = MALLOC(chunk_size);
		if (ubuf == NULL)
			goto oom;
		ubuf_malloced = true;
	}

	/* Allocate a temporary buffer for reading compressed chunks, each of
	 * which can be at most @chunk_size - 1 bytes.  This excludes compressed
	 * chunks that are a full @chunk_size bytes, which are actually stored
	 * uncompressed. */
	if (chunk_size - 1 <= STACK_MAX) {
		cbuf = alloca(chunk_size - 1);
	} else {
		cbuf = MALLOC(chunk_size - 1);
		if (cbuf == NULL)
			goto oom;
		cbuf_malloced = true;
	}
	/* Set current data range. */
	const struct data_range *cur_range = ranges;
	const struct data_range * const end_range = &ranges[num_ranges];
	u64 cur_range_pos = cur_range->offset;
	u64 cur_range_end = cur_range->offset + cur_range->size;
	/* Read and process each needed chunk. */
	for (u64 i = read_start_chunk; i <= last_needed_chunk; i++) {

		/* Calculate uncompressed size of next chunk. */
		u32 chunk_usize;
		if ((i == num_chunks - 1) && (rdesc->uncompressed_size & (chunk_size - 1)))
			chunk_usize = (rdesc->uncompressed_size & (chunk_size - 1));
		else
			chunk_usize = chunk_size;
		/* Calculate compressed size of next chunk. */
		u32 chunk_csize;
		if (is_pipe_read) {
			struct pwm_chunk_hdr chunk_hdr;

			ret = full_pread(in_fd, &chunk_hdr,
					 sizeof(chunk_hdr), cur_read_offset);
			if (ret)
				goto read_error;
			chunk_csize = le32_to_cpu(chunk_hdr.compressed_size);
		} else {
			if (i == num_chunks - 1) {
				chunk_csize = rdesc->size_in_wim -
					      chunk_table_full_size -
					      chunk_offsets[i - read_start_chunk];
				if (rdesc->is_pipable)
					chunk_csize -= num_chunks * sizeof(struct pwm_chunk_hdr);
			} else {
				chunk_csize = chunk_offsets[i + 1 - read_start_chunk] -
					      chunk_offsets[i - read_start_chunk];
			}
		}
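		/* E.g. (hypothetical numbers): if chunk 4's data starts at
		 * compressed offset 7000 and chunk 5's data starts at 9000,
		 * then chunk 4's compressed size is 9000 - 7000 = 2000 bytes.
		 * Only the final chunk lacks a next offset, so its size is
		 * measured from the end of the resource's data instead. */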
		if (chunk_csize == 0 || chunk_csize > chunk_usize) {
			ERROR("Invalid chunk size in compressed resource!");
			errno = EINVAL;
			ret = WIMLIB_ERR_DECOMPRESSION;
			goto out_free_memory;
		}
		if (rdesc->is_pipable)
			cur_read_offset += sizeof(struct pwm_chunk_hdr);
		/* Offsets in the uncompressed resource at which this chunk
		 * starts and ends. */
		const u64 chunk_start_offset = i << chunk_order;
		const u64 chunk_end_offset = chunk_start_offset + chunk_usize;

		if (chunk_end_offset <= cur_range_pos) {

			/* The next range does not require data in this chunk,
			 * so skip it. */
			cur_read_offset += chunk_csize;
			if (is_pipe_read) {
				u8 dummy;

				ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
				if (ret)
					goto read_error;
			}
		} else {
			/* Read the chunk and feed data to the callback
			 * function. */
			u8 *read_buf;

			if (chunk_csize == chunk_usize)
				read_buf = ubuf;
			else
				read_buf = cbuf;

			ret = full_pread(in_fd,
					 read_buf,
					 chunk_csize,
					 cur_read_offset);
			if (ret)
				goto read_error;

			if (read_buf == cbuf) {
				DEBUG("Decompressing chunk %"PRIu64" "
				      "(csize=%"PRIu32" usize=%"PRIu32")",
				      i, chunk_csize, chunk_usize);
				ret = wimlib_decompress(cbuf,
							chunk_csize,
							ubuf,
							chunk_usize,
							decompressor);
				if (ret) {
					ERROR("Failed to decompress data!");
					ret = WIMLIB_ERR_DECOMPRESSION;
					errno = EINVAL;
					goto out_free_memory;
				}
			}
			cur_read_offset += chunk_csize;
			/* At least one range requires data in this chunk. */
			do {
				size_t start, end, size;

				/* Calculate how many bytes of data should be
				 * sent to the callback function, taking into
				 * account that data sent to the callback
				 * function must not overlap range boundaries. */
				start = cur_range_pos - chunk_start_offset;
				end = min(cur_range_end, chunk_end_offset) - chunk_start_offset;
				size = end - start;

				ret = (*cb)(&ubuf[start], size, cb_ctx);
				if (ret)
					goto out_free_memory;

				cur_range_pos += size;
				if (cur_range_pos == cur_range_end) {
					/* Advance to next range. */
					if (++cur_range == end_range) {
						cur_range_pos = ~0ULL;
					} else {
						cur_range_pos = cur_range->offset;
						cur_range_end = cur_range->offset + cur_range->size;
					}
				}
			} while (cur_range_pos < chunk_end_offset);
		}
	}
	if (is_pipe_read &&
	    last_offset == rdesc->uncompressed_size - 1 &&
	    chunk_table_size)
	{
		u8 dummy;

		/* If reading a pipable resource from a pipe and the full data
		 * was requested, skip the chunk table at the end so that the
		 * file descriptor is fully clear of the resource after this
		 * returns. */
		cur_read_offset += chunk_table_size;
		ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
		if (ret)
			goto read_error;
	}
	ret = 0;

out_free_memory:
	if (decompressor) {
		wimlib_free_decompressor(rdesc->wim->decompressor);
		rdesc->wim->decompressor = decompressor;
		rdesc->wim->decompressor_ctype = ctype;
		rdesc->wim->decompressor_max_block_size = chunk_size;
	}
	if (chunk_offsets_malloced)
		FREE(chunk_offsets);
	if (ubuf_malloced)
		FREE(ubuf);
	if (cbuf_malloced)
		FREE(cbuf);
	return ret;

oom:
	ERROR("Not enough memory available to read size=%"PRIu64" bytes "
	      "from compressed WIM resource!", last_offset - first_offset + 1);
	errno = ENOMEM;
	ret = WIMLIB_ERR_NOMEM;
	goto out_free_memory;

read_error:
	ERROR_WITH_ERRNO("Error reading compressed WIM resource!");
	ret = WIMLIB_ERR_READ;
	goto out_free_memory;
}
static int
fill_zeroes(u64 size, consume_data_callback_t cb, void *cb_ctx)
{
	if (unlikely(size)) {
		u8 buf[min(size, BUFFER_SIZE)];

		memset(buf, 0, sizeof(buf));

		do {
			size_t len;
			int ret;

			len = min(size, BUFFER_SIZE);
			ret = cb(buf, len, cb_ctx);
			if (ret)
				return ret;
			size -= len;
		} while (size);
	}
	return 0;
}
/* Read raw data from a file descriptor at the specified offset, feeding the
 * data in chunks into the specified callback function. */
static int
read_raw_file_data(struct filedes *in_fd, u64 offset, u64 size,
		   consume_data_callback_t cb, void *cb_ctx)
{
	u8 buf[BUFFER_SIZE];
	size_t bytes_to_read;
	int ret;

	while (size) {
		bytes_to_read = min(sizeof(buf), size);
		ret = full_pread(in_fd, buf, bytes_to_read, offset);
		if (ret) {
			ERROR_WITH_ERRNO("Read error");
			return ret;
		}
		ret = cb(buf, bytes_to_read, cb_ctx);
		if (ret)
			return ret;
		size -= bytes_to_read;
		offset += bytes_to_read;
	}
	return 0;
}
/* A consume_data_callback_t implementation that simply concatenates all chunks
 * of data into an in-memory buffer. */
static int
bufferer_cb(const void *chunk, size_t size, void *_ctx)
{
	u8 **buf_p = _ctx;

	*buf_p = mempcpy(*buf_p, chunk, size);
	return 0;
}
/*
 * read_partial_wim_resource() -
 *
 * Read a range of data from an uncompressed or compressed resource in a WIM
 * file.
 *
 * @rdesc
 *	Description of the WIM resource to read from.
 * @offset
 *	Offset within the uncompressed resource at which to start reading.
 * @size
 *	Number of bytes to read.
 * @cb
 *	Callback function to feed the data being read.  Each call provides the
 *	next chunk of the requested data, uncompressed.  Each chunk will be of
 *	nonzero size and will not cross range boundaries, but otherwise will be
 *	of unspecified size.
 * @cb_ctx
 *	Parameter to pass to @cb.
 *
 * Possible return values:
 *
 *	WIMLIB_ERR_SUCCESS (0)
 *	WIMLIB_ERR_READ			  (errno set)
 *	WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to 0)
 *	WIMLIB_ERR_NOMEM		  (errno set to ENOMEM)
 *	WIMLIB_ERR_DECOMPRESSION	  (errno set to EINVAL)
 *
 *	or other error code returned by the @cb function.
 */
static int
read_partial_wim_resource(const struct wim_resource_descriptor *rdesc,
			  u64 offset, u64 size,
			  consume_data_callback_t cb, void *cb_ctx)
{
	/* Sanity checks. */
	wimlib_assert(offset + size >= offset);
	wimlib_assert(offset + size <= rdesc->uncompressed_size);

	DEBUG("Reading %"PRIu64" @ %"PRIu64" from WIM resource "
	      "%"PRIu64" => %"PRIu64" @ %"PRIu64,
	      size, offset, rdesc->uncompressed_size,
	      rdesc->size_in_wim, rdesc->offset_in_wim);

	/* Trivial case. */
	if (size == 0)
		return 0;

	if (resource_is_compressed(rdesc)) {
		struct data_range range = {
			.offset = offset,
			.size = size,
		};
		return read_compressed_wim_resource(rdesc, &range, 1,
						    cb, cb_ctx);
	} else {
		/* Reading uncompressed resource.  For completeness, handle the
		 * weird case where size_in_wim < uncompressed_size. */

		u64 read_size;
		u64 zeroes_size;
		int ret;

		if (likely(offset + size <= rdesc->size_in_wim) ||
		    rdesc->is_pipable)
		{
			read_size = size;
			zeroes_size = 0;
		} else {
			if (offset >= rdesc->size_in_wim) {
				read_size = 0;
				zeroes_size = size;
			} else {
				read_size = rdesc->size_in_wim - offset;
				zeroes_size = offset + size - rdesc->size_in_wim;
			}
		}

		ret = read_raw_file_data(&rdesc->wim->in_fd,
					 rdesc->offset_in_wim + offset,
					 read_size,
					 cb,
					 cb_ctx);
		if (ret)
			return ret;

		return fill_zeroes(zeroes_size, cb, cb_ctx);
	}
}
/* Read the specified range of uncompressed data from the specified blob, which
 * must be located in a WIM file, into the specified buffer. */
int
read_partial_wim_blob_into_buf(const struct blob_descriptor *blob,
			       size_t size, u64 offset, void *_buf)
{
	u8 *buf = _buf;

	wimlib_assert(blob->blob_location == BLOB_IN_WIM);

	return read_partial_wim_resource(blob->rdesc,
					 blob->offset_in_res + offset,
					 size,
					 bufferer_cb,
					 &buf);
}
/* A consume_data_callback_t implementation that simply ignores the data
 * received. */
static int
skip_chunk_cb(const void *chunk, size_t size, void *_ctx)
{
	return 0;
}

/* Skip over the data of the specified WIM resource. */
int
skip_wim_resource(struct wim_resource_descriptor *rdesc)
{
	DEBUG("Skipping resource (size=%"PRIu64")", rdesc->uncompressed_size);
	return read_partial_wim_resource(rdesc, 0, rdesc->uncompressed_size,
					 skip_chunk_cb, NULL);
}
static int
read_wim_blob_prefix(const struct blob_descriptor *blob, u64 size,
		     consume_data_callback_t cb, void *cb_ctx)
{
	return read_partial_wim_resource(blob->rdesc, blob->offset_in_res, size,
					 cb, cb_ctx);
}
/* This function handles reading blob data that is located in an external file,
 * such as a file that has been added to the WIM image through execution of a
 * wimlib_add_command.
 *
 * This assumes the file can be accessed using the standard POSIX open(),
 * read(), and close().  On Windows this will not necessarily be the case (since
 * the file may need FILE_FLAG_BACKUP_SEMANTICS to be opened, or the file may be
 * encrypted), so Windows uses its own code for its equivalent case. */
static int
read_file_on_disk_prefix(const struct blob_descriptor *blob, u64 size,
			 consume_data_callback_t cb, void *cb_ctx)
{
	int ret;
	int raw_fd;
	struct filedes fd;

	DEBUG("Reading %"PRIu64" bytes from \"%"TS"\"", size, blob->file_on_disk);

	raw_fd = topen(blob->file_on_disk, O_BINARY | O_RDONLY);
	if (raw_fd < 0) {
		ERROR_WITH_ERRNO("Can't open \"%"TS"\"", blob->file_on_disk);
		return WIMLIB_ERR_OPEN;
	}
	filedes_init(&fd, raw_fd);
	ret = read_raw_file_data(&fd, 0, size, cb, cb_ctx);
	filedes_close(&fd);
	return ret;
}
#ifdef WITH_FUSE
static int
read_staging_file_prefix(const struct blob_descriptor *blob, u64 size,
			 consume_data_callback_t cb, void *cb_ctx)
{
	int raw_fd;
	struct filedes fd;
	int ret;

	DEBUG("Reading %"PRIu64" bytes from staging file \"%s\"",
	      size, blob->staging_file_name);

	raw_fd = openat(blob->staging_dir_fd, blob->staging_file_name,
			O_RDONLY | O_NOFOLLOW);
	if (raw_fd < 0) {
		ERROR_WITH_ERRNO("Can't open staging file \"%s\"",
				 blob->staging_file_name);
		return WIMLIB_ERR_OPEN;
	}
	filedes_init(&fd, raw_fd);
	ret = read_raw_file_data(&fd, 0, size, cb, cb_ctx);
	filedes_close(&fd);
	return ret;
}
#endif
/* This function handles the trivial case of reading blob data that is, in fact,
 * already located in an in-memory buffer. */
static int
read_buffer_prefix(const struct blob_descriptor *blob,
		   u64 size, consume_data_callback_t cb, void *cb_ctx)
{
	return (*cb)(blob->attached_buffer, size, cb_ctx);
}
typedef int (*read_blob_prefix_handler_t)(const struct blob_descriptor *blob,
					  u64 size,
					  consume_data_callback_t cb,
					  void *cb_ctx);
/*
 * read_blob_prefix() -
 *
 * Reads the first @size bytes from a generic "blob", which may be located in
 * any one of several locations, such as in a WIM file (compressed or
 * uncompressed), in an external file, or directly in an in-memory buffer.
 *
 * This function feeds the data to a callback function @cb in chunks of
 * unspecified size.
 *
 * Returns 0 on success; nonzero on error.  A nonzero value will be returned if
 * the blob data cannot be successfully read (for a number of different reasons,
 * depending on the blob location), or if @cb returned nonzero, in which case
 * that error code will be returned.
 */
static int
read_blob_prefix(const struct blob_descriptor *blob, u64 size,
		 consume_data_callback_t cb, void *cb_ctx)
{
	static const read_blob_prefix_handler_t handlers[] = {
		[BLOB_IN_WIM] = read_wim_blob_prefix,
		[BLOB_IN_FILE_ON_DISK] = read_file_on_disk_prefix,
		[BLOB_IN_ATTACHED_BUFFER] = read_buffer_prefix,
#ifdef WITH_FUSE
		[BLOB_IN_STAGING_FILE] = read_staging_file_prefix,
#endif
#ifdef WITH_NTFS_3G
		[BLOB_IN_NTFS_VOLUME] = read_ntfs_attribute_prefix,
#endif
#ifdef __WIN32__
		[BLOB_IN_WINNT_FILE_ON_DISK] = read_winnt_stream_prefix,
		[BLOB_WIN32_ENCRYPTED] = read_win32_encrypted_file_prefix,
#endif
	};
	wimlib_assert(blob->blob_location < ARRAY_LEN(handlers)
		      && handlers[blob->blob_location] != NULL);
	wimlib_assert(size <= blob->size);
	return handlers[blob->blob_location](blob, size, cb, cb_ctx);
}
/* Read the full uncompressed data of the specified blob into the specified
 * buffer, which must have space for at least blob->size bytes. */
int
read_full_blob_into_buf(const struct blob_descriptor *blob, void *_buf)
{
	u8 *buf = _buf;
	return read_blob_prefix(blob, blob->size, bufferer_cb, &buf);
}
/* Retrieve the full uncompressed data of the specified blob.  A buffer large
 * enough to hold the data is allocated and returned in @buf_ret. */
int
read_full_blob_into_alloc_buf(const struct blob_descriptor *blob, void **buf_ret)
{
	int ret;
	void *buf;

	if ((size_t)blob->size != blob->size) {
		ERROR("Can't read %"PRIu64" byte blob into memory", blob->size);
		return WIMLIB_ERR_NOMEM;
	}

	buf = MALLOC(blob->size);
	if (buf == NULL)
		return WIMLIB_ERR_NOMEM;

	ret = read_full_blob_into_buf(blob, buf);
	if (ret) {
		FREE(buf);
		return ret;
	}

	*buf_ret = buf;
	return 0;
}
/* Retrieve the full uncompressed data of a WIM resource specified as a raw
 * `wim_reshdr' and the corresponding WIM file.  A buffer large enough to hold
 * the data is allocated and returned in @buf_ret. */
int
wim_reshdr_to_data(const struct wim_reshdr *reshdr, WIMStruct *wim, void **buf_ret)
{
	struct wim_resource_descriptor rdesc;
	struct blob_descriptor blob;

	wim_res_hdr_to_desc(reshdr, wim, &rdesc);
	blob_set_is_located_in_nonsolid_wim_resource(&blob, &rdesc);

	return read_full_blob_into_alloc_buf(&blob, buf_ret);
}
int
wim_reshdr_to_hash(const struct wim_reshdr *reshdr, WIMStruct *wim,
		   u8 hash[SHA1_HASH_SIZE])
{
	struct wim_resource_descriptor rdesc;
	struct blob_descriptor blob;
	int ret;

	wim_res_hdr_to_desc(reshdr, wim, &rdesc);
	blob_set_is_located_in_nonsolid_wim_resource(&blob, &rdesc);
	blob.unhashed = 1;

	ret = sha1_blob(&blob);
	if (ret)
		return ret;

	copy_hash(hash, blob.hash);
	return 0;
}
struct blobifier_context {
	struct read_blob_list_callbacks cbs;
	struct blob_descriptor *cur_blob;
	struct blob_descriptor *next_blob;
	u64 cur_blob_offset;
	struct blob_descriptor *final_blob;
	size_t list_head_offset;
};
static struct blob_descriptor *
next_blob(struct blob_descriptor *blob, size_t list_head_offset)
{
	struct list_head *cur;

	cur = (struct list_head *)((u8 *)blob + list_head_offset);

	return (struct blob_descriptor *)((u8 *)cur->next - list_head_offset);
}
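/* Note: the open-coded pointer arithmetic in next_blob() is the runtime-offset
 * analogue of list_entry(cur->next, struct blob_descriptor, <member>); a
 * container_of()-style macro cannot be used here because the list_head member
 * is identified by a byte offset supplied at runtime rather than by name. */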
/* A consume_data_callback_t implementation that translates raw resource data
 * into blobs, calling the begin_blob, consume_chunk, and end_blob callback
 * functions as appropriate. */
static int
blobifier_cb(const void *chunk, size_t size, void *_ctx)
{
	struct blobifier_context *ctx = _ctx;
	int ret;

	DEBUG("%zu bytes passed to blobifier", size);

	wimlib_assert(ctx->cur_blob != NULL);
	wimlib_assert(size <= ctx->cur_blob->size - ctx->cur_blob_offset);

	if (ctx->cur_blob_offset == 0) {

		/* Starting a new blob. */
		DEBUG("Begin new blob (size=%"PRIu64").", ctx->cur_blob->size);

		ret = (*ctx->cbs.begin_blob)(ctx->cur_blob,
					     ctx->cbs.begin_blob_ctx);
		if (ret)
			return ret;
	}

	/* Consume the chunk. */
	ret = (*ctx->cbs.consume_chunk)(chunk, size,
					ctx->cbs.consume_chunk_ctx);
	ctx->cur_blob_offset += size;
	if (ret)
		return ret;

	if (ctx->cur_blob_offset == ctx->cur_blob->size) {
		/* Finished reading all the data for a blob. */

		ctx->cur_blob_offset = 0;

		DEBUG("End blob (size=%"PRIu64").", ctx->cur_blob->size);
		ret = (*ctx->cbs.end_blob)(ctx->cur_blob, 0,
					   ctx->cbs.end_blob_ctx);
		if (ret)
			return ret;

		/* Advance to next blob. */
		ctx->cur_blob = ctx->next_blob;
		if (ctx->cur_blob != NULL) {
			if (ctx->cur_blob != ctx->final_blob)
				ctx->next_blob = next_blob(ctx->cur_blob,
							   ctx->list_head_offset);
			else
				ctx->next_blob = NULL;
		}
	}
	return 0;
}
struct hasher_context {
	SHA_CTX sha_ctx;
	int flags;
	struct read_blob_list_callbacks cbs;
};
/* Callback for starting to read a blob while calculating its SHA-1 message
 * digest. */
static int
hasher_begin_blob(struct blob_descriptor *blob, void *_ctx)
{
	struct hasher_context *ctx = _ctx;

	sha1_init(&ctx->sha_ctx);

	if (ctx->cbs.begin_blob == NULL)
		return 0;
	else
		return (*ctx->cbs.begin_blob)(blob, ctx->cbs.begin_blob_ctx);
}
/* A consume_data_callback_t implementation that continues calculating the SHA-1
 * message digest of the blob being read, then optionally passes the data on to
 * another consume_data_callback_t implementation.  This allows checking the
 * SHA-1 message digest of a blob being extracted, for example. */
static int
hasher_consume_chunk(const void *chunk, size_t size, void *_ctx)
{
	struct hasher_context *ctx = _ctx;

	sha1_update(&ctx->sha_ctx, chunk, size);
	if (ctx->cbs.consume_chunk == NULL)
		return 0;
	else
		return (*ctx->cbs.consume_chunk)(chunk, size, ctx->cbs.consume_chunk_ctx);
}
/* Callback for finishing reading a blob while calculating its SHA-1 message
 * digest. */
static int
hasher_end_blob(struct blob_descriptor *blob, int status, void *_ctx)
{
	struct hasher_context *ctx = _ctx;
	u8 hash[SHA1_HASH_SIZE];
	int ret;

	if (status) {
		/* Error occurred; the full blob may not have been read. */
		ret = status;
		goto out_next_cb;
	}

	/* Retrieve the final SHA-1 message digest. */
	sha1_final(hash, &ctx->sha_ctx);

	if (blob->unhashed) {
		if (ctx->flags & COMPUTE_MISSING_BLOB_HASHES) {
			/* No SHA-1 message digest was previously present for the
			 * blob.  Set it to the one just calculated. */
			DEBUG("Set SHA-1 message digest for blob "
			      "(size=%"PRIu64").", blob->size);
			copy_hash(blob->hash, hash);
		}
	} else {
		if (ctx->flags & VERIFY_BLOB_HASHES) {
			/* The blob already had a SHA-1 message digest present.
			 * Verify that it is the same as the calculated value. */
			if (!hashes_equal(hash, blob->hash)) {
				if (wimlib_print_errors) {
					tchar expected_hashstr[SHA1_HASH_SIZE * 2 + 1];
					tchar actual_hashstr[SHA1_HASH_SIZE * 2 + 1];
					sprint_hash(blob->hash, expected_hashstr);
					sprint_hash(hash, actual_hashstr);
					ERROR("The data is corrupted!\n"
					      "        (Expected SHA-1=%"TS",\n"
					      "              got SHA-1=%"TS")",
					      expected_hashstr, actual_hashstr);
				}
				ret = WIMLIB_ERR_INVALID_RESOURCE_HASH;
				errno = EINVAL;
				goto out_next_cb;
			}
			DEBUG("SHA-1 message digest okay for "
			      "blob (size=%"PRIu64").", blob->size);
		}
	}
	ret = 0;

out_next_cb:
	if (ctx->cbs.end_blob == NULL)
		return ret;
	else
		return (*ctx->cbs.end_blob)(blob, ret, ctx->cbs.end_blob_ctx);
}
static int
read_full_blob_with_cbs(struct blob_descriptor *blob,
			const struct read_blob_list_callbacks *cbs)
{
	int ret;

	ret = (*cbs->begin_blob)(blob, cbs->begin_blob_ctx);
	if (ret)
		return ret;

	ret = read_blob_prefix(blob, blob->size, cbs->consume_chunk,
			       cbs->consume_chunk_ctx);

	return (*cbs->end_blob)(blob, ret, cbs->end_blob_ctx);
}
/* Read the full data of the specified blob, passing the data into the specified
 * callbacks (all of which are optional) and either checking or computing the
 * SHA-1 message digest of the blob. */
static int
read_full_blob_with_sha1(struct blob_descriptor *blob,
			 const struct read_blob_list_callbacks *cbs)
{
	struct hasher_context hasher_ctx = {
		.flags = VERIFY_BLOB_HASHES | COMPUTE_MISSING_BLOB_HASHES,
		.cbs = *cbs,
	};
	struct read_blob_list_callbacks hasher_cbs = {
		.begin_blob = hasher_begin_blob,
		.begin_blob_ctx = &hasher_ctx,
		.consume_chunk = hasher_consume_chunk,
		.consume_chunk_ctx = &hasher_ctx,
		.end_blob = hasher_end_blob,
		.end_blob_ctx = &hasher_ctx,
	};
	return read_full_blob_with_cbs(blob, &hasher_cbs);
}
static int
read_blobs_in_solid_resource(struct blob_descriptor *first_blob,
			     struct blob_descriptor *last_blob,
			     u64 blob_count,
			     size_t list_head_offset,
			     const struct read_blob_list_callbacks *sink_cbs)
{
	struct data_range *ranges;
	bool ranges_malloced;
	struct blob_descriptor *cur_blob;
	size_t i;
	int ret;
	u64 ranges_alloc_size;

	DEBUG("Reading %"PRIu64" blobs combined in same WIM resource",
	      blob_count);

	/* Set up the data ranges array (one range per blob to read); this way
	 * read_compressed_wim_resource() does not need to be aware of blobs. */

	ranges_alloc_size = blob_count * sizeof(ranges[0]);

	if (unlikely((size_t)ranges_alloc_size != ranges_alloc_size)) {
		ERROR("Too many blobs in one resource!");
		return WIMLIB_ERR_NOMEM;
	}
	if (likely(ranges_alloc_size <= STACK_MAX)) {
		ranges = alloca(ranges_alloc_size);
		ranges_malloced = false;
	} else {
		ranges = MALLOC(ranges_alloc_size);
		if (ranges == NULL) {
			ERROR("Not enough memory to read blobs in one resource!");
			return WIMLIB_ERR_NOMEM;
		}
		ranges_malloced = true;
	}

	for (i = 0, cur_blob = first_blob;
	     i < blob_count;
	     i++, cur_blob = next_blob(cur_blob, list_head_offset))
	{
		ranges[i].offset = cur_blob->offset_in_res;
		ranges[i].size = cur_blob->size;
	}

	struct blobifier_context blobifier_ctx = {
		.cbs = *sink_cbs,
		.cur_blob = first_blob,
		.next_blob = next_blob(first_blob, list_head_offset),
		.cur_blob_offset = 0,
		.final_blob = last_blob,
		.list_head_offset = list_head_offset,
	};

	ret = read_compressed_wim_resource(first_blob->rdesc,
					   ranges,
					   blob_count,
					   blobifier_cb,
					   &blobifier_ctx);

	if (ranges_malloced)
		FREE(ranges);

	if (blobifier_ctx.cur_blob_offset != 0) {
		ret = (*blobifier_ctx.cbs.end_blob)
			(blobifier_ctx.cur_blob,
			 ret,
			 blobifier_ctx.cbs.end_blob_ctx);
	}
	return ret;
}
/*
 * Read a list of blobs, each of which may be in any supported location (e.g.
 * in a WIM or in an external file).  This function optimizes the case where
 * multiple blobs are combined into a single solid compressed WIM resource by
 * reading the blobs in sequential order, only decompressing the solid resource
 * once.
 *
 * @blob_list
 *	List of blobs to read.
 * @list_head_offset
 *	Offset of the `struct list_head' within each `struct blob_descriptor'
 *	that makes up the list.
 * @cbs
 *	Callback functions to accept the blob data.
 * @flags
 *	Bitwise OR of zero or more of the following flags:
 *
 *	VERIFY_BLOB_HASHES:
 *		For all blobs being read that have already had SHA-1 message
 *		digests computed, calculate the SHA-1 message digest of the read
 *		data and compare it with the previously computed value.  If they
 *		do not match, return WIMLIB_ERR_INVALID_RESOURCE_HASH.
 *
 *	COMPUTE_MISSING_BLOB_HASHES:
 *		For all blobs being read that have not yet had their SHA-1
 *		message digests computed, calculate and save their SHA-1 message
 *		digests.
 *
 *	BLOB_LIST_ALREADY_SORTED:
 *		@blob_list is already sorted in sequential order for reading.
 *
 * The callback functions are allowed to delete the current blob from the list
 * if necessary.
 *
 * Returns 0 on success; a nonzero error code on failure.  Failure can occur due
 * to an error reading the data or due to an error status being returned by any
 * of the callback functions.
 */
int
read_blob_list(struct list_head *blob_list,
	       size_t list_head_offset,
	       const struct read_blob_list_callbacks *cbs,
	       int flags)
{
	int ret;
	struct list_head *cur, *next;
	struct blob_descriptor *blob;
	struct hasher_context *hasher_ctx;
	struct read_blob_list_callbacks *sink_cbs;

	if (!(flags & BLOB_LIST_ALREADY_SORTED)) {
		ret = sort_blob_list_by_sequential_order(blob_list, list_head_offset);
		if (ret)
			return ret;
	}

	if (flags & (VERIFY_BLOB_HASHES | COMPUTE_MISSING_BLOB_HASHES)) {
		hasher_ctx = alloca(sizeof(*hasher_ctx));
		*hasher_ctx = (struct hasher_context) {
			.flags = flags,
			.cbs = *cbs,
		};
		sink_cbs = alloca(sizeof(*sink_cbs));
		*sink_cbs = (struct read_blob_list_callbacks) {
			.begin_blob = hasher_begin_blob,
			.begin_blob_ctx = hasher_ctx,
			.consume_chunk = hasher_consume_chunk,
			.consume_chunk_ctx = hasher_ctx,
			.end_blob = hasher_end_blob,
			.end_blob_ctx = hasher_ctx,
		};
	} else {
		sink_cbs = (struct read_blob_list_callbacks *)cbs;
	}

	for (cur = blob_list->next, next = cur->next;
	     cur != blob_list;
	     cur = next, next = cur->next)
	{
		blob = (struct blob_descriptor *)((u8 *)cur - list_head_offset);
		if (blob->blob_location == BLOB_IN_WIM &&
		    blob->size != blob->rdesc->uncompressed_size)
		{
			struct blob_descriptor *blob_next, *blob_last;
			struct list_head *next2;
			u64 blob_count;

			/* The next blob is a proper sub-sequence of a WIM
			 * resource.  See if there are other blobs in the same
			 * resource that need to be read.  Since
			 * sort_blob_list_by_sequential_order() sorted the blobs
			 * by offset in the WIM, this can be determined by
			 * simply scanning forward in the list. */
			blob_last = blob;
			blob_count = 1;
			for (next2 = next;
			     next2 != blob_list
			     && (blob_next = (struct blob_descriptor *)
					((u8 *)next2 - list_head_offset),
				 blob_next->blob_location == BLOB_IN_WIM
				 && blob_next->rdesc == blob->rdesc);
			     next2 = next2->next)
			{
				blob_last = blob_next;
				blob_count++;
			}
			if (blob_count > 1) {
				/* Reading multiple blobs combined into a single
				 * WIM resource.  They are in the blob list,
				 * sorted by offset; @blob specifies the first
				 * blob in the resource that needs to be read
				 * and @blob_last specifies the last blob in the
				 * resource that needs to be read. */
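				/* E.g. (hypothetical): if blobs A, B, and C
				 * all reside in this solid resource and appear
				 * consecutively in the sorted list, the scan
				 * above yields blob_count = 3 with @blob = A
				 * and @blob_last = C, and the resource is
				 * decompressed only once for all three. */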
				ret = read_blobs_in_solid_resource(blob, blob_last,
								   blob_count,
								   list_head_offset,
								   sink_cbs);
				if (ret)
					return ret;
				next = next2;
				continue;
			}
		}

		ret = read_full_blob_with_cbs(blob, sink_cbs);
		if (ret && ret != BEGIN_BLOB_STATUS_SKIP_BLOB)
			return ret;
	}
	return 0;
}
/*
 * Extract the first @size bytes of the specified blob.
 *
 * If @size specifies the full uncompressed size of the blob, then the SHA-1
 * message digest of the uncompressed blob is checked while being extracted.
 *
 * The uncompressed data of the blob is passed in chunks of unspecified size to
 * the @extract_chunk function, passing it @extract_chunk_arg.
 */
int
extract_blob(struct blob_descriptor *blob, u64 size,
	     consume_data_callback_t extract_chunk, void *extract_chunk_arg)
{
	wimlib_assert(size <= blob->size);
	if (size == blob->size) {
		/* Do SHA-1. */
		struct read_blob_list_callbacks cbs = {
			.consume_chunk = extract_chunk,
			.consume_chunk_ctx = extract_chunk_arg,
		};
		return read_full_blob_with_sha1(blob, &cbs);
	} else {
		/* Don't do SHA-1. */
		return read_blob_prefix(blob, size, extract_chunk,
					extract_chunk_arg);
	}
}
/* A consume_data_callback_t implementation that writes the chunk of data to a
 * file descriptor. */
static int
extract_chunk_to_fd(const void *chunk, size_t size, void *_fd_p)
{
	struct filedes *fd = _fd_p;

	int ret = full_write(fd, chunk, size);
	if (ret)
		ERROR_WITH_ERRNO("Error writing to file descriptor");
	return ret;
}
/* Extract the first @size bytes of the specified blob to the specified file
 * descriptor. */
int
extract_blob_to_fd(struct blob_descriptor *blob, struct filedes *fd, u64 size)
{
	return extract_blob(blob, size, extract_chunk_to_fd, fd);
}

/* Extract the full uncompressed contents of the specified blob to the specified
 * file descriptor. */
int
extract_full_blob_to_fd(struct blob_descriptor *blob, struct filedes *fd)
{
	return extract_blob_to_fd(blob, fd, blob->size);
}
/* Calculate the SHA-1 message digest of a blob and store it in @blob->hash. */
int
sha1_blob(struct blob_descriptor *blob)
{
	wimlib_assert(blob->unhashed);
	struct read_blob_list_callbacks cbs = {
	};
	return read_full_blob_with_sha1(blob, &cbs);
}
/*
 * Convert a short WIM resource header to a stand-alone WIM resource descriptor.
 *
 * Note: for solid resources some fields still need to be overridden.
 */
void
wim_res_hdr_to_desc(const struct wim_reshdr *reshdr, WIMStruct *wim,
		    struct wim_resource_descriptor *rdesc)
{
	rdesc->wim = wim;
	rdesc->offset_in_wim = reshdr->offset_in_wim;
	rdesc->size_in_wim = reshdr->size_in_wim;
	rdesc->uncompressed_size = reshdr->uncompressed_size;
	INIT_LIST_HEAD(&rdesc->blob_list);
	rdesc->flags = reshdr->flags;
	rdesc->is_pipable = wim_is_pipable(wim);
	if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) {
		rdesc->compression_type = wim->compression_type;
		rdesc->chunk_size = wim->chunk_size;
	} else {
		rdesc->compression_type = WIMLIB_COMPRESSION_TYPE_NONE;
		rdesc->chunk_size = 0;
	}
}
/* Convert a stand-alone resource descriptor to a WIM resource header. */
void
wim_res_desc_to_hdr(const struct wim_resource_descriptor *rdesc,
		    struct wim_reshdr *reshdr)
{
	reshdr->offset_in_wim = rdesc->offset_in_wim;
	reshdr->size_in_wim = rdesc->size_in_wim;
	reshdr->flags = rdesc->flags;
	reshdr->uncompressed_size = rdesc->uncompressed_size;
}
/* Translates a WIM resource header from the on-disk format into an in-memory
 * format. */
void
get_wim_reshdr(const struct wim_reshdr_disk *disk_reshdr,
	       struct wim_reshdr *reshdr)
{
	reshdr->offset_in_wim = le64_to_cpu(disk_reshdr->offset_in_wim);
	reshdr->size_in_wim = (((u64)disk_reshdr->size_in_wim[0] <<  0) |
			       ((u64)disk_reshdr->size_in_wim[1] <<  8) |
			       ((u64)disk_reshdr->size_in_wim[2] << 16) |
			       ((u64)disk_reshdr->size_in_wim[3] << 24) |
			       ((u64)disk_reshdr->size_in_wim[4] << 32) |
			       ((u64)disk_reshdr->size_in_wim[5] << 40) |
			       ((u64)disk_reshdr->size_in_wim[6] << 48));
	reshdr->uncompressed_size = le64_to_cpu(disk_reshdr->uncompressed_size);
	reshdr->flags = disk_reshdr->flags;
}
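/* Note: size_in_wim is stored on disk as a 56-bit (7-byte) little-endian
 * integer so that the eighth byte of the field can hold the resource flags;
 * hence the byte-by-byte assembly above and the matching split in
 * put_wim_reshdr() below.  Illustrative round trip (hypothetical value):
 * size_in_wim = 0x123456789ABC is stored as the bytes
 * bc 9a 78 56 34 12 00, followed by the flags byte. */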
/* Translates a WIM resource header from an in-memory format into the on-disk
 * format. */
void
put_wim_reshdr(const struct wim_reshdr *reshdr,
	       struct wim_reshdr_disk *disk_reshdr)
{
	disk_reshdr->size_in_wim[0] = reshdr->size_in_wim >>  0;
	disk_reshdr->size_in_wim[1] = reshdr->size_in_wim >>  8;
	disk_reshdr->size_in_wim[2] = reshdr->size_in_wim >> 16;
	disk_reshdr->size_in_wim[3] = reshdr->size_in_wim >> 24;
	disk_reshdr->size_in_wim[4] = reshdr->size_in_wim >> 32;
	disk_reshdr->size_in_wim[5] = reshdr->size_in_wim >> 40;
	disk_reshdr->size_in_wim[6] = reshdr->size_in_wim >> 48;
	disk_reshdr->flags = reshdr->flags;
	disk_reshdr->offset_in_wim = cpu_to_le64(reshdr->offset_in_wim);
	disk_reshdr->uncompressed_size = cpu_to_le64(reshdr->uncompressed_size);
}