wimlib.net Git - wimlib/blobdiff - src/resource.c
resource.c: fix bug in read_compressed_wim_resource()
index 466de1a5ef8e02c76ba7a509a7e551730ab3253e..efdbdd40aab293825cbb9aebd96c5173b0154182 100644
 /*
  * resource.c
  *
- * Read uncompressed and compressed metadata and file resources from a WIM file.
+ * Code for reading blobs and resources, including compressed WIM resources.
  */
 
 /*
- * Copyright (C) 2012, 2013 Eric Biggers
+ * Copyright (C) 2012, 2013, 2015 Eric Biggers
  *
- * This file is part of wimlib, a library for working with WIM files.
+ * This file is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 3 of the License, or (at your option) any
+ * later version.
  *
- * wimlib is free software; you can redistribute it and/or modify it under the
- * terms of the GNU General Public License as published by the Free Software
- * Foundation; either version 3 of the License, or (at your option) any later
- * version.
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+ * details.
  *
- * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
- * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
- * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * wimlib; if not, see http://www.gnu.org/licenses/.
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this file; if not, see http://www.gnu.org/licenses/.
  */
 
 #ifdef HAVE_CONFIG_H
 #  include "config.h"
 #endif
 
-#include "wimlib.h"
-#include "wimlib/dentry.h"
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "wimlib/alloca.h"
+#include "wimlib/assert.h"
+#include "wimlib/bitops.h"
+#include "wimlib/blob_table.h"
 #include "wimlib/endianness.h"
 #include "wimlib/error.h"
 #include "wimlib/file_io.h"
-#include "wimlib/lookup_table.h"
+#include "wimlib/ntfs_3g.h"
 #include "wimlib/resource.h"
 #include "wimlib/sha1.h"
-
-#ifdef __WIN32__
-/* for read_win32_file_prefix(), read_win32_encrypted_file_prefix() */
-#  include "wimlib/win32.h"
-#endif
-
-#ifdef WITH_NTFS_3G
-/* for read_ntfs_file_prefix() */
-#  include "wimlib/ntfs_3g.h"
-#endif
-
-#ifdef HAVE_ALLOCA_H
-#  include <alloca.h>
-#endif
-#include <errno.h>
-#include <fcntl.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#include <unistd.h>
+#include "wimlib/wim.h"
+#include "wimlib/win32.h"
 
 /*
- *                            Compressed resources
- *
- * A compressed resource in a WIM consists of a number of consecutive LZX or
- * XPRESS-compressed chunks, each of which decompresses to 32768 bytes of data,
- * except possibly the last, which always decompresses to any remaining bytes.
- * In addition, immediately before the chunks, a table (the "chunk table")
- * provides the offset, in bytes relative to the end of the chunk table, of the
- * start of each compressed chunk, except for the first chunk which is omitted
- * as it always has an offset of 0.  Therefore, a compressed resource with N
- * chunks will have a chunk table with N - 1 entries.
+ *                         Compressed WIM resources
  *
- * Additional information:
+ * A compressed resource in a WIM consists of a sequence of chunks.  Each chunk
+ * decompresses to the same size except possibly for the last, which
+ * decompresses to the remaining size.  Chunks that did not compress to less
+ * than their original size are stored uncompressed.
  *
- * - Entries in the chunk table are 4 bytes each, except if the uncompressed
- *   size of the resource is greater than 4 GiB, in which case the entries in
- *   the chunk table are 8 bytes each.  In either case, the entries are unsigned
- *   little-endian integers.
+ * We support three variations on this resource format, independently of the
+ * compression type and chunk size, which can vary as well:
  *
- * - The chunk table is included in the compressed size of the resource provided
- *   in the corresponding entry in the WIM's stream lookup table.
+ * - Original resource format: immediately before the compressed chunks, the
+ *   "chunk table" provides the offset, in bytes relative to the end of the
+ *   chunk table, of the start of each compressed chunk, except for the first
+ *   chunk which is omitted as it always has an offset of 0.  Chunk table
+ *   entries are 32-bit for resources <= 4 GiB uncompressed and 64-bit for
+ *   resources > 4 GiB uncompressed.
  *
- * - The compressed size of a chunk is never greater than the uncompressed size.
- *   From the compressor's point of view, chunks that would have compressed to a
- *   size greater than or equal to their original size are in fact stored
- *   uncompressed.  From the decompresser's point of view, chunks with
- *   compressed size equal to their uncompressed size are in fact uncompressed.
+ * - Solid resource format (distinguished by the use of WIM_RESHDR_FLAG_SOLID
+ *   instead of WIM_RESHDR_FLAG_COMPRESSED): similar to the original format, but
+ *   the resource begins with a 16-byte header which specifies the uncompressed
+ *   size of the resource, the compression type, and the chunk size.  (In the
+ *   original format, these values were instead determined from outside the
+ *   resource itself, from the blob table and the WIM file header.) In addition,
+ *   in this format the entries in the chunk table contain compressed chunk
+ *   sizes rather than offsets.  As a consequence of this, the chunk table
+ *   entries are always 32-bit and there is an entry for chunk 0.
  *
- * Furthermore, wimlib supports its own "pipable" WIM format, and for this the
- * structure of compressed resources was modified to allow piped reading and
- * writing.  To make sequential writing possible, the chunk table is placed
- * after the chunks rather than before the chunks, and to make sequential
- * reading possible, each chunk is prefixed with a 4-byte header giving its
- * compressed size as a 32-bit, unsigned, little-endian integer (less than or
- * equal to 32768).  Otherwise the details are the same.
+ * - Pipable resource format (wimlib extension; all resources in a pipable WIM
+ *   have this format): similar to the original format, but the chunk table is
+ *   at the end of the resource rather than the beginning, and each compressed
+ *   chunk is prefixed with its compressed size as a 32-bit integer.  This
+ *   format allows a resource to be written without rewinding.
  */
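
The format description above boils down to a little arithmetic.  A minimal
illustrative sketch follows (hypothetical helpers, not from the patch; u32/u64
are wimlib's fixed-width integer typedefs): the chunk count rounds the
uncompressed size up to a whole number of chunks, and a chunk table entry is
32-bit for resources up to 4 GiB uncompressed, 64-bit above that, and always
32-bit in the solid format.

    /* Illustrative sketch only: number of chunks in a resource.  The last
     * chunk may be short; chunk_size is a power of 2. */
    static u64
    example_num_chunks(u64 uncompressed_size, u32 chunk_size)
    {
            return (uncompressed_size + chunk_size - 1) / chunk_size;
    }

    /* Illustrative sketch only: size in bytes of one chunk table entry. */
    static u64
    example_chunk_entry_size(u64 uncompressed_size, bool solid_format)
    {
            if (solid_format || uncompressed_size <= ((u64)1 << 32))
                    return 4;       /* 32-bit entries */
            return 8;               /* 64-bit entries */
    }
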
 
-static int decompress(const void *cchunk, unsigned clen,
-                     void *uchunk, unsigned ulen,
-                     int ctype, u32 wim_chunk_size)
-{
-       switch (ctype) {
-       case WIMLIB_COMPRESSION_TYPE_XPRESS:
-               return wimlib_xpress_decompress(cchunk,
-                                               clen,
-                                               uchunk,
-                                               ulen);
-       case WIMLIB_COMPRESSION_TYPE_LZX:
-               return wimlib_lzx_decompress2(cchunk,
-                                             clen,
-                                             uchunk,
-                                             ulen,
-                                             wim_chunk_size);
-       default:
-               wimlib_assert(0);
-               return -1;
-       }
-}
+
+struct data_range {
+       u64 offset;
+       u64 size;
+};
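
A data_range array like the following (hypothetical values) satisfies the
contract documented just below: every range nonempty, nonoverlapping, and
sorted by increasing offset.

    /* Hypothetical example: request bytes [0, 4096) and
     * [1 MiB, 1 MiB + 512) of the uncompressed resource data. */
    const struct data_range ranges[] = {
            { .offset = 0,       .size = 4096 },
            { .offset = 1048576, .size = 512  },
    };
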
 
 /*
- * read_compressed_resource()-
+ * Read data from a compressed WIM resource.
  *
- * Read data from a compressed resource being read from a seekable WIM file.
- * The resource may be either pipable or non-pipable.
+ * @rdesc
+ *     Description of the compressed WIM resource to read from.
+ * @ranges
+ *     Nonoverlapping, nonempty ranges of the uncompressed resource data to
+ *     read, sorted by increasing offset.
+ * @num_ranges
+ *     Number of ranges in @ranges; must be at least 1.
+ * @cbs
+ *     Structure which provides the consume_chunk() callback to feed the data
+ *     being read.  Each call provides the next chunk of the requested data,
+ *     uncompressed.  Each chunk will be nonempty and will not cross range
+ *     boundaries but otherwise will be of unspecified size.
  *
- * @flags may be:
+ * Possible return values:
  *
- * 0:
- *     Just do a normal read, decompressing the data if necessary.
+ *     WIMLIB_ERR_SUCCESS (0)
+ *     WIMLIB_ERR_READ                   (errno set)
+ *     WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to EINVAL)
+ *     WIMLIB_ERR_NOMEM                  (errno set to ENOMEM)
+ *     WIMLIB_ERR_DECOMPRESSION          (errno set to EINVAL)
+ *     WIMLIB_ERR_INVALID_CHUNK_SIZE     (errno set to EINVAL)
  *
- * WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS:
- *     Read the raw contents of the compressed chunks of the compressed
- *     resource.  For pipable resources, this does *not* include the chunk
- *     headers.  If a callback function is being used, it will be called once
- *     for each compressed chunk.  For non-pipable resources, this mode
- *     excludes the chunk table.  For pipable resources, this mode excludes the
- *     stream and chunk headers.
+ *     or other error code returned by the cbs->consume_chunk() function.
  */
 static int
-read_compressed_resource(const struct wim_lookup_table_entry * const lte,
-                        u64 size, const consume_data_callback_t cb,
-                        const u32 in_chunk_size, void * const ctx_or_buf,
-                        const int flags, const u64 offset)
+read_compressed_wim_resource(const struct wim_resource_descriptor * const rdesc,
+                            const struct data_range * const ranges,
+                            const size_t num_ranges,
+                            const struct read_blob_callbacks *cbs)
 {
        int ret;
-
-       const u32 orig_chunk_size = wim_resource_chunk_size(lte);
-       const u32 orig_chunk_order = bsr32(orig_chunk_size);
-
-       wimlib_assert(is_power_of_2(orig_chunk_size));
-       wimlib_assert(cb == NULL || is_power_of_2(in_chunk_size));
-
-       /* Currently, reading raw compressed chunks is only guaranteed to work
-        * correctly when the full resource is requested.  Furthermore, in such
-        * cases the requested size is specified as the compressed size, but
-        * here we change it to an uncompressed size to avoid confusing the rest
-        * of this function.  */
-       if (flags & WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS) {
-               wimlib_assert(offset == 0);
-               wimlib_assert(size == lte->resource_entry.size);
-               wimlib_assert(wim_resource_chunk_size(lte) == in_chunk_size);
-               size = wim_resource_size(lte);
-       }
-
-       wimlib_assert(offset + size <= wim_resource_size(lte));
-
-       /* Handle the trivial case.  */
-       if (size == 0)
-               return 0;
-
        u64 *chunk_offsets = NULL;
-       u8 *out_buf = NULL;
-       u8 *tmp_buf = NULL;
-       void *compressed_buf = NULL;
+       u8 *ubuf = NULL;
+       void *cbuf = NULL;
        bool chunk_offsets_malloced = false;
-       bool out_buf_malloced = false;
-       bool tmp_buf_malloced = false;
-       bool compressed_buf_malloced = false;
-       const size_t stack_max = 32768;
+       bool ubuf_malloced = false;
+       bool cbuf_malloced = false;
+       struct wimlib_decompressor *decompressor = NULL;
+
+       /* Sanity checks  */
+       wimlib_assert(num_ranges != 0);
+       for (size_t i = 0; i < num_ranges; i++) {
+               wimlib_assert(ranges[i].offset + ranges[i].size > ranges[i].offset &&
+                             ranges[i].offset + ranges[i].size <= rdesc->uncompressed_size);
+       }
+       for (size_t i = 0; i < num_ranges - 1; i++)
+               wimlib_assert(ranges[i].offset + ranges[i].size <= ranges[i + 1].offset);
+
+       /* Get the offsets of the first and last bytes of the read.  */
+       const u64 first_offset = ranges[0].offset;
+       const u64 last_offset = ranges[num_ranges - 1].offset + ranges[num_ranges - 1].size - 1;
 
        /* Get the file descriptor for the WIM.  */
-       struct filedes * const in_fd = &lte->wim->in_fd;
+       struct filedes * const in_fd = &rdesc->wim->in_fd;
+
+       /* Determine if we're reading a pipable resource from a pipe or not.  */
+       const bool is_pipe_read = (rdesc->is_pipable && !filedes_is_seekable(in_fd));
+
+       /* Determine if the chunk table is in an alternate format.  */
+       const bool alt_chunk_table = (rdesc->flags & WIM_RESHDR_FLAG_SOLID)
+                                       && !is_pipe_read;
+
+       /* Get the maximum size of uncompressed chunks in this resource, which
+        * we require be a power of 2.  */
+       u64 cur_read_offset = rdesc->offset_in_wim;
+       int ctype = rdesc->compression_type;
+       u32 chunk_size = rdesc->chunk_size;
+       if (alt_chunk_table) {
+               /* Alternate chunk table format.  Its header specifies the chunk
+                * size and compression format.  Note: it could be read here;
+                * however, the relevant data was already loaded into @rdesc by
+                * read_blob_table().  */
+               cur_read_offset += sizeof(struct alt_chunk_table_header_disk);
+       }
 
-       /* Calculate the number of chunks the resource is divided into.  */
-       const u64 num_chunks = wim_resource_chunks(lte);
+       if (unlikely(!is_power_of_2(chunk_size))) {
+               ERROR("Invalid compressed resource: "
+                     "expected power-of-2 chunk size (got %"PRIu32")",
+                     chunk_size);
+               ret = WIMLIB_ERR_INVALID_CHUNK_SIZE;
+               errno = EINVAL;
+               goto out_cleanup;
+       }
 
-       /* Calculate the number of entries in the chunk table; it's one less
-        * than the number of chunks, since the first chunk has no entry.  */
-       const u64 num_chunk_entries = num_chunks - 1;
+       /* Get valid decompressor.  */
+       if (likely(ctype == rdesc->wim->decompressor_ctype &&
+                  chunk_size == rdesc->wim->decompressor_max_block_size))
+       {
+               /* Cached decompressor.  */
+               decompressor = rdesc->wim->decompressor;
+               rdesc->wim->decompressor_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
+               rdesc->wim->decompressor = NULL;
+       } else {
+               ret = wimlib_create_decompressor(ctype, chunk_size,
+                                                &decompressor);
+               if (unlikely(ret)) {
+                       if (ret != WIMLIB_ERR_NOMEM)
+                               errno = EINVAL;
+                       goto out_cleanup;
+               }
+       }
 
-       /* Calculate the 0-based index of the chunk at which the read starts.
-        */
-       const u64 start_chunk = offset >> orig_chunk_order;
+       const u32 chunk_order = fls32(chunk_size);
+
+       /* Calculate the total number of chunks the resource is divided into.  */
+       const u64 num_chunks = (rdesc->uncompressed_size + chunk_size - 1) >> chunk_order;
 
-       /* Calculate the offset, within the start chunk, of the first byte of
-        * the read.  */
-       const u32 start_offset_in_chunk = offset & (orig_chunk_size - 1);
+       /* Calculate the 0-based indices of the first and last chunks containing
+        * data that needs to be passed to the callback.  */
+       const u64 first_needed_chunk = first_offset >> chunk_order;
+       const u64 last_needed_chunk = last_offset >> chunk_order;
 
-       /* Calculate the index of the chunk that contains the last byte of the
-        * read.  */
-       const u64 end_chunk = (offset + size - 1) >> orig_chunk_order;
+       /* Calculate the 0-based index of the first chunk that actually needs to
+        * be read.  This is normally first_needed_chunk, but for pipe reads we
+        * must always start from the 0th chunk.  */
+       const u64 read_start_chunk = (is_pipe_read ? 0 : first_needed_chunk);
 
-       /* Calculate the offset, within the end chunk, of the last byte of the
-        * read.  */
-       const u32 end_offset_in_chunk = (offset + size - 1) & (orig_chunk_size - 1);
+       /* Calculate the number of chunk offsets that are needed for the chunks
+        * being read.  */
+       const u64 num_needed_chunk_offsets =
+               last_needed_chunk - read_start_chunk + 1 +
+               (last_needed_chunk < num_chunks - 1);
 
-       /* Calculate the number of chunk entries are actually needed to read the
-        * requested part of the resource.  Include an entry for the first chunk
-        * even though that doesn't exist in the on-disk table, but take into
-        * account that if the last chunk required for the read is not the last
-        * chunk of the resource, an extra chunk entry is needed so that the
-        * compressed size of the last chunk of the read can be determined.  */
-       const u64 num_alloc_chunk_entries = end_chunk - start_chunk +
-                                           1 + (end_chunk != num_chunks - 1);
+       /* Calculate the number of entries in the chunk table.  Normally, it's
+        * one less than the number of chunks, since the first chunk has no
+        * entry.  But in the alternate chunk table format, the chunk entries
+        * contain chunk sizes, not offsets, and there is one per chunk.  */
+       const u64 num_chunk_entries = (alt_chunk_table ? num_chunks : num_chunks - 1);
 
        /* Set the size of each chunk table entry based on the resource's
         * uncompressed size.  */
-       const u64 chunk_entry_size = (wim_resource_size(lte) > ((u64)1 << 32)) ? 8 : 4;
+       const u64 chunk_entry_size = get_chunk_entry_size(rdesc->uncompressed_size,
+                                                         alt_chunk_table);
 
-       /* Calculate the size, in bytes, of the full chunk table.  */
+       /* Calculate the size of the chunk table in bytes.  */
        const u64 chunk_table_size = num_chunk_entries * chunk_entry_size;
 
-       /* Allocate a buffer to hold a subset of the chunk table.  It will only
-        * contain offsets for the chunks that are actually needed for this
-        * read.  For speed, allocate the buffer on the stack unless it's too
-        * large.  */
-       if (num_alloc_chunk_entries <= stack_max) {
-               chunk_offsets = alloca(num_alloc_chunk_entries * sizeof(u64));
-               chunk_offsets_malloced = false;
-       } else {
-               chunk_offsets = MALLOC(num_alloc_chunk_entries * sizeof(u64));
-               if (!chunk_offsets) {
-                       ERROR("Failed to allocate chunk table "
-                             "with %"PRIu64" entries", num_alloc_chunk_entries);
-                       return WIMLIB_ERR_NOMEM;
+       /* Calculate the size of the chunk table in bytes, including the header
+        * in the case of the alternate chunk table format.  */
+       const u64 chunk_table_full_size =
+               (alt_chunk_table) ? chunk_table_size + sizeof(struct alt_chunk_table_header_disk)
+                                 : chunk_table_size;
+
+       if (!is_pipe_read) {
+               /* Read the needed chunk table entries into memory and use them
+                * to initialize the chunk_offsets array.  */
+
+               u64 first_chunk_entry_to_read;
+               u64 num_chunk_entries_to_read;
+
+               if (alt_chunk_table) {
+                       /* The alternate chunk table contains chunk sizes, not
+                        * offsets, so we always must read all preceding entries
+                        * in order to determine offsets.  */
+                       first_chunk_entry_to_read = 0;
+                       num_chunk_entries_to_read = last_needed_chunk + 1;
+               } else {
+
+                       num_chunk_entries_to_read = last_needed_chunk - read_start_chunk + 1;
+
+                       /* The first chunk has no explicit chunk table entry.  */
+                       if (read_start_chunk == 0) {
+                               num_chunk_entries_to_read--;
+                               first_chunk_entry_to_read = 0;
+                       } else {
+                               first_chunk_entry_to_read = read_start_chunk - 1;
+                       }
+
+                       /* Unless we're reading the final chunk of the resource,
+                        * we need the offset of the chunk following the last
+                        * needed chunk so that the compressed size of the last
+                        * needed chunk can be computed.  */
+                       if (last_needed_chunk < num_chunks - 1)
+                               num_chunk_entries_to_read++;
                }
-               chunk_offsets_malloced = true;
-       }
 
-       /* Set the implicit offset of the first chunk if it's included in the
-        * needed chunks.  */
-       if (start_chunk == 0)
-               chunk_offsets[0] = 0;
-
-       /* Calculate the index of the first needed entry in the chunk table.  */
-       const u64 start_table_idx = (start_chunk == 0) ? 0 : start_chunk - 1;
-
-       /* Calculate the number of entries that need to be read from the chunk
-        * table.  */
-       const u64 num_needed_chunk_entries = (start_chunk == 0) ?
-                               num_alloc_chunk_entries - 1 : num_alloc_chunk_entries;
-
-       /* Calculate the number of bytes of data that need to be read from the
-        * chunk table.  */
-       const size_t chunk_table_needed_size =
-                               num_needed_chunk_entries * chunk_entry_size;
-       if ((u64)chunk_table_needed_size !=
-           num_needed_chunk_entries * chunk_entry_size)
-       {
-               ERROR("Compressed read request too large to fit into memory!");
-               ret = WIMLIB_ERR_NOMEM;
-               goto out_free_memory;
-       }
+               const u64 chunk_offsets_alloc_size =
+                       max(num_chunk_entries_to_read,
+                           num_needed_chunk_offsets) * sizeof(chunk_offsets[0]);
 
-       /* Calculate the byte offset, in the WIM file, of the first chunk table
-        * entry to read.  Take into account that if the WIM file is in the
-        * special "pipable" format, then the chunk table is at the end of the
-        * resource, not the beginning.  */
-       const u64 file_offset_of_needed_chunk_entries =
-               lte->resource_entry.offset
-               + (start_table_idx * chunk_entry_size)
-               + (lte->is_pipable ? (lte->resource_entry.size - chunk_table_size) : 0);
-
-       /* Read the needed chunk table entries into the end of the chunk_offsets
-        * buffer.  */
-       void * const chunk_tab_data = (u8*)&chunk_offsets[num_alloc_chunk_entries] -
-                                     chunk_table_needed_size;
-       ret = full_pread(in_fd, chunk_tab_data, chunk_table_needed_size,
-                        file_offset_of_needed_chunk_entries);
-       if (ret)
-               goto read_error;
-
-       /* Now fill in chunk_offsets from the entries we have read in
-        * chunk_tab_data.  Careful: chunk_offsets aliases chunk_tab_data, which
-        * breaks C's aliasing rules when we read 32-bit integers and store
-        * 64-bit integers.  But since the operations are safe as long as the
-        * compiler doesn't mess with their order, we use the gcc may_alias
-        * extension to tell the compiler that loads from the 32-bit integers
-        * may alias stores to the 64-bit integers.  */
-       {
-               typedef le64 __attribute__((may_alias)) aliased_le64_t;
-               typedef le32 __attribute__((may_alias)) aliased_le32_t;
-               u64 * const chunk_offsets_p = chunk_offsets + (start_chunk == 0);
-               u64 i;
-
-               if (chunk_entry_size == 4) {
-                       aliased_le32_t *raw_entries = (aliased_le32_t*)chunk_tab_data;
-                       for (i = 0; i < num_needed_chunk_entries; i++)
-                               chunk_offsets_p[i] = le32_to_cpu(raw_entries[i]);
-               } else {
-                       aliased_le64_t *raw_entries = (aliased_le64_t*)chunk_tab_data;
-                       for (i = 0; i < num_needed_chunk_entries; i++)
-                               chunk_offsets_p[i] = le64_to_cpu(raw_entries[i]);
+               if (unlikely((size_t)chunk_offsets_alloc_size != chunk_offsets_alloc_size)) {
+                       errno = ENOMEM;
+                       goto oom;
                }
-       }
 
-       /* Calculate file offset of the first chunk that needs to be read.
-        * Note: if the resource is pipable, the entries in the chunk table do
-        * *not* include the chunk headers.  */
-       u64 cur_read_offset = lte->resource_entry.offset + chunk_offsets[0];
-       if (!lte->is_pipable)
-               cur_read_offset += chunk_table_size;
-       else
-               cur_read_offset += start_chunk * sizeof(struct pwm_chunk_hdr);
-
-       /* If using a callback function, allocate a temporary buffer that will
-        * be used to pass data to it.  If writing directly to a buffer instead,
-        * arrange to write data directly into it.  */
-       size_t out_buf_size;
-       u8 *out_buf_end, *out_p;
-       if (cb) {
-               out_buf_size = max(in_chunk_size, orig_chunk_size);
-               if (out_buf_size <= stack_max) {
-                       out_buf = alloca(out_buf_size);
+               if (likely(chunk_offsets_alloc_size <= STACK_MAX)) {
+                       chunk_offsets = alloca(chunk_offsets_alloc_size);
                } else {
-                       out_buf = MALLOC(out_buf_size);
-                       if (out_buf == NULL) {
-                               ret = WIMLIB_ERR_NOMEM;
-                               goto out_free_memory;
-                       }
-                       out_buf_malloced = true;
+                       chunk_offsets = MALLOC(chunk_offsets_alloc_size);
+                       if (unlikely(!chunk_offsets))
+                               goto oom;
+                       chunk_offsets_malloced = true;
                }
-       } else {
-               out_buf_size = size;
-               out_buf = ctx_or_buf;
-       }
-       out_buf_end = out_buf + out_buf_size;
-       out_p = out_buf;
 
-       /* Unless the raw compressed data was requested, allocate a temporary
-        * buffer for reading compressed chunks, each of which can be at most
-        * orig_chunk_size - 1 bytes.  This excludes compressed chunks that are
-        * a full orig_chunk_size bytes, which are actually stored uncompressed.
-        */
-       if (!(flags & WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS)) {
-               if (orig_chunk_size - 1 <= stack_max) {
-                       compressed_buf = alloca(orig_chunk_size - 1);
+               const size_t chunk_table_size_to_read =
+                       num_chunk_entries_to_read * chunk_entry_size;
+
+               const u64 file_offset_of_needed_chunk_entries =
+                       cur_read_offset
+                       + (first_chunk_entry_to_read * chunk_entry_size)
+                       + (rdesc->is_pipable ? (rdesc->size_in_wim - chunk_table_size) : 0);
+
+               void * const chunk_table_data =
+                       (u8*)chunk_offsets +
+                       chunk_offsets_alloc_size -
+                       chunk_table_size_to_read;
+
+               ret = full_pread(in_fd, chunk_table_data, chunk_table_size_to_read,
+                                file_offset_of_needed_chunk_entries);
+               if (unlikely(ret))
+                       goto read_error;
+
+               /* Now fill in chunk_offsets from the entries we have read in
+                * chunk_table_data.  We break aliasing rules here to avoid having
+                * to allocate yet another array.  */
+               typedef le64 _may_alias_attribute aliased_le64_t;
+               typedef le32 _may_alias_attribute aliased_le32_t;
+               u64 * chunk_offsets_p = chunk_offsets;
+
+               if (alt_chunk_table) {
+                       u64 cur_offset = 0;
+                       aliased_le32_t *raw_entries = chunk_table_data;
+
+                       for (size_t i = 0; i < num_chunk_entries_to_read; i++) {
+                               u32 entry = le32_to_cpu(raw_entries[i]);
+                               if (i >= read_start_chunk)
+                                       *chunk_offsets_p++ = cur_offset;
+                               cur_offset += entry;
+                       }
+                       if (last_needed_chunk < num_chunks - 1)
+                               *chunk_offsets_p = cur_offset;
                } else {
-                       compressed_buf = MALLOC(orig_chunk_size - 1);
-                       if (compressed_buf == NULL) {
-                               ret = WIMLIB_ERR_NOMEM;
-                               goto out_free_memory;
+                       if (read_start_chunk == 0)
+                               *chunk_offsets_p++ = 0;
+
+                       if (chunk_entry_size == 4) {
+                               aliased_le32_t *raw_entries = chunk_table_data;
+                               for (size_t i = 0; i < num_chunk_entries_to_read; i++)
+                                       *chunk_offsets_p++ = le32_to_cpu(raw_entries[i]);
+                       } else {
+                               aliased_le64_t *raw_entries = chunk_table_data;
+                               for (size_t i = 0; i < num_chunk_entries_to_read; i++)
+                                       *chunk_offsets_p++ = le64_to_cpu(raw_entries[i]);
                        }
-                       compressed_buf_malloced = true;
                }
+
+               /* Set offset to beginning of first chunk to read.  */
+               cur_read_offset += chunk_offsets[0];
+               if (rdesc->is_pipable)
+                       cur_read_offset += read_start_chunk * sizeof(struct pwm_chunk_hdr);
+               else
+                       cur_read_offset += chunk_table_size;
        }
 
-       /* Allocate yet another temporary buffer, this one for reading partial
-        * chunks.  */
-       if (start_offset_in_chunk != 0 ||
-           (end_offset_in_chunk != orig_chunk_size - 1 &&
-            offset + size != wim_resource_size(lte)))
-       {
-               if (orig_chunk_size <= stack_max) {
-                       tmp_buf = alloca(orig_chunk_size);
-               } else {
-                       tmp_buf = MALLOC(orig_chunk_size);
-                       if (tmp_buf == NULL) {
-                               ret = WIMLIB_ERR_NOMEM;
-                               goto out_free_memory;
-                       }
-                       tmp_buf_malloced = true;
-               }
+       /* Allocate buffer for holding the uncompressed data of each chunk.  */
+       if (chunk_size <= STACK_MAX) {
+               ubuf = alloca(chunk_size);
+       } else {
+               ubuf = MALLOC(chunk_size);
+               if (unlikely(!ubuf))
+                       goto oom;
+               ubuf_malloced = true;
        }
 
-       /* Read, and possibly decompress, each needed chunk, either writing the
-        * data directly into the @ctx_or_buf buffer or passing it to the @cb
-        * callback function.  */
-       for (u64 i = start_chunk; i <= end_chunk; i++) {
+       /* Allocate a temporary buffer for reading compressed chunks, each of
+        * which can be at most @chunk_size - 1 bytes.  This excludes compressed
+        * chunks that are a full @chunk_size bytes, which are actually stored
+        * uncompressed.  */
+       if (chunk_size - 1 <= STACK_MAX) {
+               cbuf = alloca(chunk_size - 1);
+       } else {
+               cbuf = MALLOC(chunk_size - 1);
+               if (unlikely(!cbuf))
+                       goto oom;
+               cbuf_malloced = true;
+       }
 
-               /* If the resource is pipable, skip the chunk header.  */
-               if (lte->is_pipable)
-                       cur_read_offset += sizeof(struct pwm_chunk_hdr);
+       /* Set current data range.  */
+       const struct data_range *cur_range = ranges;
+       const struct data_range * const end_range = &ranges[num_ranges];
+       u64 cur_range_pos = cur_range->offset;
+       u64 cur_range_end = cur_range->offset + cur_range->size;
 
-               /* Calculate the sizes of the compressed chunk and of the
-                * uncompressed chunk.  */
-               u32 compressed_chunk_size;
-               u32 uncompressed_chunk_size;
-               if (i != num_chunks - 1) {
-                       /* Not the last chunk.  Compressed size is given by
-                        * difference of chunk table entries; uncompressed size
-                        * is always the WIM chunk size.  */
-                       compressed_chunk_size = chunk_offsets[i + 1 - start_chunk] -
-                                               chunk_offsets[i - start_chunk];
-                       uncompressed_chunk_size = orig_chunk_size;
-               } else {
-                       /* Last chunk.  Compressed size is the remaining size in
-                        * the compressed resource; uncompressed size is the
-                        * remaining size in the uncompressed resource.  */
-                       compressed_chunk_size = lte->resource_entry.size -
-                                               chunk_table_size -
-                                               chunk_offsets[i - start_chunk];
-                       if (lte->is_pipable)
-                               compressed_chunk_size -= num_chunks *
-                                                        sizeof(struct pwm_chunk_hdr);
-
-                       if ((wim_resource_size(lte) & (orig_chunk_size - 1)) == 0)
-                               uncompressed_chunk_size = orig_chunk_size;
-                       else
-                               uncompressed_chunk_size = wim_resource_size(lte) &
-                                                         (orig_chunk_size - 1);
-               }
+       /* Read and process each needed chunk.  */
+       for (u64 i = read_start_chunk; i <= last_needed_chunk; i++) {
 
-               /* Calculate how much of this chunk needs to be read.  */
+               /* Calculate uncompressed size of next chunk.  */
+               u32 chunk_usize;
+               if ((i == num_chunks - 1) && (rdesc->uncompressed_size & (chunk_size - 1)))
+                       chunk_usize = (rdesc->uncompressed_size & (chunk_size - 1));
+               else
+                       chunk_usize = chunk_size;
 
-               u32 partial_chunk_size;
-               u32 start_offset = 0;
-               u32 end_offset = orig_chunk_size - 1;
+               /* Calculate compressed size of next chunk.  */
+               u32 chunk_csize;
+               if (is_pipe_read) {
+                       struct pwm_chunk_hdr chunk_hdr;
 
-               if (flags & WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS) {
-                       partial_chunk_size = compressed_chunk_size;
+                       ret = full_pread(in_fd, &chunk_hdr,
+                                        sizeof(chunk_hdr), cur_read_offset);
+                       if (unlikely(ret))
+                               goto read_error;
+                       chunk_csize = le32_to_cpu(chunk_hdr.compressed_size);
                } else {
-                       if (i == start_chunk)
-                               start_offset = start_offset_in_chunk;
+                       if (i == num_chunks - 1) {
+                               chunk_csize = rdesc->size_in_wim -
+                                             chunk_table_full_size -
+                                             chunk_offsets[i - read_start_chunk];
+                               if (rdesc->is_pipable)
+                                       chunk_csize -= num_chunks * sizeof(struct pwm_chunk_hdr);
+                       } else {
+                               chunk_csize = chunk_offsets[i + 1 - read_start_chunk] -
+                                             chunk_offsets[i - read_start_chunk];
+                       }
+               }
+               if (unlikely(chunk_csize == 0 || chunk_csize > chunk_usize)) {
+                       ERROR("Invalid chunk size in compressed resource!");
+                       errno = EINVAL;
+                       ret = WIMLIB_ERR_DECOMPRESSION;
+                       goto out_cleanup;
+               }
+               if (rdesc->is_pipable)
+                       cur_read_offset += sizeof(struct pwm_chunk_hdr);
 
-                       if (i == end_chunk)
-                               end_offset = end_offset_in_chunk;
+               /* Offsets in the uncompressed resource at which this chunk
+                * starts and ends.  */
+               const u64 chunk_start_offset = i << chunk_order;
+               const u64 chunk_end_offset = chunk_start_offset + chunk_usize;
 
-                       partial_chunk_size = end_offset + 1 - start_offset;
-               }
+               if (chunk_end_offset <= cur_range_pos) {
 
-               if (compressed_chunk_size == uncompressed_chunk_size ||
-                   (flags & WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS))
-               {
-                       /* Chunk stored uncompressed, or reading raw chunk data.  */
-                       ret = full_pread(in_fd,
-                                        out_p,
-                                        partial_chunk_size,
-                                        cur_read_offset + start_offset);
-                       if (ret)
-                               goto read_error;
+                       /* The next range does not require data in this chunk,
+                        * so skip it.  */
+                       cur_read_offset += chunk_csize;
+                       if (is_pipe_read) {
+                               u8 dummy;
+
+                               ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
+                               if (unlikely(ret))
+                                       goto read_error;
+                       }
                } else {
-                       /* Compressed chunk and not doing raw read.  */
-                       u8 *target;
 
-                       /* Read the compressed data into compressed_buf.  */
-                       ret = full_pread(in_fd,
-                                        compressed_buf,
-                                        compressed_chunk_size,
-                                        cur_read_offset);
-                       if (ret)
-                               goto read_error;
+                       /* Read the chunk and feed data to the callback
+                        * function.  */
+                       u8 *read_buf;
 
-                       /* For partial chunks we must buffer the uncompressed
-                        * data because we don't need all of it.  */
-                       if (partial_chunk_size == uncompressed_chunk_size)
-                               target = out_p;
+                       if (chunk_csize == chunk_usize)
+                               read_buf = ubuf;
                        else
-                               target = tmp_buf;
-
-                       /* Decompress the chunk.  */
-                       ret = decompress(compressed_buf,
-                                        compressed_chunk_size,
-                                        target,
-                                        uncompressed_chunk_size,
-                                        wim_resource_compression_type(lte),
-                                        orig_chunk_size);
-                       if (ret) {
-                               ERROR("Failed to decompress data.");
-                               ret = WIMLIB_ERR_DECOMPRESSION;
-                               errno = EINVAL;
-                               goto out_free_memory;
-                       }
-                       if (partial_chunk_size != uncompressed_chunk_size)
-                               memcpy(out_p, tmp_buf + start_offset,
-                                      partial_chunk_size);
-               }
-
-               out_p += partial_chunk_size;
+                               read_buf = cbuf;
 
-               if (cb) {
-                       /* Feed the data to the callback function.  */
-                       wimlib_assert(offset == 0);
+                       ret = full_pread(in_fd,
+                                        read_buf,
+                                        chunk_csize,
+                                        cur_read_offset);
+                       if (unlikely(ret))
+                               goto read_error;
 
-                       if (flags & WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS) {
-                               ret = cb(out_buf, out_p - out_buf, ctx_or_buf);
-                               if (ret)
-                                       goto out_free_memory;
-                               out_p = out_buf;
-
-                       } else if (i == end_chunk || out_p == out_buf_end) {
-                               size_t bytes_sent;
-                               const u8 *p;
-
-                               for (p = out_buf; p != out_p; p += bytes_sent) {
-                                       bytes_sent = min(in_chunk_size, out_p - p);
-                                       ret = cb(p, bytes_sent, ctx_or_buf);
-                                       if (ret)
-                                               goto out_free_memory;
+                       if (read_buf == cbuf) {
+                               ret = wimlib_decompress(cbuf,
+                                                       chunk_csize,
+                                                       ubuf,
+                                                       chunk_usize,
+                                                       decompressor);
+                               if (unlikely(ret)) {
+                                       ERROR("Failed to decompress data!");
+                                       ret = WIMLIB_ERR_DECOMPRESSION;
+                                       errno = EINVAL;
+                                       goto out_cleanup;
                                }
-                               out_p = out_buf;
                        }
+                       cur_read_offset += chunk_csize;
+
+                       /* At least one range requires data in this chunk.  */
+                       do {
+                               size_t start, end, size;
+
+                               /* Calculate how many bytes of data should be
+                                * sent to the callback function, taking into
+                                * account that data sent to the callback
+                                * function must not overlap range boundaries.
+                                */
+                               start = cur_range_pos - chunk_start_offset;
+                               end = min(cur_range_end, chunk_end_offset) - chunk_start_offset;
+                               size = end - start;
+
+                               ret = call_consume_chunk(&ubuf[start], size, cbs);
+                               if (unlikely(ret))
+                                       goto out_cleanup;
+
+                               cur_range_pos += size;
+                               if (cur_range_pos == cur_range_end) {
+                                       /* Advance to next range.  */
+                                       if (++cur_range == end_range) {
+                                               cur_range_pos = ~0ULL;
+                                       } else {
+                                               cur_range_pos = cur_range->offset;
+                                               cur_range_end = cur_range->offset + cur_range->size;
+                                       }
+                               }
+                       } while (cur_range_pos < chunk_end_offset);
                }
-               cur_read_offset += compressed_chunk_size;
        }
 
+       if (is_pipe_read &&
+           last_offset == rdesc->uncompressed_size - 1 &&
+           chunk_table_size)
+       {
+               u8 dummy;
+               /* If reading a pipable resource from a pipe and the full data
+                * was requested, skip the chunk table at the end so that the
+                * file descriptor is fully clear of the resource after this
+                * returns.  */
+               cur_read_offset += chunk_table_size;
+               ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
+               if (unlikely(ret))
+                       goto read_error;
+       }
        ret = 0;
-out_free_memory:
+
+out_cleanup:
+       if (decompressor) {
+               wimlib_free_decompressor(rdesc->wim->decompressor);
+               rdesc->wim->decompressor = decompressor;
+               rdesc->wim->decompressor_ctype = ctype;
+               rdesc->wim->decompressor_max_block_size = chunk_size;
+       }
        if (chunk_offsets_malloced)
                FREE(chunk_offsets);
-       if (out_buf_malloced)
-               FREE(out_buf);
-       if (compressed_buf_malloced)
-               FREE(compressed_buf);
-       if (tmp_buf_malloced)
-               FREE(tmp_buf);
+       if (ubuf_malloced)
+               FREE(ubuf);
+       if (cbuf_malloced)
+               FREE(cbuf);
        return ret;
 
+oom:
+       ERROR("Out of memory while reading compressed WIM resource");
+       ret = WIMLIB_ERR_NOMEM;
+       goto out_cleanup;
+
 read_error:
-       ERROR_WITH_ERRNO("Error reading compressed file resource");
-       goto out_free_memory;
+       ERROR_WITH_ERRNO("Error reading data from WIM file");
+       goto out_cleanup;
 }
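
A minimal sketch of a caller, assuming struct read_blob_callbacks carries a
.consume_chunk function pointer plus a .ctx pointer handed back to it (the
.ctx field is an assumption; only consume_chunk() is visible in this diff).
The callback simply tallies the bytes delivered for one full-resource range.

    /* Hypothetical callback: count the uncompressed bytes delivered. */
    static int
    count_bytes_cb(const void *chunk, size_t size, void *_ctx)
    {
            *(u64 *)_ctx += size;   /* the chunk data itself is ignored */
            return 0;
    }

    /* Hypothetical caller: read the whole resource and count its bytes.
     * Assumes the resource is nonempty and that read_blob_callbacks has a
     * .ctx member (not shown in this diff). */
    static int
    count_resource_bytes(const struct wim_resource_descriptor *rdesc,
                         u64 *count_ret)
    {
            const struct data_range range = {
                    .offset = 0,
                    .size   = rdesc->uncompressed_size,
            };
            const struct read_blob_callbacks cbs = {
                    .consume_chunk = count_bytes_cb,
                    .ctx           = count_ret,
            };

            *count_ret = 0;
            return read_compressed_wim_resource(rdesc, &range, 1, &cbs);
    }
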
 
-/* Skip over the chunk table at the end of pipable, compressed resource being
- * read from a pipe.  */
+/* Read raw data from a file descriptor at the specified offset, feeding the
+ * data in nonempty chunks into the cbs->consume_chunk() function.  */
 static int
-skip_chunk_table(const struct wim_lookup_table_entry *lte,
-                struct filedes *in_fd)
+read_raw_file_data(struct filedes *in_fd, u64 offset, u64 size,
+                  const struct read_blob_callbacks *cbs,
+                  const tchar *filename)
 {
-       u64 num_chunk_entries = wim_resource_chunks(lte) - 1;
-       u64 chunk_entry_size = (wim_resource_size(lte) > ((u64)1 << 32)) ? 8 : 4;
-       u64 chunk_table_size = num_chunk_entries * chunk_entry_size;
+       u8 buf[BUFFER_SIZE];
+       size_t bytes_to_read;
        int ret;
 
-       if (num_chunk_entries != 0) {
-               u8 dummy;
-               ret = full_pread(in_fd, &dummy, 1,
-                                in_fd->offset + chunk_table_size - 1);
-               if (ret)
+       while (size) {
+               bytes_to_read = min(sizeof(buf), size);
+               ret = full_pread(in_fd, buf, bytes_to_read, offset);
+               if (unlikely(ret))
+                       goto read_error;
+               ret = call_consume_chunk(buf, bytes_to_read, cbs);
+               if (unlikely(ret))
                        return ret;
+               size -= bytes_to_read;
+               offset += bytes_to_read;
        }
        return 0;
-}
 
-/* Read and decompress data from a compressed, pipable resource being read from
- * a pipe.  */
-static int
-read_pipable_resource(const struct wim_lookup_table_entry *lte,
-                     u64 size, consume_data_callback_t cb,
-                     u32 in_chunk_size, void *ctx_or_buf,
-                     int flags, u64 offset)
-{
-       struct filedes *in_fd;
-       int ret;
-       const u32 orig_chunk_size = wim_resource_chunk_size(lte);
-       u8 cchunk[orig_chunk_size - 1];
-
-       size_t out_buf_size;
-       u8 *out_buf, *out_buf_end, *out_p;
-       if (cb) {
-               out_buf_size = max(in_chunk_size, orig_chunk_size);
-               out_buf = alloca(out_buf_size);
+read_error:
+       if (!filename) {
+               ERROR_WITH_ERRNO("Error reading data from WIM file");
+       } else if (ret == WIMLIB_ERR_UNEXPECTED_END_OF_FILE) {
+               ERROR("\"%"TS"\": File was concurrently truncated", filename);
+               ret = WIMLIB_ERR_CONCURRENT_MODIFICATION_DETECTED;
        } else {
-               out_buf_size = size;
-               out_buf = ctx_or_buf;
+               ERROR_WITH_ERRNO("\"%"TS"\": Error reading data", filename);
        }
-       out_buf_end = out_buf + out_buf_size;
-       out_p = out_buf;
-
-       /* Get pointers to appropriate decompression function and the input file
-        * descriptor.  */
-       in_fd = &lte->wim->in_fd;
-
-       /* This function currently assumes the entire resource is being read at
-        * once and that the raw compressed data isn't being requested.  This is
-        * based on the fact that this function currently only gets called
-        * during the operation of wimlib_extract_image_from_pipe().  */
-       wimlib_assert(!(flags & WIMLIB_READ_RESOURCE_FLAG_RAW));
-       wimlib_assert(offset == 0);
-       wimlib_assert(size == wim_resource_size(lte));
-       wimlib_assert(in_fd->offset == lte->resource_entry.offset);
-
-       u32 chunk_usize;
-       for (offset = 0; offset < size; offset += chunk_usize) {
-               struct pwm_chunk_hdr chunk_hdr;
-               u32 chunk_csize;
-
-               /* Calculate uncompressed size of next chunk.  */
-               chunk_usize = min(orig_chunk_size, size - offset);
-
-               /* Read the compressed size of the next chunk from the chunk
-                * header.  */
-               ret = full_read(in_fd, &chunk_hdr, sizeof(chunk_hdr));
-               if (ret)
-                       goto read_error;
-
-               chunk_csize = le32_to_cpu(chunk_hdr.compressed_size);
-
-               if (chunk_csize > orig_chunk_size) {
-                       errno = EINVAL;
-                       ret = WIMLIB_ERR_INVALID_PIPABLE_WIM;
-                       goto invalid;
-               }
-
-               /* Read chunk data.  */
-               ret = full_read(in_fd, cchunk, chunk_csize);
-               if (ret)
-                       goto read_error;
-
-               if (flags & WIMLIB_READ_RESOURCE_FLAG_SEEK_ONLY)
-                       continue;
-
-               /* Decompress chunk if needed.  Uncompressed size same
-                * as compressed size means the chunk is uncompressed.
-                */
-               if (chunk_csize == chunk_usize) {
-                       memcpy(out_p, cchunk, chunk_usize);
-               } else {
-                       ret = (*decompress)(cchunk, chunk_csize,
-                                           out_p, chunk_usize,
-                                           wim_resource_compression_type(lte),
-                                           orig_chunk_size);
-                       if (ret) {
-                               errno = EINVAL;
-                               ret = WIMLIB_ERR_DECOMPRESSION;
-                               goto invalid;
-                       }
-               }
-               out_p += chunk_usize;
-
-               /* Feed the uncompressed data into the callback function or copy
-                * it into the provided buffer.  */
-               if (cb && (out_p == out_buf_end ||
-                          offset + chunk_usize == size))
-               {
-                       size_t bytes_sent;
-                       const u8 *p;
+       return ret;
+}
 
-                       for (p = out_buf; p != out_p; p += bytes_sent) {
-                               bytes_sent = min(in_chunk_size, out_p - p);
-                               ret = cb(p, bytes_sent, ctx_or_buf);
-                               if (ret)
-                                       return ret;
-                       }
-                       out_p = out_buf;
-               }
-       }
+/* A consume_chunk() implementation that simply concatenates all chunks into an
+ * in-memory buffer.  */
+static int
+bufferer_cb(const void *chunk, size_t size, void *_ctx)
+{
+       void **buf_p = _ctx;
 
-       ret = skip_chunk_table(lte, in_fd);
-       if (ret)
-               goto read_error;
+       *buf_p = mempcpy(*buf_p, chunk, size);
        return 0;
-
-read_error:
-       ERROR_WITH_ERRNO("Error reading compressed file resource");
-       return ret;
-
-invalid:
-       ERROR("Compressed file resource is invalid");
-       return ret;
 }
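
bufferer_cb() is meant to be paired with a read routine such as
read_partial_wim_resource(), defined next: the caller passes the address of a
cursor that bufferer_cb() advances through a preallocated buffer.  A minimal
sketch follows (the wrapper name is hypothetical, and the .ctx field of
read_blob_callbacks is an assumption, as only consume_chunk() appears in this
diff).

    /* Hypothetical wrapper: read @size bytes at @offset of @rdesc into @buf
     * by concatenating the chunks with bufferer_cb(). */
    static int
    read_resource_into_buffer(const struct wim_resource_descriptor *rdesc,
                              u64 offset, u64 size, void *buf)
    {
            void *cursor = buf;     /* advanced by bufferer_cb() */
            const struct read_blob_callbacks cbs = {
                    .consume_chunk = bufferer_cb,
                    .ctx           = &cursor,
            };

            return read_partial_wim_resource(rdesc, offset, size, &cbs);
    }
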
 
 /*
- * read_partial_wim_resource()-
- *
- * Read a range of data from a uncompressed or compressed resource in a WIM
- * file.  Data is written into a buffer or fed into a callback function, as
- * documented in read_resource_prefix().
- *
- * @flags can be:
- *
- * 0:
- *     Just do a normal read, decompressing the data if necessary.  @size and
- *     @offset are interpreted relative to the uncompressed contents of the
- *     stream.
- *
- * WIMLIB_READ_RESOURCE_FLAG_RAW_FULL:
- *     Only valid when the resource is compressed:  Read the raw contents of
- *     the compressed resource.  If the resource is non-pipable, this includes
- *     the chunk table as well as the compressed chunks.  If the resource is
- *     pipable, this includes the compressed chunks--- including the chunk
- *     headers--- and the chunk table.  The stream header is still *not*
- *     included.
- *
- *     In this mode, @offset is relative to the beginning of the raw contents
- *     of the compressed resource--- that is, the chunk table if the resource
- *     is non-pipable, or the header for the first compressed chunk if the
- *     resource is pipable.  @size is the number of raw bytes to read, which
- *     must not overrun the end of the resource.  For example, if @offset is 0,
- *     then @size can be at most the raw size of the compressed resource
- *     (@lte->resource_entry.size).
- *
- * WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS:
- *     Only valid when the resource is compressed and is not being read from a
- *     pipe:  Read the raw contents of the compressed chunks of the compressed
- *     resource.  For pipable resources, this does *not* include the chunk
- *     headers.  If a callback function is being used, it will be called once
- *     for each compressed chunk.  The chunk table is excluded.  Also, for
- *     pipable resources, the stream and chunk headers are excluded.  In this
- *     mode, @size must be exactly the raw size of the compressed resource
- *     (@lte->resource_entry.size) and @offset must be 0.
+ * Read @size bytes at @offset in the WIM resource described by @rdesc and feed
+ * the data into the @cbs->consume_chunk callback function.
  *
- * WIMLIB_READ_RESOURCE_FLAG_SEEK_ONLY:
- *     Only valid when the resource is being read from a pipe:  Skip over the
- *     requested data rather than feed it to the callback function or write it
- *     into the buffer.  No decompression is done.
- *     WIMLIB_READ_RESOURCE_FLAG_RAW_* may not be combined with this flag.
- *     @offset must be 0 and @size must be the uncompressed size of the
- *     resource.
+ * @offset and @size are assumed to have already been validated against the
+ * resource's uncompressed size.
  *
- * Return values:
- *     WIMLIB_ERR_SUCCESS (0)
- *     WIMLIB_ERR_READ                   (errno set)
- *     WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to 0)
- *     WIMLIB_ERR_NOMEM                  (errno set to ENOMEM)
- *     WIMLIB_ERR_DECOMPRESSION          (errno set to EINVAL)
- *     WIMLIB_ERR_INVALID_PIPABLE_WIM    (errno set to EINVAL)
- *
- *     or other error code returned by the @cb function.
+ * Returns 0 on success; or the first nonzero value returned by the callback
+ * function; or a nonzero wimlib error code with errno set as well.
  */
-int
-read_partial_wim_resource(const struct wim_lookup_table_entry *lte,
-                         u64 size, consume_data_callback_t cb,
-                         u32 in_chunk_size,
-                         void *ctx_or_buf, int flags, u64 offset)
+static int
+read_partial_wim_resource(const struct wim_resource_descriptor *rdesc,
+                         const u64 offset, const u64 size,
+                         const struct read_blob_callbacks *cbs)
 {
-       struct filedes *in_fd;
-       int ret;
-
-       /* Make sure the resource is actually located in a WIM file and is not
-        * somewhere else.  */
-       wimlib_assert(lte->resource_location == RESOURCE_IN_WIM);
-
-       /* If a callback was specified, in_chunk_size must be a power of 2 (and
-        * not 0).  */
-       wimlib_assert(cb == NULL || is_power_of_2(in_chunk_size));
-
-       /* If a callback was specified, offset must be zero.  */
-       wimlib_assert(cb == NULL || offset == 0);
-
-       /* Retrieve input file descriptor for the WIM file.  */
-       in_fd = &lte->wim->in_fd;
-
-       /* Don't allow raw reads (either full or chunks) of uncompressed
-        * resources.  */
-       wimlib_assert(!(flags & WIMLIB_READ_RESOURCE_FLAG_RAW) ||
-                     resource_is_compressed(&lte->resource_entry));
-
-       /* Don't allow seek-only reads unless reading from a pipe; also don't
-        * allow combining SEEK_ONLY with either RAW flag.  */
-       wimlib_assert(!(flags & WIMLIB_READ_RESOURCE_FLAG_SEEK_ONLY) ||
-                     (!filedes_is_seekable(in_fd) &&
-                      !(flags & WIMLIB_READ_RESOURCE_FLAG_RAW)));
-
-       DEBUG("Reading WIM resource: %"PRIu64" @ +%"PRIu64" "
-             "from %"PRIu64" @ +%"PRIu64" (readflags 0x%08x, resflags 0x%02x%s)",
-             size, offset,
-             lte->resource_entry.original_size, lte->resource_entry.offset,
-             flags, lte->resource_entry.flags,
-             (lte->is_pipable ? ", pipable" : ""));
-
-       if ((flags & WIMLIB_READ_RESOURCE_FLAG_RAW_FULL) ||
-           !resource_is_compressed(&lte->resource_entry))
+       if (rdesc->flags & (WIM_RESHDR_FLAG_COMPRESSED |
+                           WIM_RESHDR_FLAG_SOLID))
        {
-               /* Reading raw resource contents or reading uncompressed
-                * resource.  */
-               wimlib_assert(offset + size <= lte->resource_entry.size);
-               offset += lte->resource_entry.offset;
-               if (flags & WIMLIB_READ_RESOURCE_FLAG_SEEK_ONLY) {
-                       if (lte->resource_entry.size != 0) {
-                               u8 dummy;
-                               ret = full_pread(in_fd, &dummy, 1,
-                                                offset + lte->resource_entry.size - 1);
-                               if (ret)
-                                       goto read_error;
-                       }
-               } else if (cb) {
-                       /* Send data to callback function */
-                       u8 buf[min(in_chunk_size, size)];
-                       while (size) {
-                               size_t bytes_to_read = min(in_chunk_size, size);
-                               ret = full_pread(in_fd, buf, bytes_to_read,
-                                                offset);
-                               if (ret)
-                                       goto read_error;
-                               ret = cb(buf, bytes_to_read, ctx_or_buf);
-                               if (ret)
-                                       goto out;
-                               size -= bytes_to_read;
-                               offset += bytes_to_read;
-                       }
-               } else {
-                       /* Send data directly to a buffer */
-                       ret = full_pread(in_fd, ctx_or_buf, size, offset);
-                       if (ret)
-                               goto read_error;
-               }
-               ret = 0;
-       } else if (lte->is_pipable && !filedes_is_seekable(in_fd)) {
-               /* Reading compressed, pipable resource from pipe.  */
-               ret = read_pipable_resource(lte, size, cb,
-                                           in_chunk_size,
-                                           ctx_or_buf, flags, offset);
-       } else {
-               /* Reading compressed, possibly pipable resource from seekable
-                * file.  */
-               ret = read_compressed_resource(lte, size, cb,
-                                              in_chunk_size,
-                                              ctx_or_buf, flags, offset);
+               /* Compressed resource  */
+               if (unlikely(!size))
+                       return 0;
+               struct data_range range = {
+                       .offset = offset,
+                       .size = size,
+               };
+               return read_compressed_wim_resource(rdesc, &range, 1, cbs);
        }
-       goto out;
 
-read_error:
-       ERROR_WITH_ERRNO("Error reading data from WIM");
-out:
-       return ret;
+       /* Uncompressed resource  */
+       return read_raw_file_data(&rdesc->wim->in_fd,
+                                 rdesc->offset_in_wim + offset,
+                                 size, cbs, NULL);
 }
 
+/* Read the specified range of uncompressed data from the specified blob, which
+ * must be located in a WIM file, into the specified buffer.  */
+int
+read_partial_wim_blob_into_buf(const struct blob_descriptor *blob,
+                              u64 offset, size_t size, void *buf)
+{
+       struct read_blob_callbacks cbs = {
+               .consume_chunk  = bufferer_cb,
+               .ctx            = &buf,
+       };
+       return read_partial_wim_resource(blob->rdesc,
+                                        blob->offset_in_res + offset,
+                                        size,
+                                        &cbs);
+}
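/*
 * A minimal sketch (hypothetical name, not the actual wimlib code):
 * bufferer_cb is defined earlier in resource.c, outside this hunk, and is
 * presumably a consume_chunk implementation that appends each chunk to a
 * caller-supplied buffer.  That is why &buf, a pointer to the buffer pointer,
 * is passed as the callback context above.  The idea, assuming memcpy() from
 * <string.h> is available:
 */
static int
copy_chunk_to_buf_sketch(const void *chunk, size_t size, void *_ctx)
{
        void **buf_p = _ctx;            /* current write position */

        memcpy(*buf_p, chunk, size);    /* append this chunk */
        *buf_p = (u8 *)*buf_p + size;   /* advance for the next chunk */
        return 0;
}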
 
+/* Skip over the data of the specified WIM resource.  */
 int
-read_partial_wim_resource_into_buf(const struct wim_lookup_table_entry *lte,
-                                  size_t size, u64 offset, void *buf)
+skip_wim_resource(const struct wim_resource_descriptor *rdesc)
 {
-       return read_partial_wim_resource(lte, size, NULL, 0, buf, 0, offset);
+       struct read_blob_callbacks cbs = {
+       };
+       return read_partial_wim_resource(rdesc, 0,
+                                        rdesc->uncompressed_size, &cbs);
 }
 
 static int
-read_wim_resource_prefix(const struct wim_lookup_table_entry *lte,
-                        u64 size,
-                        consume_data_callback_t cb,
-                        u32 in_chunk_size,
-                        void *ctx_or_buf,
-                        int flags)
+read_wim_blob_prefix(const struct blob_descriptor *blob, u64 size,
+                    const struct read_blob_callbacks *cbs)
 {
-       return read_partial_wim_resource(lte, size, cb, in_chunk_size,
-                                        ctx_or_buf, flags, 0);
+       return read_partial_wim_resource(blob->rdesc, blob->offset_in_res,
+                                        size, cbs);
 }
 
-
-#ifndef __WIN32__
+/* This function handles reading blob data that is located in an external file,
+ * such as a file that has been added to the WIM image through execution of a
+ * wimlib_add_command.
+ *
+ * This assumes the file can be accessed using the standard POSIX open(),
+ * read(), and close().  On Windows this will not necessarily be the case (since
+ * the file may need FILE_FLAG_BACKUP_SEMANTICS to be opened, or the file may be
+ * encrypted), so Windows uses its own code for its equivalent case.  */
 static int
-read_file_on_disk_prefix(const struct wim_lookup_table_entry *lte,
-                        u64 size,
-                        consume_data_callback_t cb,
-                        u32 in_chunk_size,
-                        void *ctx_or_buf,
-                        int _ignored_flags)
+read_file_on_disk_prefix(const struct blob_descriptor *blob, u64 size,
+                        const struct read_blob_callbacks *cbs)
 {
-       const tchar *filename = lte->file_on_disk;
        int ret;
-       struct filedes fd;
        int raw_fd;
-       u8 *out_buf;
-       bool out_buf_malloced;
-       const size_t stack_max = 32768;
-
-       DEBUG("Reading %"PRIu64" bytes from \"%"TS"\"",
-             size, lte->file_on_disk);
+       struct filedes fd;
 
-       raw_fd = open(filename, O_RDONLY);
-       if (raw_fd < 0) {
-               ERROR_WITH_ERRNO("Can't open \"%"TS"\"", filename);
+       raw_fd = topen(blob->file_on_disk, O_BINARY | O_RDONLY);
+       if (unlikely(raw_fd < 0)) {
+               ERROR_WITH_ERRNO("Can't open \"%"TS"\"", blob->file_on_disk);
                return WIMLIB_ERR_OPEN;
        }
        filedes_init(&fd, raw_fd);
-       out_buf_malloced = false;
-       if (cb) {
-               /* Send data to callback function */
-               if (in_chunk_size <= stack_max) {
-                       out_buf = alloca(in_chunk_size);
-               } else {
-                       out_buf = MALLOC(in_chunk_size);
-                       if (out_buf == NULL) {
-                               ret = WIMLIB_ERR_NOMEM;
-                               goto out_close;
-                       }
-                       out_buf_malloced = true;
-               }
-
-               size_t bytes_to_read;
-               while (size) {
-                       bytes_to_read = min(in_chunk_size, size);
-                       ret = full_read(&fd, out_buf, bytes_to_read);
-                       if (ret)
-                               goto read_error;
-                       ret = cb(out_buf, bytes_to_read, ctx_or_buf);
-                       if (ret)
-                               goto out_close;
-                       size -= bytes_to_read;
-               }
-       } else {
-               /* Send data directly to a buffer */
-               ret = full_read(&fd, ctx_or_buf, size);
-               if (ret)
-                       goto read_error;
-       }
-       ret = 0;
-       goto out_close;
-
-read_error:
-       ERROR_WITH_ERRNO("Error reading \"%"TS"\"", filename);
-out_close:
+       ret = read_raw_file_data(&fd, 0, size, cbs, blob->file_on_disk);
        filedes_close(&fd);
-       if (out_buf_malloced)
-               FREE(out_buf);
        return ret;
 }
-#endif /* !__WIN32__ */
 
+#ifdef WITH_FUSE
 static int
-read_buffer_prefix(const struct wim_lookup_table_entry *lte,
-                  u64 size, consume_data_callback_t cb,
-                  u32 in_chunk_size,
-                  void *ctx_or_buf, int _ignored_flags)
+read_staging_file_prefix(const struct blob_descriptor *blob, u64 size,
+                        const struct read_blob_callbacks *cbs)
 {
+       int raw_fd;
+       struct filedes fd;
+       int ret;
 
-       if (cb) {
-               int ret;
-               u32 chunk_size;
-
-               for (u64 offset = 0; offset < size; offset += chunk_size) {
-                       chunk_size = min(in_chunk_size, size - offset);
-                       ret = cb((const u8*)lte->attached_buffer + offset,
-                                chunk_size, ctx_or_buf);
-                       if (ret)
-                               return ret;
-               }
-       } else {
-               memcpy(ctx_or_buf, lte->attached_buffer, size);
+       raw_fd = openat(blob->staging_dir_fd, blob->staging_file_name,
+                       O_RDONLY | O_NOFOLLOW);
+       if (unlikely(raw_fd < 0)) {
+               ERROR_WITH_ERRNO("Can't open staging file \"%s\"",
+                                blob->staging_file_name);
+               return WIMLIB_ERR_OPEN;
        }
-       return 0;
+       filedes_init(&fd, raw_fd);
+       ret = read_raw_file_data(&fd, 0, size, cbs, blob->staging_file_name);
+       filedes_close(&fd);
+       return ret;
 }
+#endif
 
-typedef int (*read_resource_prefix_handler_t)(const struct wim_lookup_table_entry *lte,
-                                             u64 size,
-                                             consume_data_callback_t cb,
-                                             u32 in_chunk_size,
-                                             void *ctx_or_buf,
-                                             int flags);
+/* This function handles the trivial case of reading blob data that is, in fact,
+ * already located in an in-memory buffer.  */
+static int
+read_buffer_prefix(const struct blob_descriptor *blob,
+                  u64 size, const struct read_blob_callbacks *cbs)
+{
+       if (unlikely(!size))
+               return 0;
+       return call_consume_chunk(blob->attached_buffer, size, cbs);
+}
+
+typedef int (*read_blob_prefix_handler_t)(const struct blob_descriptor *blob,
+                                         u64 size,
+                                         const struct read_blob_callbacks *cbs);
 
 /*
- * read_resource_prefix()-
- *
- * Read the first @size bytes from a generic "resource", which may be located in
- * the WIM (compressed or uncompressed), in an external file, or directly in an
- * in-memory buffer.
- *
- * Feed the data either to a callback function (cb != NULL, passing it
- * ctx_or_buf), or write it directly into a buffer (cb == NULL, ctx_or_buf
- * specifies the buffer, which must have room for @size bytes).
+ * Read the first @size bytes from a generic "blob", which may reside in any one
+ * of several locations, such as in a WIM resource (possibly compressed), in an
+ * external file, or directly in an in-memory buffer.  The blob data will be fed
+ * to the cbs->consume_chunk() callback function in chunks that are nonempty but
+ * otherwise are of unspecified size.
  *
- * When using a callback function, it is called with chunks up to 32768 bytes in
- * size until the resource is exhausted.
- *
- * If the resource is located in a WIM file, @flags can be set as documented in
- * read_partial_wim_resource().  Otherwise @flags are ignored.
+ * Returns 0 on success; nonzero on error.  A nonzero value is returned if the
+ * blob data cannot be successfully read (for any of several reasons, depending
+ * on the blob location), or if cbs->consume_chunk() returned nonzero, in which
+ * case that error code is returned.
  */
-int
-read_resource_prefix(const struct wim_lookup_table_entry *lte,
-                    u64 size, consume_data_callback_t cb, u32 in_chunk_size,
-                    void *ctx_or_buf, int flags)
+static int
+read_blob_prefix(const struct blob_descriptor *blob, u64 size,
+                const struct read_blob_callbacks *cbs)
 {
-       static const read_resource_prefix_handler_t handlers[] = {
-               [RESOURCE_IN_WIM]             = read_wim_resource_prefix,
-       #ifdef __WIN32__
-               [RESOURCE_IN_FILE_ON_DISK]    = read_win32_file_prefix,
-       #else
-               [RESOURCE_IN_FILE_ON_DISK]    = read_file_on_disk_prefix,
-       #endif
-               [RESOURCE_IN_ATTACHED_BUFFER] = read_buffer_prefix,
+       static const read_blob_prefix_handler_t handlers[] = {
+               [BLOB_IN_WIM] = read_wim_blob_prefix,
+               [BLOB_IN_FILE_ON_DISK] = read_file_on_disk_prefix,
+               [BLOB_IN_ATTACHED_BUFFER] = read_buffer_prefix,
        #ifdef WITH_FUSE
-               [RESOURCE_IN_STAGING_FILE]    = read_file_on_disk_prefix,
+               [BLOB_IN_STAGING_FILE] = read_staging_file_prefix,
        #endif
        #ifdef WITH_NTFS_3G
-               [RESOURCE_IN_NTFS_VOLUME]     = read_ntfs_file_prefix,
+               [BLOB_IN_NTFS_VOLUME] = read_ntfs_attribute_prefix,
        #endif
        #ifdef __WIN32__
-               [RESOURCE_WIN32_ENCRYPTED]    = read_win32_encrypted_file_prefix,
+               [BLOB_IN_WINNT_FILE_ON_DISK] = read_winnt_stream_prefix,
+               [BLOB_WIN32_ENCRYPTED] = read_win32_encrypted_file_prefix,
        #endif
        };
-       wimlib_assert(lte->resource_location < ARRAY_LEN(handlers)
-                     && handlers[lte->resource_location] != NULL);
-       wimlib_assert(cb == NULL || in_chunk_size > 0);
-       return handlers[lte->resource_location](lte, size, cb, in_chunk_size, ctx_or_buf, flags);
+       wimlib_assert(blob->blob_location < ARRAY_LEN(handlers)
+                     && handlers[blob->blob_location] != NULL);
+       wimlib_assert(size <= blob->size);
+       return handlers[blob->blob_location](blob, size, cbs);
+}
+
+/* Read the full data of the specified blob, passing the data into the specified
+ * callbacks (all of which are optional).  */
+int
+read_blob_with_cbs(struct blob_descriptor *blob,
+                  const struct read_blob_callbacks *cbs)
+{
+       int ret;
+
+       ret = call_begin_blob(blob, cbs);
+       if (unlikely(ret))
+               return ret;
+
+       ret = read_blob_prefix(blob, blob->size, cbs);
+
+       return call_end_blob(blob, ret, cbs);
 }
 
+/* Read the full uncompressed data of the specified blob into the specified
+ * buffer, which must have space for at least blob->size bytes.  The SHA-1
+ * message digest is *not* checked.  */
 int
-read_full_resource_into_buf(const struct wim_lookup_table_entry *lte,
-                           void *buf)
+read_blob_into_buf(const struct blob_descriptor *blob, void *buf)
 {
-       return read_resource_prefix(lte, wim_resource_size(lte), NULL, 0, buf, 0);
+       struct read_blob_callbacks cbs = {
+               .consume_chunk  = bufferer_cb,
+               .ctx            = &buf,
+       };
+       return read_blob_prefix(blob, blob->size, &cbs);
 }
 
+/* Retrieve the full uncompressed data of the specified blob.  A buffer large
+ * enough to hold the data is allocated and returned in @buf_ret.  The SHA-1
+ * message digest is *not* checked.  */
 int
-read_full_resource_into_alloc_buf(const struct wim_lookup_table_entry *lte,
-                                 void **buf_ret)
+read_blob_into_alloc_buf(const struct blob_descriptor *blob, void **buf_ret)
 {
        int ret;
        void *buf;
 
-       if ((size_t)lte->resource_entry.original_size !=
-           lte->resource_entry.original_size)
-       {
-               ERROR("Can't read %"PRIu64" byte resource into "
-                     "memory", lte->resource_entry.original_size);
+       if (unlikely((size_t)blob->size != blob->size)) {
+               ERROR("Can't read %"PRIu64" byte blob into memory", blob->size);
                return WIMLIB_ERR_NOMEM;
        }
 
-       buf = MALLOC(lte->resource_entry.original_size);
-       if (!buf)
+       buf = MALLOC(blob->size);
+       if (unlikely(!buf))
                return WIMLIB_ERR_NOMEM;
 
-       ret = read_full_resource_into_buf(lte, buf);
-       if (ret) {
+       ret = read_blob_into_buf(blob, buf);
+       if (unlikely(ret)) {
                FREE(buf);
                return ret;
        }
@@ -1016,185 +807,565 @@ read_full_resource_into_alloc_buf(const struct wim_lookup_table_entry *lte,
        return 0;
 }
 
+/* Retrieve the full uncompressed data of a WIM resource specified as a raw
+ * `wim_reshdr' and the corresponding WIM file.  A buffer large enough to hold
+ * the data is allocated and returned in @buf_ret.  */
+int
+wim_reshdr_to_data(const struct wim_reshdr *reshdr, WIMStruct *wim,
+                  void **buf_ret)
+{
+       struct wim_resource_descriptor rdesc;
+       struct blob_descriptor blob;
+
+       wim_reshdr_to_desc_and_blob(reshdr, wim, &rdesc, &blob);
+
+       return read_blob_into_alloc_buf(&blob, buf_ret);
+}
+
+/* Calculate the SHA-1 message digest of the uncompressed data of the specified
+ * WIM resource.  */
 int
-res_entry_to_data(const struct resource_entry *res_entry,
-                 WIMStruct *wim, void **buf_ret)
+wim_reshdr_to_hash(const struct wim_reshdr *reshdr, WIMStruct *wim,
+                  u8 hash[SHA1_HASH_SIZE])
 {
+       struct wim_resource_descriptor rdesc;
+       struct blob_descriptor blob;
        int ret;
-       struct wim_lookup_table_entry *lte;
 
-       lte = new_lookup_table_entry();
-       if (!lte)
-               return WIMLIB_ERR_NOMEM;
+       wim_reshdr_to_desc_and_blob(reshdr, wim, &rdesc, &blob);
+       blob.unhashed = 1;
 
-       copy_resource_entry(&lte->resource_entry, res_entry);
-       lte->unhashed = 1;
-       lte->part_number = wim->hdr.part_number;
-       lte_init_wim(lte, wim);
+       ret = sha1_blob(&blob);
+       if (unlikely(ret))
+               return ret;
 
-       ret = read_full_resource_into_alloc_buf(lte, buf_ret);
-       free_lookup_table_entry(lte);
-       return ret;
+       copy_hash(hash, blob.hash);
+       return 0;
+}
+
+struct blobifier_context {
+       struct read_blob_callbacks cbs;
+       struct blob_descriptor *cur_blob;
+       struct blob_descriptor *next_blob;
+       u64 cur_blob_offset;
+       struct blob_descriptor *final_blob;
+       size_t list_head_offset;
+};
+
+static struct blob_descriptor *
+next_blob(struct blob_descriptor *blob, size_t list_head_offset)
+{
+       struct list_head *cur;
+
+       cur = (struct list_head*)((u8*)blob + list_head_offset);
+
+       return (struct blob_descriptor*)((u8*)cur->next - list_head_offset);
+}
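/*
 * next_blob() recovers the next blob_descriptor from the `struct list_head'
 * embedded in the current one at @list_head_offset, using the same pointer
 * arithmetic as the usual container_of()/list_entry() idiom.  Callers obtain
 * the offset with offsetof(); for example (hypothetical member name):
 *
 *      size_t off = offsetof(struct blob_descriptor, my_list_node);
 *      struct blob_descriptor *nxt = next_blob(blob, off);
 */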
+
+/* A consume_chunk() implementation that translates raw resource data into
+ * blobs, calling the begin_blob, consume_chunk, and end_blob callbacks as
+ * appropriate.  */
+static int
+blobifier_cb(const void *chunk, size_t size, void *_ctx)
+{
+       struct blobifier_context *ctx = _ctx;
+       int ret;
+
+       wimlib_assert(ctx->cur_blob != NULL);
+       wimlib_assert(size <= ctx->cur_blob->size - ctx->cur_blob_offset);
+
+       if (ctx->cur_blob_offset == 0) {
+               /* Starting a new blob.  */
+               ret = call_begin_blob(ctx->cur_blob, &ctx->cbs);
+               if (ret)
+                       return ret;
+       }
+
+       ctx->cur_blob_offset += size;
+
+       ret = call_consume_chunk(chunk, size, &ctx->cbs);
+       if (ret)
+               return ret;
+
+       if (ctx->cur_blob_offset == ctx->cur_blob->size) {
+               /* Finished reading all the data for a blob.  */
+
+               ctx->cur_blob_offset = 0;
+
+               ret = call_end_blob(ctx->cur_blob, 0, &ctx->cbs);
+               if (ret)
+                       return ret;
+
+               /* Advance to next blob.  */
+               ctx->cur_blob = ctx->next_blob;
+               if (ctx->cur_blob != NULL) {
+                       if (ctx->cur_blob != ctx->final_blob)
+                               ctx->next_blob = next_blob(ctx->cur_blob,
+                                                          ctx->list_head_offset);
+                       else
+                               ctx->next_blob = NULL;
+               }
+       }
+       return 0;
 }
 
-struct extract_ctx {
+struct hasher_context {
        SHA_CTX sha_ctx;
-       consume_data_callback_t extract_chunk;
-       void *extract_chunk_arg;
+       int flags;
+       struct read_blob_callbacks cbs;
 };
 
+/* Callback for starting to read a blob while calculating its SHA-1 message
+ * digest.  */
+static int
+hasher_begin_blob(struct blob_descriptor *blob, void *_ctx)
+{
+       struct hasher_context *ctx = _ctx;
+
+       sha1_init(&ctx->sha_ctx);
+
+       return call_begin_blob(blob, &ctx->cbs);
+}
+
+/* A consume_chunk() implementation that continues calculating the SHA-1 message
+ * digest of the blob being read, then optionally passes the data on to another
+ * consume_chunk() implementation.  This allows checking the SHA-1 message
+ * digest of a blob being extracted, for example.  */
+static int
+hasher_consume_chunk(const void *chunk, size_t size, void *_ctx)
+{
+       struct hasher_context *ctx = _ctx;
+
+       sha1_update(&ctx->sha_ctx, chunk, size);
+
+       return call_consume_chunk(chunk, size, &ctx->cbs);
+}
+
+static int
+report_sha1_mismatch_error(const struct blob_descriptor *blob,
+                          const u8 actual_hash[SHA1_HASH_SIZE])
+{
+       tchar expected_hashstr[SHA1_HASH_SIZE * 2 + 1];
+       tchar actual_hashstr[SHA1_HASH_SIZE * 2 + 1];
+
+       wimlib_assert(blob->blob_location != BLOB_NONEXISTENT);
+       wimlib_assert(blob->blob_location != BLOB_IN_ATTACHED_BUFFER);
+
+       sprint_hash(blob->hash, expected_hashstr);
+       sprint_hash(actual_hash, actual_hashstr);
+
+       if (blob_is_in_file(blob)) {
+               ERROR("A file was concurrently modified!\n"
+                     "        Path: \"%"TS"\"\n"
+                     "        Expected SHA-1: %"TS"\n"
+                     "        Actual SHA-1: %"TS"\n",
+                     blob->file_on_disk, expected_hashstr, actual_hashstr);
+               return WIMLIB_ERR_CONCURRENT_MODIFICATION_DETECTED;
+       } else if (blob->blob_location == BLOB_IN_WIM) {
+               const struct wim_resource_descriptor *rdesc = blob->rdesc;
+               ERROR("A WIM resource is corrupted!\n"
+                     "        WIM file: \"%"TS"\"\n"
+                     "        Blob uncompressed size: %"PRIu64"\n"
+                     "        Resource offset in WIM: %"PRIu64"\n"
+                     "        Resource uncompressed size: %"PRIu64"\n"
+                     "        Resource size in WIM: %"PRIu64"\n"
+                     "        Resource flags: 0x%x%"TS"\n"
+                     "        Resource compression type: %"TS"\n"
+                     "        Resource compression chunk size: %"PRIu32"\n"
+                     "        Expected SHA-1: %"TS"\n"
+                     "        Actual SHA-1: %"TS"\n",
+                     rdesc->wim->filename,
+                     blob->size,
+                     rdesc->offset_in_wim,
+                     rdesc->uncompressed_size,
+                     rdesc->size_in_wim,
+                     (unsigned int)rdesc->flags,
+                     (rdesc->is_pipable ? T(", pipable") : T("")),
+                     wimlib_get_compression_type_string(
+                                               rdesc->compression_type),
+                     rdesc->chunk_size,
+                     expected_hashstr, actual_hashstr);
+               return WIMLIB_ERR_INVALID_RESOURCE_HASH;
+       } else {
+               ERROR("File data was concurrently modified!\n"
+                     "        Location ID: %d\n"
+                     "        Expected SHA-1: %"TS"\n"
+                     "        Actual SHA-1: %"TS"\n",
+                     (int)blob->blob_location,
+                     expected_hashstr, actual_hashstr);
+               return WIMLIB_ERR_CONCURRENT_MODIFICATION_DETECTED;
+       }
+}
+
+/* Callback for finishing reading a blob while calculating its SHA-1 message
+ * digest.  */
+static int
+hasher_end_blob(struct blob_descriptor *blob, int status, void *_ctx)
+{
+       struct hasher_context *ctx = _ctx;
+       u8 hash[SHA1_HASH_SIZE];
+       int ret;
+
+       if (unlikely(status)) {
+               /* Error occurred; the full blob may not have been read.  */
+               ret = status;
+               goto out_next_cb;
+       }
+
+       /* Retrieve the final SHA-1 message digest.  */
+       sha1_final(hash, &ctx->sha_ctx);
+
+       /* Set the SHA-1 message digest of the blob, or compare the calculated
+        * value with stored value.  */
+       if (blob->unhashed) {
+               if (ctx->flags & COMPUTE_MISSING_BLOB_HASHES)
+                       copy_hash(blob->hash, hash);
+       } else if ((ctx->flags & VERIFY_BLOB_HASHES) &&
+                  unlikely(!hashes_equal(hash, blob->hash)))
+       {
+               ret = report_sha1_mismatch_error(blob, hash);
+               goto out_next_cb;
+       }
+       ret = 0;
+out_next_cb:
+       return call_end_blob(blob, ret, &ctx->cbs);
+}
+
+/* Read the full data of the specified blob, passing the data into the specified
+ * callbacks (all of which are optional) and either checking or computing the
+ * SHA-1 message digest of the blob.  */
+int
+read_blob_with_sha1(struct blob_descriptor *blob,
+                   const struct read_blob_callbacks *cbs)
+{
+       struct hasher_context hasher_ctx = {
+               .flags = VERIFY_BLOB_HASHES | COMPUTE_MISSING_BLOB_HASHES,
+               .cbs = *cbs,
+       };
+       struct read_blob_callbacks hasher_cbs = {
+               .begin_blob     = hasher_begin_blob,
+               .consume_chunk  = hasher_consume_chunk,
+               .end_blob       = hasher_end_blob,
+               .ctx            = &hasher_ctx,
+       };
+       return read_blob_with_cbs(blob, &hasher_cbs);
+}
+
 static int
-extract_chunk_sha1_wrapper(const void *chunk, size_t chunk_size,
-                          void *_ctx)
+read_blobs_in_solid_resource(struct blob_descriptor *first_blob,
+                            struct blob_descriptor *last_blob,
+                            size_t blob_count,
+                            size_t list_head_offset,
+                            const struct read_blob_callbacks *sink_cbs)
 {
-       struct extract_ctx *ctx = _ctx;
+       struct data_range *ranges;
+       bool ranges_malloced;
+       struct blob_descriptor *cur_blob;
+       size_t i;
+       int ret;
+       u64 ranges_alloc_size;
+
+       /* Set up the data ranges array (one range per blob to read); this way
+        * read_compressed_wim_resource() does not need to be aware of blobs.
+        */
+
+       ranges_alloc_size = (u64)blob_count * sizeof(ranges[0]);
 
-       sha1_update(&ctx->sha_ctx, chunk, chunk_size);
-       return ctx->extract_chunk(chunk, chunk_size, ctx->extract_chunk_arg);
+       if (unlikely((size_t)ranges_alloc_size != ranges_alloc_size))
+               goto oom;
+
+       if (ranges_alloc_size <= STACK_MAX) {
+               ranges = alloca(ranges_alloc_size);
+               ranges_malloced = false;
+       } else {
+               ranges = MALLOC(ranges_alloc_size);
+               if (unlikely(!ranges))
+                       goto oom;
+               ranges_malloced = true;
+       }
+
+       for (i = 0, cur_blob = first_blob;
+            i < blob_count;
+            i++, cur_blob = next_blob(cur_blob, list_head_offset))
+       {
+               ranges[i].offset = cur_blob->offset_in_res;
+               ranges[i].size = cur_blob->size;
+       }
+
+       struct blobifier_context blobifier_ctx = {
+               .cbs                    = *sink_cbs,
+               .cur_blob               = first_blob,
+               .next_blob              = next_blob(first_blob, list_head_offset),
+               .cur_blob_offset        = 0,
+               .final_blob             = last_blob,
+               .list_head_offset       = list_head_offset,
+       };
+       struct read_blob_callbacks cbs = {
+               .consume_chunk  = blobifier_cb,
+               .ctx            = &blobifier_ctx,
+       };
+
+       ret = read_compressed_wim_resource(first_blob->rdesc, ranges,
+                                          blob_count, &cbs);
+
+       if (ranges_malloced)
+               FREE(ranges);
+
+       if (unlikely(ret && blobifier_ctx.cur_blob_offset != 0)) {
+               ret = call_end_blob(blobifier_ctx.cur_blob, ret,
+                                   &blobifier_ctx.cbs);
+       }
+       return ret;
+
+oom:
+       ERROR("Too many blobs in one resource!");
+       return WIMLIB_ERR_NOMEM;
 }
 
-/* Extracts the first @size bytes of a WIM resource to somewhere.  In the
- * process, the SHA1 message digest of the resource is checked if the full
- * resource is being extracted.
+/*
+ * Read a list of blobs, each of which may be in any supported location (e.g.
+ * in a WIM or in an external file).  This function optimizes the case where
+ * multiple blobs are combined into a single solid compressed WIM resource by
+ * reading the blobs in sequential order, only decompressing the solid resource
+ * one time.
+ *
+ * @blob_list
+ *     List of blobs to read.
+ * @list_head_offset
+ *     Offset of the `struct list_head' within each `struct blob_descriptor'
+ *     that makes up the @blob_list.
+ * @cbs
+ *     Callback functions to accept the blob data.
+ * @flags
+ *     Bitwise OR of zero or more of the following flags:
+ *
+ *     VERIFY_BLOB_HASHES:
+ *             For all blobs being read that have already had SHA-1 message
+ *             digests computed, calculate the SHA-1 message digest of the read
+ *             data and compare it with the previously computed value.  If they
+ *             do not match, return WIMLIB_ERR_INVALID_RESOURCE_HASH.
  *
- * @extract_chunk is a function that is called to extract each chunk of the
- * resource. */
+ *     COMPUTE_MISSING_BLOB_HASHES:
+ *             For all blobs being read that have not yet had their SHA-1
+ *             message digests computed, calculate and save their SHA-1 message
+ *             digests.
+ *
+ *     BLOB_LIST_ALREADY_SORTED:
+ *             @blob_list is already sorted in sequential order for reading.
+ *
+ * The callback functions are allowed to delete the current blob from the list
+ * if necessary.
+ *
+ * Returns 0 on success; a nonzero error code on failure.  Failure can occur due
+ * to an error reading the data or due to an error status being returned by any
+ * of the callback functions.
+ */
 int
-extract_wim_resource(const struct wim_lookup_table_entry *lte,
-                    u64 size,
-                    consume_data_callback_t extract_chunk,
-                    void *extract_chunk_arg)
+read_blob_list(struct list_head *blob_list, size_t list_head_offset,
+              const struct read_blob_callbacks *cbs, int flags)
 {
        int ret;
-       if (size == wim_resource_size(lte)) {
-               /* Do SHA1 */
-               struct extract_ctx ctx;
-               ctx.extract_chunk = extract_chunk;
-               ctx.extract_chunk_arg = extract_chunk_arg;
-               sha1_init(&ctx.sha_ctx);
-               ret = read_resource_prefix(lte, size,
-                                          extract_chunk_sha1_wrapper,
-                                          wim_resource_chunk_size(lte),
-                                          &ctx, 0);
-               if (ret == 0) {
-                       u8 hash[SHA1_HASH_SIZE];
-                       sha1_final(hash, &ctx.sha_ctx);
-                       if (!hashes_equal(hash, lte->hash)) {
-                               if (wimlib_print_errors) {
-                                       ERROR("Invalid SHA1 message digest "
-                                             "on the following WIM resource:");
-                                       print_lookup_table_entry(lte, stderr);
-                                       if (lte->resource_location == RESOURCE_IN_WIM)
-                                               ERROR("The WIM file appears to be corrupt!");
-                               }
-                               ret = WIMLIB_ERR_INVALID_RESOURCE_HASH;
+       struct list_head *cur, *next;
+       struct blob_descriptor *blob;
+       struct hasher_context *hasher_ctx;
+       struct read_blob_callbacks *sink_cbs;
+
+       if (!(flags & BLOB_LIST_ALREADY_SORTED)) {
+               ret = sort_blob_list_by_sequential_order(blob_list,
+                                                        list_head_offset);
+               if (ret)
+                       return ret;
+       }
+
+       if (flags & (VERIFY_BLOB_HASHES | COMPUTE_MISSING_BLOB_HASHES)) {
+               hasher_ctx = alloca(sizeof(*hasher_ctx));
+               *hasher_ctx = (struct hasher_context) {
+                       .flags  = flags,
+                       .cbs    = *cbs,
+               };
+               sink_cbs = alloca(sizeof(*sink_cbs));
+               *sink_cbs = (struct read_blob_callbacks) {
+                       .begin_blob     = hasher_begin_blob,
+                       .consume_chunk  = hasher_consume_chunk,
+                       .end_blob       = hasher_end_blob,
+                       .ctx            = hasher_ctx,
+               };
+       } else {
+               sink_cbs = (struct read_blob_callbacks *)cbs;
+       }
+
+       for (cur = blob_list->next, next = cur->next;
+            cur != blob_list;
+            cur = next, next = cur->next)
+       {
+               blob = (struct blob_descriptor*)((u8*)cur - list_head_offset);
+
+               if (blob->blob_location == BLOB_IN_WIM &&
+                   blob->size != blob->rdesc->uncompressed_size)
+               {
+                       struct blob_descriptor *blob_next, *blob_last;
+                       struct list_head *next2;
+                       size_t blob_count;
+
+                       /* The next blob is a proper subrange of a WIM
+                        * resource.  See if there are other blobs in the same
+                        * resource that need to be read.  Since
+                        * sort_blob_list_by_sequential_order() sorted the blobs
+                        * by offset in the WIM, this can be determined by
+                        * simply scanning forward in the list.  */
+
+                       blob_last = blob;
+                       blob_count = 1;
+                       for (next2 = next;
+                            next2 != blob_list
+                            && (blob_next = (struct blob_descriptor*)
+                                               ((u8*)next2 - list_head_offset),
+                                blob_next->blob_location == BLOB_IN_WIM
+                                && blob_next->rdesc == blob->rdesc);
+                            next2 = next2->next)
+                       {
+                               blob_last = blob_next;
+                               blob_count++;
+                       }
+                       if (blob_count > 1) {
+                               /* Reading multiple blobs combined into a single
+                                * WIM resource.  They are in the blob list,
+                                * sorted by offset; @blob specifies the first
+                                * blob in the resource that needs to be read
+                                * and @blob_last specifies the last blob in the
+                                * resource that needs to be read.  */
+                               next = next2;
+                               ret = read_blobs_in_solid_resource(blob, blob_last,
+                                                                  blob_count,
+                                                                  list_head_offset,
+                                                                  sink_cbs);
+                               if (ret)
+                                       return ret;
+                               continue;
                        }
                }
-       } else {
-               /* Don't do SHA1 */
-               ret = read_resource_prefix(lte, size, extract_chunk,
-                                          wim_resource_chunk_size(lte),
-                                          extract_chunk_arg, 0);
+
+               ret = read_blob_with_cbs(blob, sink_cbs);
+               if (unlikely(ret && ret != BEGIN_BLOB_STATUS_SKIP_BLOB))
+                       return ret;
        }
-       return ret;
+       return 0;
 }
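/*
 * Usage sketch for read_blob_list() (hypothetical member and callback names,
 * not wimlib API): the caller links its blob_descriptors through some `struct
 * list_head' member and passes that member's offset along with the callbacks
 * that should receive the data.
 *
 *      struct read_blob_callbacks cbs = {
 *              .consume_chunk  = my_consume_chunk,
 *              .ctx            = my_ctx,
 *      };
 *      ret = read_blob_list(&my_blob_list,
 *                           offsetof(struct blob_descriptor, my_list_node),
 *                           &cbs, VERIFY_BLOB_HASHES);
 *
 * With VERIFY_BLOB_HASHES, blobs whose SHA-1 digests are already known are
 * verified as they are read; COMPUTE_MISSING_BLOB_HASHES instead fills in the
 * digests of unhashed blobs.
 */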
 
 static int
-extract_wim_chunk_to_fd(const void *buf, size_t len, void *_fd_p)
+extract_chunk_to_fd(const void *chunk, size_t size, void *_fd)
 {
-       struct filedes *fd = _fd_p;
-       int ret = full_write(fd, buf, len);
-       if (ret)
+       struct filedes *fd = _fd;
+       int ret = full_write(fd, chunk, size);
+       if (unlikely(ret))
                ERROR_WITH_ERRNO("Error writing to file descriptor");
        return ret;
 }
 
+/* Extract the first @size bytes of the specified blob to the specified file
+ * descriptor.  This does *not* check the SHA-1 message digest.  */
 int
-extract_wim_resource_to_fd(const struct wim_lookup_table_entry *lte,
-                          struct filedes *fd, u64 size)
+extract_blob_prefix_to_fd(struct blob_descriptor *blob, u64 size,
+                         struct filedes *fd)
 {
-       return extract_wim_resource(lte, size, extract_wim_chunk_to_fd, fd);
+       struct read_blob_callbacks cbs = {
+               .consume_chunk  = extract_chunk_to_fd,
+               .ctx            = fd,
+       };
+       return read_blob_prefix(blob, size, &cbs);
 }
 
-
-static int
-sha1_chunk(const void *buf, size_t len, void *ctx)
+/* Extract the full uncompressed contents of the specified blob to the specified
+ * file descriptor.  This checks the SHA-1 message digest.  */
+int
+extract_blob_to_fd(struct blob_descriptor *blob, struct filedes *fd)
 {
-       sha1_update(ctx, buf, len);
-       return 0;
+       struct read_blob_callbacks cbs = {
+               .consume_chunk  = extract_chunk_to_fd,
+               .ctx            = fd,
+       };
+       return read_blob_with_sha1(blob, &cbs);
 }
 
-/* Calculate the SHA1 message digest of a stream. */
+/* Calculate the SHA-1 message digest of a blob and store it in @blob->hash.  */
 int
-sha1_resource(struct wim_lookup_table_entry *lte)
+sha1_blob(struct blob_descriptor *blob)
 {
-       int ret;
-       SHA_CTX sha_ctx;
-
-       sha1_init(&sha_ctx);
-       ret = read_resource_prefix(lte, wim_resource_size(lte),
-                                  sha1_chunk, wim_resource_chunk_size(lte),
-                                  &sha_ctx, 0);
-       if (ret == 0)
-               sha1_final(lte->hash, &sha_ctx);
-       return ret;
+       struct read_blob_callbacks cbs = {
+       };
+       return read_blob_with_sha1(blob, &cbs);
 }
 
-/* Translates a WIM resource entry from the on-disk format to an in-memory
- * format. */
+/*
+ * Convert a short WIM resource header to a stand-alone WIM resource descriptor.
+ *
+ * Note: for solid resources, some fields still need to be overridden.
+ */
 void
-get_resource_entry(const struct resource_entry_disk *disk_entry,
-                  struct resource_entry *entry)
+wim_reshdr_to_desc(const struct wim_reshdr *reshdr, WIMStruct *wim,
+                  struct wim_resource_descriptor *rdesc)
 {
-       /* Note: disk_entry may not be 8 byte aligned--- in that case, the
-        * offset and original_size members will be unaligned.  (This should be
-        * okay since `struct resource_entry_disk' is declared as packed.) */
-
-       /* Read the size and flags into a bitfield portably... */
-       entry->size = (((u64)disk_entry->size[0] <<  0) |
-                      ((u64)disk_entry->size[1] <<  8) |
-                      ((u64)disk_entry->size[2] << 16) |
-                      ((u64)disk_entry->size[3] << 24) |
-                      ((u64)disk_entry->size[4] << 32) |
-                      ((u64)disk_entry->size[5] << 40) |
-                      ((u64)disk_entry->size[6] << 48));
-       entry->flags = disk_entry->flags;
-       entry->offset = le64_to_cpu(disk_entry->offset);
-       entry->original_size = le64_to_cpu(disk_entry->original_size);
-
-       /* offset and original_size are truncated to 62 bits to avoid possible
-        * overflows, when converting to a signed 64-bit integer (off_t) or when
-        * adding size or original_size.  This is okay since no one would ever
-        * actually have a WIM bigger than 4611686018427387903 bytes... */
-       if (entry->offset & 0xc000000000000000ULL) {
-               WARNING("Truncating offset in resource entry");
-               entry->offset &= 0x3fffffffffffffffULL;
-       }
-       if (entry->original_size & 0xc000000000000000ULL) {
-               WARNING("Truncating original_size in resource entry");
-               entry->original_size &= 0x3fffffffffffffffULL;
+       rdesc->wim = wim;
+       rdesc->offset_in_wim = reshdr->offset_in_wim;
+       rdesc->size_in_wim = reshdr->size_in_wim;
+       rdesc->uncompressed_size = reshdr->uncompressed_size;
+       INIT_LIST_HEAD(&rdesc->blob_list);
+       rdesc->flags = reshdr->flags;
+       rdesc->is_pipable = wim_is_pipable(wim);
+       if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) {
+               rdesc->compression_type = wim->compression_type;
+               rdesc->chunk_size = wim->chunk_size;
+       } else {
+               rdesc->compression_type = WIMLIB_COMPRESSION_TYPE_NONE;
+               rdesc->chunk_size = 0;
        }
 }
 
-/* Translates a WIM resource entry from an in-memory format into the on-disk
- * format. */
+/*
+ * Convert the short WIM resource header @reshdr to a stand-alone WIM resource
+ * descriptor @rdesc, then set @blob to consist of that entire resource.  This
+ * should only be used for non-solid resources!
+ */
+void
+wim_reshdr_to_desc_and_blob(const struct wim_reshdr *reshdr, WIMStruct *wim,
+                           struct wim_resource_descriptor *rdesc,
+                           struct blob_descriptor *blob)
+{
+       wim_reshdr_to_desc(reshdr, wim, rdesc);
+       blob->size = rdesc->uncompressed_size;
+       blob_set_is_located_in_wim_resource(blob, rdesc, 0);
+}
+
+/* Import a WIM resource header from the on-disk format.  */
+void
+get_wim_reshdr(const struct wim_reshdr_disk *disk_reshdr,
+              struct wim_reshdr *reshdr)
+{
+       reshdr->offset_in_wim = le64_to_cpu(disk_reshdr->offset_in_wim);
+       reshdr->size_in_wim = (((u64)disk_reshdr->size_in_wim[0] <<  0) |
+                              ((u64)disk_reshdr->size_in_wim[1] <<  8) |
+                              ((u64)disk_reshdr->size_in_wim[2] << 16) |
+                              ((u64)disk_reshdr->size_in_wim[3] << 24) |
+                              ((u64)disk_reshdr->size_in_wim[4] << 32) |
+                              ((u64)disk_reshdr->size_in_wim[5] << 40) |
+                              ((u64)disk_reshdr->size_in_wim[6] << 48));
+       reshdr->uncompressed_size = le64_to_cpu(disk_reshdr->uncompressed_size);
+       reshdr->flags = disk_reshdr->flags;
+}
+
+/* Export a WIM resource header to the on-disk format.  */
 void
-put_resource_entry(const struct resource_entry *entry,
-                  struct resource_entry_disk *disk_entry)
+put_wim_reshdr(const struct wim_reshdr *reshdr,
+              struct wim_reshdr_disk *disk_reshdr)
 {
-       /* Note: disk_entry may not be 8 byte aligned--- in that case, the
-        * offset and original_size members will be unaligned.  (This should be
-        * okay since `struct resource_entry_disk' is declared as packed.) */
-       u64 size = entry->size;
-
-       disk_entry->size[0] = size >>  0;
-       disk_entry->size[1] = size >>  8;
-       disk_entry->size[2] = size >> 16;
-       disk_entry->size[3] = size >> 24;
-       disk_entry->size[4] = size >> 32;
-       disk_entry->size[5] = size >> 40;
-       disk_entry->size[6] = size >> 48;
-       disk_entry->flags = entry->flags;
-       disk_entry->offset = cpu_to_le64(entry->offset);
-       disk_entry->original_size = cpu_to_le64(entry->original_size);
+       disk_reshdr->size_in_wim[0] = reshdr->size_in_wim  >>  0;
+       disk_reshdr->size_in_wim[1] = reshdr->size_in_wim  >>  8;
+       disk_reshdr->size_in_wim[2] = reshdr->size_in_wim  >> 16;
+       disk_reshdr->size_in_wim[3] = reshdr->size_in_wim  >> 24;
+       disk_reshdr->size_in_wim[4] = reshdr->size_in_wim  >> 32;
+       disk_reshdr->size_in_wim[5] = reshdr->size_in_wim  >> 40;
+       disk_reshdr->size_in_wim[6] = reshdr->size_in_wim  >> 48;
+       disk_reshdr->flags = reshdr->flags;
+       disk_reshdr->offset_in_wim = cpu_to_le64(reshdr->offset_in_wim);
+       disk_reshdr->uncompressed_size = cpu_to_le64(reshdr->uncompressed_size);
 }
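/*
 * The on-disk header stores size_in_wim in 7 little-endian bytes (a 56-bit
 * field) alongside a separate one-byte flags field, while offset_in_wim and
 * uncompressed_size are plain le64 fields.  A round-trip sketch (hypothetical
 * helper, not part of wimlib) using only the two functions above:
 */
static void
sketch_check_reshdr_roundtrip(const struct wim_reshdr *orig)
{
        struct wim_reshdr_disk disk;
        struct wim_reshdr back;

        put_wim_reshdr(orig, &disk);
        get_wim_reshdr(&disk, &back);

        /* size_in_wim survives the round trip as long as it fits in 56 bits */
        wimlib_assert(back.size_in_wim ==
                      (orig->size_in_wim & 0x00FFFFFFFFFFFFFFULL));
        wimlib_assert(back.offset_in_wim == orig->offset_in_wim);
        wimlib_assert(back.uncompressed_size == orig->uncompressed_size);
        wimlib_assert(back.flags == orig->flags);
}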