]> wimlib.net Git - wimlib/blobdiff - src/write.c
Improve helper functions for setting blob locations
[wimlib] / src / write.c
index 75193dfc78b3365529ab9f135b1dd13ba3e5cc88..6be215e3e76570bd38aa12fc28afec740911f92f 100644 (file)
@@ -6,22 +6,20 @@
  */
 
 /*
- * Copyright (C) 2012, 2013 Eric Biggers
+ * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
  *
- * This file is part of wimlib, a library for working with WIM files.
+ * This file is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 3 of the License, or (at your option) any
+ * later version.
  *
- * wimlib is free software; you can redistribute it and/or modify it under the
- * terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 3 of the License, or (at your option)
- * any later version.
- *
- * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
- * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
- * A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
  * details.
  *
- * You should have received a copy of the GNU General Public License
- * along with wimlib; if not, see http://www.gnu.org/licenses/.
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this file; if not, see http://www.gnu.org/licenses/.
  */
 
 #ifdef HAVE_CONFIG_H
 #  include <sys/file.h>
 #endif
 
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "wimlib/alloca.h"
+#include "wimlib/assert.h"
+#include "wimlib/blob_table.h"
+#include "wimlib/chunk_compressor.h"
 #include "wimlib/endianness.h"
 #include "wimlib/error.h"
 #include "wimlib/file_io.h"
 #include "wimlib/header.h"
+#include "wimlib/inode.h"
 #include "wimlib/integrity.h"
-#include "wimlib/lookup_table.h"
 #include "wimlib/metadata.h"
+#include "wimlib/paths.h"
+#include "wimlib/progress.h"
 #include "wimlib/resource.h"
-#include "wimlib/write.h"
-#include "wimlib/xml.h"
-
+#include "wimlib/solid.h"
 #ifdef __WIN32__
-#  include "wimlib/win32.h" /* win32_get_number_of_processors() */
+#  include "wimlib/win32.h" /* win32_rename_replacement() */
 #endif
+#include "wimlib/write.h"
+#include "wimlib/xml.h"
 
-#ifdef ENABLE_MULTITHREADED_COMPRESSION
-#  include <pthread.h>
-#endif
-
-#include <unistd.h>
-#include <fcntl.h>
-#include <errno.h>
-
-#ifdef WITH_NTFS_3G
-#  include <time.h>
-#  include <ntfs-3g/attrib.h>
-#  include <ntfs-3g/inode.h>
-#  include <ntfs-3g/dir.h>
-#endif
 
-#ifdef HAVE_ALLOCA_H
-#  include <alloca.h>
-#else
-#  include <stdlib.h>
-#endif
+/* wimlib internal flags used when writing resources.  */
+#define WRITE_RESOURCE_FLAG_RECOMPRESS         0x00000001
+#define WRITE_RESOURCE_FLAG_PIPABLE            0x00000002
+#define WRITE_RESOURCE_FLAG_SOLID              0x00000004
+#define WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE        0x00000008
+#define WRITE_RESOURCE_FLAG_SOLID_SORT         0x00000010
 
-#include <limits.h>
+static inline int
+write_flags_to_resource_flags(int write_flags)
+{
+       int write_resource_flags = 0;
 
-#ifndef __WIN32__
-#  include <sys/uio.h> /* for `struct iovec' */
-#endif
+       if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
+               write_resource_flags |= WRITE_RESOURCE_FLAG_RECOMPRESS;
+       if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
+               write_resource_flags |= WRITE_RESOURCE_FLAG_PIPABLE;
+       if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
+               write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID;
+       if (write_flags & WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES)
+               write_resource_flags |= WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE;
+       if ((write_flags & (WIMLIB_WRITE_FLAG_SOLID |
+                           WIMLIB_WRITE_FLAG_NO_SOLID_SORT)) ==
+           WIMLIB_WRITE_FLAG_SOLID)
+               write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID_SORT;
+       return write_resource_flags;
+}
 
-/* Chunk table that's located at the beginning of each compressed resource in
- * the WIM.  (This is not the on-disk format; the on-disk format just has an
- * array of offsets.) */
-struct chunk_table {
-       off_t file_offset;
-       u64 original_resource_size;
-       u64 num_chunks;
-       u64 table_disk_size;
-       unsigned bytes_per_chunk_entry;
-       void *cur_offset_p;
-       union {
-               u32 cur_offset_u32;
-               u64 cur_offset_u64;
-       };
-       /* Beginning of chunk offsets, in either 32-bit or 64-bit little endian
-        * integers, including the first offset of 0, which will not be written.
-        * */
-       u8 offsets[] _aligned_attribute(8);
+struct filter_context {
+       int write_flags;
+       WIMStruct *wim;
 };
 
 /*
- * Allocates and initializes a chunk table, and reserves space for it in the
- * output file.
+ * Determine whether the specified blob should be filtered out from the write.
+ *
+ * Return values:
+ *
+ *  < 0 : The blob should be hard-filtered; that is, not included in the output
+ *       WIM file at all.
+ *    0 : The blob should not be filtered out.
+ *  > 0 : The blob should be soft-filtered; that is, it already exists in the
+ *       WIM file and may not need to be written again.
  */
 static int
-begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte,
-                            int out_fd,
-                            off_t file_offset,
-                            struct chunk_table **chunk_tab_ret)
-{
-       u64 size = wim_resource_size(lte);
-       u64 num_chunks = wim_resource_chunks(lte);
-       unsigned bytes_per_chunk_entry = (size > (1ULL << 32)) ? 8 : 4;
-       size_t alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64);
-       struct chunk_table *chunk_tab = CALLOC(1, alloc_size);
-
-       DEBUG("Beginning chunk table for stream with size %"PRIu64, size);
-
-       if (!chunk_tab) {
-               ERROR("Failed to allocate chunk table for %"PRIu64" byte "
-                     "resource", size);
-               return WIMLIB_ERR_NOMEM;
-       }
-       chunk_tab->file_offset = file_offset;
-       chunk_tab->num_chunks = num_chunks;
-       chunk_tab->original_resource_size = size;
-       chunk_tab->bytes_per_chunk_entry = bytes_per_chunk_entry;
-       chunk_tab->table_disk_size = chunk_tab->bytes_per_chunk_entry *
-                                    (num_chunks - 1);
-       chunk_tab->cur_offset_p = chunk_tab->offsets;
-
-       /* We don't know the correct offsets yet; this just writes zeroes to
-        * reserve space for the table, so we can go back to it later after
-        * we've written the compressed chunks following it. */
-       if (full_write(out_fd, chunk_tab->offsets,
-                      chunk_tab->table_disk_size) != chunk_tab->table_disk_size)
-       {
-               ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
-                                "file resource");
-               FREE(chunk_tab);
-               return WIMLIB_ERR_WRITE;
-       }
-       *chunk_tab_ret = chunk_tab;
-       return 0;
-}
-
-/* Add the offset for the next chunk to the chunk table being constructed for a
- * compressed stream. */
-static void
-chunk_tab_record_chunk(struct chunk_table *chunk_tab, unsigned out_chunk_size)
+blob_filtered(const struct blob_descriptor *blob,
+             const struct filter_context *ctx)
 {
-       if (chunk_tab->bytes_per_chunk_entry == 4) {
-               *(le32*)chunk_tab->cur_offset_p = cpu_to_le32(chunk_tab->cur_offset_u32);
-               chunk_tab->cur_offset_p = (le32*)chunk_tab->cur_offset_p + 1;
-               chunk_tab->cur_offset_u32 += out_chunk_size;
-       } else {
-               *(le64*)chunk_tab->cur_offset_p = cpu_to_le64(chunk_tab->cur_offset_u64);
-               chunk_tab->cur_offset_p = (le64*)chunk_tab->cur_offset_p + 1;
-               chunk_tab->cur_offset_u64 += out_chunk_size;
-       }
-}
+       int write_flags;
+       WIMStruct *wim;
 
-/*
- * compress_func_t- Pointer to a function to compresses a chunk
- *                  of a WIM resource.  This may be either
- *                  wimlib_xpress_compress() (xpress-compress.c) or
- *                  wimlib_lzx_compress() (lzx-compress.c).
- *
- * @chunk:       Uncompressed data of the chunk.
- * @chunk_size:          Size of the uncompressed chunk, in bytes.
- * @out:         Pointer to output buffer of size at least (@chunk_size - 1) bytes.
- *
- * Returns the size of the compressed data written to @out in bytes, or 0 if the
- * data could not be compressed to (@chunk_size - 1) bytes or fewer.
- *
- * As a special requirement, the compression code is optimized for the WIM
- * format and therefore requires (@chunk_size <= 32768).
- *
- * As another special requirement, the compression code will read up to 8 bytes
- * off the end of the @chunk array for performance reasons.  The values of these
- * bytes will not affect the output of the compression, but the calling code
- * must make sure that the buffer holding the uncompressed chunk is actually at
- * least (@chunk_size + 8) bytes, or at least that these extra bytes are in
- * mapped memory that will not cause a memory access violation if accessed.
- */
-typedef unsigned (*compress_func_t)(const void *chunk, unsigned chunk_size,
-                                   void *out);
+       if (ctx == NULL)
+               return 0;
 
-static compress_func_t
-get_compress_func(int out_ctype)
-{
-       if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX)
-               return wimlib_lzx_compress;
-       else
-               return wimlib_xpress_compress;
-}
+       write_flags = ctx->write_flags;
+       wim = ctx->wim;
+
+       if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE &&
+           blob->blob_location == BLOB_IN_WIM &&
+           blob->rdesc->wim == wim)
+               return 1;
+
+       if (write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS &&
+           blob->blob_location == BLOB_IN_WIM &&
+           blob->rdesc->wim != wim)
+               return -1;
 
-/*
- * Writes a chunk of a WIM resource to an output file.
- *
- * @chunk:       Uncompressed data of the chunk.
- * @chunk_size:          Size of the chunk (<= WIM_CHUNK_SIZE)
- * @out_fd:      File descriptor to write the chunk to.
- * @compress:     Compression function to use (NULL if writing uncompressed
- *                     data).
- * @chunk_tab:   Pointer to chunk table being created.  It is updated with the
- *                     offset of the chunk we write.
- *
- * Returns 0 on success; nonzero on failure.
- */
-static int
-write_wim_resource_chunk(const void * restrict chunk,
-                        unsigned chunk_size,
-                        int out_fd,
-                        compress_func_t compress,
-                        struct chunk_table * restrict chunk_tab)
-{
-       const void *out_chunk;
-       unsigned out_chunk_size;
-       if (compress) {
-               void *compressed_chunk = alloca(chunk_size);
-
-               out_chunk_size = (*compress)(chunk, chunk_size, compressed_chunk);
-               if (out_chunk_size) {
-                       /* Write compressed */
-                       out_chunk = compressed_chunk;
-               } else {
-                       /* Write uncompressed */
-                       out_chunk = chunk;
-                       out_chunk_size = chunk_size;
-               }
-               chunk_tab_record_chunk(chunk_tab, out_chunk_size);
-       } else {
-               /* Write uncompressed */
-               out_chunk = chunk;
-               out_chunk_size = chunk_size;
-       }
-       if (full_write(out_fd, out_chunk, out_chunk_size) != out_chunk_size) {
-               ERROR_WITH_ERRNO("Failed to write WIM resource chunk");
-               return WIMLIB_ERR_WRITE;
-       }
        return 0;
 }
 
-/*
- * Finishes a WIM chunk table and writes it to the output file at the correct
- * offset.
- *
- * The final size of the full compressed resource is returned in the
- * @compressed_size_p.
- */
-static int
-finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab,
-                             int out_fd, u64 *compressed_size_p)
-{
-       size_t bytes_written;
-
-       bytes_written = full_pwrite(out_fd,
-                                   chunk_tab->offsets + chunk_tab->bytes_per_chunk_entry,
-                                   chunk_tab->table_disk_size,
-                                   chunk_tab->file_offset);
-       if (bytes_written != chunk_tab->table_disk_size) {
-               ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
-                                "file resource");
-               return WIMLIB_ERR_WRITE;
-       }
-       if (chunk_tab->bytes_per_chunk_entry == 4)
-               *compressed_size_p = chunk_tab->cur_offset_u32 + chunk_tab->table_disk_size;
-       else
-               *compressed_size_p = chunk_tab->cur_offset_u64 + chunk_tab->table_disk_size;
-       return 0;
+static bool
+blob_hard_filtered(const struct blob_descriptor *blob,
+                  struct filter_context *ctx)
+{
+       return blob_filtered(blob, ctx) < 0;
 }
 
-static int
-seek_and_truncate(int out_fd, off_t offset)
+static inline int
+may_soft_filter_blobs(const struct filter_context *ctx)
 {
-       if (lseek(out_fd, offset, SEEK_SET) == -1 ||
-           ftruncate(out_fd, offset))
-       {
-               ERROR_WITH_ERRNO("Failed to truncate output WIM file");
-               return WIMLIB_ERR_WRITE;
-       } else {
+       if (ctx == NULL)
                return 0;
-       }
+       return ctx->write_flags & WIMLIB_WRITE_FLAG_OVERWRITE;
 }
 
-static int
-finalize_and_check_sha1(SHA_CTX * restrict sha_ctx,
-                       struct wim_lookup_table_entry * restrict lte)
-{
-       u8 md[SHA1_HASH_SIZE];
-       sha1_final(md, sha_ctx);
-       if (lte->unhashed) {
-               copy_hash(lte->hash, md);
-       } else if (!hashes_equal(md, lte->hash)) {
-               ERROR("WIM resource has incorrect hash!");
-               if (lte_filename_valid(lte)) {
-                       ERROR("We were reading it from \"%"TS"\"; maybe "
-                             "it changed while we were reading it.",
-                             lte->file_on_disk);
-               }
-               return WIMLIB_ERR_INVALID_RESOURCE_HASH;
-       }
-       return 0;
+static inline int
+may_hard_filter_blobs(const struct filter_context *ctx)
+{
+       if (ctx == NULL)
+               return 0;
+       return ctx->write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS;
 }
 
+static inline int
+may_filter_blobs(const struct filter_context *ctx)
+{
+       return (may_soft_filter_blobs(ctx) || may_hard_filter_blobs(ctx));
+}
 
-struct write_resource_ctx {
-       compress_func_t compress;
-       struct chunk_table *chunk_tab;
-       int out_fd;
-       SHA_CTX sha_ctx;
-       bool doing_sha;
-};
-
-static int
-write_resource_cb(const void *restrict chunk, size_t chunk_size,
-                 void *restrict _ctx)
+/* Return true if the specified resource is compressed and the compressed data
+ * can be reused with the specified output parameters.  */
+static bool
+can_raw_copy(const struct blob_descriptor *blob,
+            int write_resource_flags, int out_ctype, u32 out_chunk_size)
 {
-       struct write_resource_ctx *ctx = _ctx;
+       const struct wim_resource_descriptor *rdesc;
 
-       if (ctx->doing_sha)
-               sha1_update(&ctx->sha_ctx, chunk, chunk_size);
-       return write_wim_resource_chunk(chunk, chunk_size,
-                                       ctx->out_fd, ctx->compress,
-                                       ctx->chunk_tab);
-}
+       if (write_resource_flags & WRITE_RESOURCE_FLAG_RECOMPRESS)
+               return false;
 
-/*
- * Write a resource to an output WIM.
- *
- * @lte:  Lookup table entry for the resource, which could be in another WIM,
- *        in an external file, or in another location.
- *
- * @out_fd:  File descriptor opened to the output WIM.
- *
- * @out_ctype:  One of the WIMLIB_COMPRESSION_TYPE_* constants to indicate
- *              which compression algorithm to use.
- *
- * @out_res_entry:  On success, this is filled in with the offset, flags,
- *                  compressed size, and uncompressed size of the resource
- *                  in the output WIM.
- *
- * @flags:  WIMLIB_RESOURCE_FLAG_RECOMPRESS to force data to be recompressed
- *          even if it could otherwise be copied directly from the input.
- *
- * Additional notes:  The SHA1 message digest of the uncompressed data is
- * calculated (except when doing a raw copy --- see below).  If the @unhashed
- * flag is set on the lookup table entry, this message digest is simply copied
- * to it; otherwise, the message digest is compared with the existing one, and
- * the function will fail if they do not match.
- */
-int
-write_wim_resource(struct wim_lookup_table_entry *lte,
-                  int out_fd, int out_ctype,
-                  struct resource_entry *out_res_entry,
-                  int flags)
-{
-       struct write_resource_ctx write_ctx;
-       u64 read_size;
-       u64 new_size;
-       off_t offset;
-       int ret;
+       if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
+               return false;
 
-       flags &= ~WIMLIB_RESOURCE_FLAG_RECOMPRESS;
+       if (blob->blob_location != BLOB_IN_WIM)
+               return false;
 
-       /* Get current position in output WIM */
-       offset = filedes_offset(out_fd);
-       if (offset == -1) {
-               ERROR_WITH_ERRNO("Can't get position in output WIM");
-               return WIMLIB_ERR_WRITE;
-       }
+       rdesc = blob->rdesc;
 
-       /* If we are not forcing the data to be recompressed, and the input
-        * resource is located in a WIM with the same compression type as that
-        * desired other than no compression, we can simply copy the compressed
-        * data without recompressing it.  This also means we must skip
-        * calculating the SHA1, as we never will see the uncompressed data. */
-       if (!(flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) &&
-           lte->resource_location == RESOURCE_IN_WIM &&
-           out_ctype != WIMLIB_COMPRESSION_TYPE_NONE &&
-           lte->wim->compression_type == out_ctype)
-       {
-               flags |= WIMLIB_RESOURCE_FLAG_RAW;
-               write_ctx.doing_sha = false;
-               read_size = lte->resource_entry.size;
-       } else {
-               write_ctx.doing_sha = true;
-               sha1_init(&write_ctx.sha_ctx);
-               read_size = lte->resource_entry.original_size;
-       }
+       if (rdesc->is_pipable != !!(write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE))
+               return false;
 
-       /* Initialize the chunk table and set the compression function if
-        * compressing the resource. */
-       if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
-           (flags & WIMLIB_RESOURCE_FLAG_RAW)) {
-               write_ctx.compress = NULL;
-               write_ctx.chunk_tab = NULL;
-       } else {
-               write_ctx.compress = get_compress_func(out_ctype);
-               ret = begin_wim_resource_chunk_tab(lte, out_fd,
-                                                  offset,
-                                                  &write_ctx.chunk_tab);
-               if (ret)
-                       return ret;
+       if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) {
+               /* Normal compressed resource: Must use same compression type
+                * and chunk size.  */
+               return (rdesc->compression_type == out_ctype &&
+                       rdesc->chunk_size == out_chunk_size);
        }
 
-       /* Write the entire resource by reading the entire resource and feeding
-        * the data through the write_resource_cb function. */
-       write_ctx.out_fd = out_fd;
-try_write_again:
-       ret = read_resource_prefix(lte, read_size,
-                                  write_resource_cb, &write_ctx, flags);
-       if (ret)
-               goto out_free_chunk_tab;
+       if ((rdesc->flags & WIM_RESHDR_FLAG_SOLID) &&
+           (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
+       {
+               /* Solid resource: Such resources may contain multiple blobs,
+                * and in general only a subset of them need to be written.  As
+                * a heuristic, re-use the raw data if more than two-thirds the
+                * uncompressed size is being written.  */
 
-       /* Verify SHA1 message digest of the resource, or set the hash for the
-        * first time. */
-       if (write_ctx.doing_sha) {
-               ret = finalize_and_check_sha1(&write_ctx.sha_ctx, lte);
-               if (ret)
-                       goto out_free_chunk_tab;
-       }
-
-       out_res_entry->flags = lte->resource_entry.flags;
-       out_res_entry->original_size = wim_resource_size(lte);
-       out_res_entry->offset = offset;
-       if (flags & WIMLIB_RESOURCE_FLAG_RAW) {
-               /* Doing a raw write:  The new compressed size is the same as
-                * the compressed size in the other WIM. */
-               new_size = lte->resource_entry.size;
-       } else if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE) {
-               /* Using WIMLIB_COMPRESSION_TYPE_NONE:  The new compressed size
-                * is the original size. */
-               new_size = lte->resource_entry.original_size;
-               out_res_entry->flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
-       } else {
-               /* Using a different compression type:  Call
-                * finish_wim_resource_chunk_tab() and it will provide the new
-                * compressed size. */
-               ret = finish_wim_resource_chunk_tab(write_ctx.chunk_tab, out_fd,
-                                                   &new_size);
-               if (ret)
-                       goto out_free_chunk_tab;
-               if (new_size >= wim_resource_size(lte)) {
-                       /* Oops!  We compressed the resource to larger than the original
-                        * size.  Write the resource uncompressed instead. */
-                       DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
-                             "writing uncompressed instead",
-                             wim_resource_size(lte), new_size);
-                       ret = seek_and_truncate(out_fd, offset);
-                       if (ret)
-                               goto out_free_chunk_tab;
-                       write_ctx.compress = NULL;
-                       write_ctx.doing_sha = false;
-                       out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
-                       goto try_write_again;
-               }
-               out_res_entry->flags |= WIM_RESHDR_FLAG_COMPRESSED;
-       }
-       out_res_entry->size = new_size;
-       ret = 0;
-out_free_chunk_tab:
-       FREE(write_ctx.chunk_tab);
-       return ret;
-}
+               /* Note: solid resources contain a header that specifies the
+                * compression type and chunk size; therefore we don't need to
+                * check if they are compatible with @out_ctype and
+                * @out_chunk_size.  */
 
-#ifdef ENABLE_MULTITHREADED_COMPRESSION
+               struct blob_descriptor *res_blob;
+               u64 write_size = 0;
 
-/* Blocking shared queue (solves the producer-consumer problem) */
-struct shared_queue {
-       unsigned size;
-       unsigned front;
-       unsigned back;
-       unsigned filled_slots;
-       void **array;
-       pthread_mutex_t lock;
-       pthread_cond_t msg_avail_cond;
-       pthread_cond_t space_avail_cond;
-};
+               list_for_each_entry(res_blob, &rdesc->blob_list, rdesc_node)
+                       if (res_blob->will_be_in_output_wim)
+                               write_size += res_blob->size;
 
-static int
-shared_queue_init(struct shared_queue *q, unsigned size)
-{
-       wimlib_assert(size != 0);
-       q->array = CALLOC(sizeof(q->array[0]), size);
-       if (!q->array)
-               goto err;
-       q->filled_slots = 0;
-       q->front = 0;
-       q->back = size - 1;
-       q->size = size;
-       if (pthread_mutex_init(&q->lock, NULL)) {
-               ERROR_WITH_ERRNO("Failed to initialize mutex");
-               goto err;
-       }
-       if (pthread_cond_init(&q->msg_avail_cond, NULL)) {
-               ERROR_WITH_ERRNO("Failed to initialize condition variable");
-               goto err_destroy_lock;
-       }
-       if (pthread_cond_init(&q->space_avail_cond, NULL)) {
-               ERROR_WITH_ERRNO("Failed to initialize condition variable");
-               goto err_destroy_msg_avail_cond;
+               return (write_size > rdesc->uncompressed_size * 2 / 3);
        }
-       return 0;
-err_destroy_msg_avail_cond:
-       pthread_cond_destroy(&q->msg_avail_cond);
-err_destroy_lock:
-       pthread_mutex_destroy(&q->lock);
-err:
-       return WIMLIB_ERR_NOMEM;
+
+       return false;
 }
 
-static void
-shared_queue_destroy(struct shared_queue *q)
+static u32
+reshdr_flags_for_blob(const struct blob_descriptor *blob)
 {
-       FREE(q->array);
-       pthread_mutex_destroy(&q->lock);
-       pthread_cond_destroy(&q->msg_avail_cond);
-       pthread_cond_destroy(&q->space_avail_cond);
+       u32 reshdr_flags = 0;
+       if (blob->is_metadata)
+               reshdr_flags |= WIM_RESHDR_FLAG_METADATA;
+       return reshdr_flags;
 }
 
 static void
-shared_queue_put(struct shared_queue *q, void *obj)
+blob_set_out_reshdr_for_reuse(struct blob_descriptor *blob)
 {
-       pthread_mutex_lock(&q->lock);
-       while (q->filled_slots == q->size)
-               pthread_cond_wait(&q->space_avail_cond, &q->lock);
+       const struct wim_resource_descriptor *rdesc;
 
-       q->back = (q->back + 1) % q->size;
-       q->array[q->back] = obj;
-       q->filled_slots++;
+       wimlib_assert(blob->blob_location == BLOB_IN_WIM);
+       rdesc = blob->rdesc;
 
-       pthread_cond_broadcast(&q->msg_avail_cond);
-       pthread_mutex_unlock(&q->lock);
+       if (rdesc->flags & WIM_RESHDR_FLAG_SOLID) {
+               blob->out_reshdr.offset_in_wim = blob->offset_in_res;
+               blob->out_reshdr.uncompressed_size = 0;
+               blob->out_reshdr.size_in_wim = blob->size;
+
+               blob->out_res_offset_in_wim = rdesc->offset_in_wim;
+               blob->out_res_size_in_wim = rdesc->size_in_wim;
+               blob->out_res_uncompressed_size = rdesc->uncompressed_size;
+       } else {
+               blob->out_reshdr.offset_in_wim = rdesc->offset_in_wim;
+               blob->out_reshdr.uncompressed_size = rdesc->uncompressed_size;
+               blob->out_reshdr.size_in_wim = rdesc->size_in_wim;
+       }
+       blob->out_reshdr.flags = rdesc->flags;
 }
 
-static void *
-shared_queue_get(struct shared_queue *q)
-{
-       void *obj;
 
-       pthread_mutex_lock(&q->lock);
-       while (q->filled_slots == 0)
-               pthread_cond_wait(&q->msg_avail_cond, &q->lock);
+/* Write the header for a blob in a pipable WIM.  */
+static int
+write_pwm_blob_header(const struct blob_descriptor *blob,
+                     struct filedes *out_fd, bool compressed)
+{
+       struct pwm_blob_hdr blob_hdr;
+       u32 reshdr_flags;
+       int ret;
 
-       obj = q->array[q->front];
-       q->array[q->front] = NULL;
-       q->front = (q->front + 1) % q->size;
-       q->filled_slots--;
+       wimlib_assert(!blob->unhashed);
 
-       pthread_cond_broadcast(&q->space_avail_cond);
-       pthread_mutex_unlock(&q->lock);
-       return obj;
+       blob_hdr.magic = cpu_to_le64(PWM_BLOB_MAGIC);
+       blob_hdr.uncompressed_size = cpu_to_le64(blob->size);
+       copy_hash(blob_hdr.hash, blob->hash);
+       reshdr_flags = reshdr_flags_for_blob(blob);
+       if (compressed)
+               reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED;
+       blob_hdr.flags = cpu_to_le32(reshdr_flags);
+       ret = full_write(out_fd, &blob_hdr, sizeof(blob_hdr));
+       if (ret)
+               ERROR_WITH_ERRNO("Write error");
+       return ret;
 }
 
-struct compressor_thread_params {
-       struct shared_queue *res_to_compress_queue;
-       struct shared_queue *compressed_res_queue;
-       compress_func_t compress;
-};
-
-#define MAX_CHUNKS_PER_MSG 2
-
-struct message {
-       struct wim_lookup_table_entry *lte;
-       u8 *uncompressed_chunks[MAX_CHUNKS_PER_MSG];
-       u8 *compressed_chunks[MAX_CHUNKS_PER_MSG];
-       unsigned uncompressed_chunk_sizes[MAX_CHUNKS_PER_MSG];
-       struct iovec out_chunks[MAX_CHUNKS_PER_MSG];
-       size_t total_out_bytes;
-       unsigned num_chunks;
-       struct list_head list;
-       bool complete;
-       u64 begin_chunk;
+struct write_blobs_progress_data {
+       wimlib_progress_func_t progfunc;
+       void *progctx;
+       union wimlib_progress_info progress;
+       uint64_t next_progress;
 };
 
-static void
-compress_chunks(struct message *msg, compress_func_t compress)
-{
-       msg->total_out_bytes = 0;
-       for (unsigned i = 0; i < msg->num_chunks; i++) {
-               unsigned len = compress(msg->uncompressed_chunks[i],
-                                       msg->uncompressed_chunk_sizes[i],
-                                       msg->compressed_chunks[i]);
-               void *out_chunk;
-               unsigned out_len;
-               if (len) {
-                       /* To be written compressed */
-                       out_chunk = msg->compressed_chunks[i];
-                       out_len = len;
-               } else {
-                       /* To be written uncompressed */
-                       out_chunk = msg->uncompressed_chunks[i];
-                       out_len = msg->uncompressed_chunk_sizes[i];
-               }
-               msg->out_chunks[i].iov_base = out_chunk;
-               msg->out_chunks[i].iov_len = out_len;
-               msg->total_out_bytes += out_len;
-       }
-}
-
-/* Compressor thread routine.  This is a lot simpler than the main thread
- * routine: just repeatedly get a group of chunks from the
- * res_to_compress_queue, compress them, and put them in the
- * compressed_res_queue.  A NULL pointer indicates that the thread should stop.
- * */
-static void *
-compressor_thread_proc(void *arg)
+static int
+do_write_blobs_progress(struct write_blobs_progress_data *progress_data,
+                       u64 complete_size, u32 complete_count, bool discarded)
 {
-       struct compressor_thread_params *params = arg;
-       struct shared_queue *res_to_compress_queue = params->res_to_compress_queue;
-       struct shared_queue *compressed_res_queue = params->compressed_res_queue;
-       compress_func_t compress = params->compress;
-       struct message *msg;
-
-       DEBUG("Compressor thread ready");
-       while ((msg = shared_queue_get(res_to_compress_queue)) != NULL) {
-               compress_chunks(msg, compress);
-               shared_queue_put(compressed_res_queue, msg);
-       }
-       DEBUG("Compressor thread terminating");
-       return NULL;
-}
-#endif /* ENABLE_MULTITHREADED_COMPRESSION */
+       union wimlib_progress_info *progress = &progress_data->progress;
+       int ret;
 
-static void
-do_write_streams_progress(union wimlib_progress_info *progress,
-                         wimlib_progress_func_t progress_func,
-                         uint64_t size_added,
-                         bool stream_discarded)
-{
-       if (stream_discarded) {
-               progress->write_streams.total_bytes -= size_added;
-               if (progress->write_streams._private != ~(uint64_t)0 &&
-                   progress->write_streams._private > progress->write_streams.total_bytes)
+       if (discarded) {
+               progress->write_streams.total_bytes -= complete_size;
+               progress->write_streams.total_streams -= complete_count;
+               if (progress_data->next_progress != ~(uint64_t)0 &&
+                   progress_data->next_progress > progress->write_streams.total_bytes)
                {
-                       progress->write_streams._private = progress->write_streams.total_bytes;
+                       progress_data->next_progress = progress->write_streams.total_bytes;
                }
        } else {
-               progress->write_streams.completed_bytes += size_added;
+               progress->write_streams.completed_bytes += complete_size;
+               progress->write_streams.completed_streams += complete_count;
        }
-       progress->write_streams.completed_streams++;
-       if (progress_func &&
-           progress->write_streams.completed_bytes >= progress->write_streams._private)
+
+       if (progress->write_streams.completed_bytes >= progress_data->next_progress)
        {
-               progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
-                             progress);
-               if (progress->write_streams._private == progress->write_streams.total_bytes) {
-                       progress->write_streams._private = ~(uint64_t)0;
+               ret = call_progress(progress_data->progfunc,
+                                   WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
+                                   progress,
+                                   progress_data->progctx);
+               if (ret)
+                       return ret;
+
+               if (progress_data->next_progress == progress->write_streams.total_bytes) {
+                       progress_data->next_progress = ~(uint64_t)0;
                } else {
-                       progress->write_streams._private =
-                               min(progress->write_streams.total_bytes,
-                                   progress->write_streams.completed_bytes +
-                                       progress->write_streams.total_bytes / 100);
+                       /* Handle rate-limiting of messages  */
+
+                       /* Send new message as soon as another 1/128 of the
+                        * total has been written.  (Arbitrary number.)  */
+                       progress_data->next_progress =
+                               progress->write_streams.completed_bytes +
+                                       progress->write_streams.total_bytes / 128;
+
+                       /* ... Unless that would be more than 5000000 bytes, in
+                        * which case send the next after the next 5000000
+                        * bytes.  (Another arbitrary number.)  */
+                       if (progress->write_streams.completed_bytes + 5000000 <
+                           progress_data->next_progress)
+                               progress_data->next_progress =
+                                       progress->write_streams.completed_bytes + 5000000;
+
+                       /* ... But always send a message as soon as we're
+                        * completely done.  */
+                       if (progress->write_streams.total_bytes <
+                           progress_data->next_progress)
+                               progress_data->next_progress =
+                                       progress->write_streams.total_bytes;
                }
        }
+       return 0;
 }
 
-struct serial_write_stream_ctx {
-       int out_fd;
/* State shared across one pass of writing a set of blobs into WIM resources:
 * the output destination, compression configuration, progress bookkeeping, and
 * the read/compress/write positions of the resource currently in flight.  */
struct write_blobs_ctx {
	/* File descriptor to which the blobs are being written.  */
	struct filedes *out_fd;

	/* Blob table for the WIMStruct on whose behalf the blobs are being
	 * written.  */
	struct blob_table *blob_table;

	/* Compression format to use.  */
	int out_ctype;

	/* Maximum uncompressed chunk size in compressed resources to use.  */
	u32 out_chunk_size;

	/* Flags (WRITE_RESOURCE_FLAG_*) that affect how the blobs will be
	 * written, e.g. solid or pipable mode.  */
	int write_resource_flags;

	/* Data used for issuing WRITE_STREAMS progress.  */
	struct write_blobs_progress_data progress_data;

	/* Filter deciding which blobs need to be written; semantics defined by
	 * its users elsewhere in this file.  */
	struct filter_context *filter_ctx;

	/* Upper bound on the total number of bytes that need to be compressed.
	 * */
	u64 num_bytes_to_compress;

	/* Pointer to the chunk_compressor implementation being used for
	 * compressing chunks of data, or NULL if chunks are being written
	 * uncompressed.  */
	struct chunk_compressor *compressor;

	/* A buffer of size @out_chunk_size that has been loaned out from the
	 * chunk compressor and is currently being filled with the uncompressed
	 * data of the next chunk.  */
	u8 *cur_chunk_buf;

	/* Number of bytes in @cur_chunk_buf that are currently filled.  */
	size_t cur_chunk_buf_filled;

	/* List of blobs that currently have chunks being compressed.  */
	struct list_head blobs_being_compressed;

	/* List of blobs in the solid resource.  Blobs are moved here after
	 * @blobs_being_compressed only when writing a solid resource.  */
	struct list_head blobs_in_solid_resource;

	/* Current uncompressed offset in the blob being read.  */
	u64 cur_read_blob_offset;

	/* Uncompressed size of the blob currently being read.  */
	u64 cur_read_blob_size;

	/* Current uncompressed offset in the blob being written.  */
	u64 cur_write_blob_offset;

	/* Uncompressed size of resource currently being written.  */
	u64 cur_write_res_size;

	/* Array that is filled in with compressed chunk sizes as a resource is
	 * being written.  Also reused as the serialization buffer for the
	 * on-disk chunk table (see end_chunk_table()).  */
	u64 *chunk_csizes;

	/* Index of next entry in @chunk_csizes to fill in.  */
	size_t chunk_index;

	/* Number of entries in @chunk_csizes currently allocated.  */
	size_t num_alloc_chunks;

	/* Offset in the output file of the start of the chunks of the resource
	 * currently being written.  */
	u64 chunks_start_offset;
};
 
+/* Reserve space for the chunk table and prepare to accumulate the chunk table
+ * in memory.  */
 static int
-init_message(struct message *msg)
+begin_chunk_table(struct write_blobs_ctx *ctx, u64 res_expected_size)
 {
-       for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
-               msg->compressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
-               msg->uncompressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
-               if (msg->compressed_chunks[i] == NULL ||
-                   msg->uncompressed_chunks[i] == NULL)
+       u64 expected_num_chunks;
+       u64 expected_num_chunk_entries;
+       size_t reserve_size;
+       int ret;
+
+       /* Calculate the number of chunks and chunk entries that should be
+        * needed for the resource.  These normally will be the final values,
+        * but in SOLID mode some of the blobs we're planning to write into the
+        * resource may be duplicates, and therefore discarded, potentially
+        * decreasing the number of chunk entries needed.  */
+       expected_num_chunks = DIV_ROUND_UP(res_expected_size, ctx->out_chunk_size);
+       expected_num_chunk_entries = expected_num_chunks;
+       if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
+               expected_num_chunk_entries--;
+
+       /* Make sure the chunk_csizes array is long enough to store the
+        * compressed size of each chunk.  */
+       if (expected_num_chunks > ctx->num_alloc_chunks) {
+               u64 new_length = expected_num_chunks + 50;
+
+               if ((size_t)new_length != new_length) {
+                       ERROR("Resource size too large (%"PRIu64" bytes!",
+                             res_expected_size);
                        return WIMLIB_ERR_NOMEM;
-       }
-       return 0;
-}
+               }
 
-static void
-destroy_message(struct message *msg)
-{
-       for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
-               FREE(msg->compressed_chunks[i]);
-               FREE(msg->uncompressed_chunks[i]);
+               FREE(ctx->chunk_csizes);
+               ctx->chunk_csizes = MALLOC(new_length * sizeof(ctx->chunk_csizes[0]));
+               if (ctx->chunk_csizes == NULL) {
+                       ctx->num_alloc_chunks = 0;
+                       return WIMLIB_ERR_NOMEM;
+               }
+               ctx->num_alloc_chunks = new_length;
+       }
+
+       ctx->chunk_index = 0;
+
+       if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)) {
+               /* Reserve space for the chunk table in the output file.  In the
+                * case of solid resources this reserves the upper bound for the
+                * needed space, not necessarily the exact space which will
+                * prove to be needed.  At this point, we just use @chunk_csizes
+                * for a buffer of 0's because the actual compressed chunk sizes
+                * are unknown.  */
+               reserve_size = expected_num_chunk_entries *
+                              get_chunk_entry_size(res_expected_size,
+                                                   0 != (ctx->write_resource_flags &
+                                                         WRITE_RESOURCE_FLAG_SOLID));
+               if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)
+                       reserve_size += sizeof(struct alt_chunk_table_header_disk);
+               memset(ctx->chunk_csizes, 0, reserve_size);
+               ret = full_write(ctx->out_fd, ctx->chunk_csizes, reserve_size);
+               if (ret)
+                       return ret;
        }
+       return 0;
 }
 
-static void
-free_messages(struct message *msgs, size_t num_messages)
+static int
+begin_write_resource(struct write_blobs_ctx *ctx, u64 res_expected_size)
 {
-       if (msgs) {
-               for (size_t i = 0; i < num_messages; i++)
-                       destroy_message(&msgs[i]);
-               FREE(msgs);
-       }
-}
+       int ret;
 
-static struct message *
-allocate_messages(size_t num_messages)
-{
-       struct message *msgs;
+       wimlib_assert(res_expected_size != 0);
 
-       msgs = CALLOC(num_messages, sizeof(struct message));
-       if (!msgs)
-               return NULL;
-       for (size_t i = 0; i < num_messages; i++) {
-               if (init_message(&msgs[i])) {
-                       free_messages(msgs, num_messages);
-                       return NULL;
-               }
+       if (ctx->compressor != NULL) {
+               ret = begin_chunk_table(ctx, res_expected_size);
+               if (ret)
+                       return ret;
        }
-       return msgs;
-}
 
-static void
-main_writer_thread_destroy_ctx(struct main_writer_thread_ctx *ctx)
-{
-       while (ctx->num_outstanding_messages--)
-               shared_queue_get(ctx->compressed_res_queue);
-       free_messages(ctx->msgs, ctx->num_messages);
-       FREE(ctx->cur_chunk_tab);
+       /* Output file descriptor is now positioned at the offset at which to
+        * write the first chunk of the resource.  */
+       ctx->chunks_start_offset = ctx->out_fd->offset;
+       ctx->cur_write_blob_offset = 0;
+       ctx->cur_write_res_size = res_expected_size;
+       return 0;
 }
 
/* Finalize the chunk table of the resource just written: serialize the
 * accumulated compressed chunk sizes into on-disk chunk entries (in place in
 * @ctx->chunk_csizes) and write or rewrite the table in the output file.
 *
 * @res_actual_size: final uncompressed size of the resource.
 * @res_start_offset_ret / @res_store_size_ret: filled in with the resource's
 *	start offset in the file and its total stored (on-disk) size.
 *
 * Returns 0, or an error code on write failure.  */
static int
end_chunk_table(struct write_blobs_ctx *ctx, u64 res_actual_size,
		u64 *res_start_offset_ret, u64 *res_store_size_ret)
{
	size_t actual_num_chunks;
	size_t actual_num_chunk_entries;
	size_t chunk_entry_size;
	int ret;

	/* In non-solid resources the first chunk's offset is implicit, so
	 * there is one fewer table entry than chunks.  */
	actual_num_chunks = ctx->chunk_index;
	actual_num_chunk_entries = actual_num_chunks;
	if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
		actual_num_chunk_entries--;

	chunk_entry_size = get_chunk_entry_size(res_actual_size,
						0 != (ctx->write_resource_flags &
						      WRITE_RESOURCE_FLAG_SOLID));

	/* may_alias typedefs: the u64 chunk_csizes[] buffer is reinterpreted
	 * below as an array of little-endian entries, so the accesses must be
	 * exempted from strict-aliasing assumptions.  */
	typedef le64 _may_alias_attribute aliased_le64_t;
	typedef le32 _may_alias_attribute aliased_le32_t;

	if (chunk_entry_size == 4) {
		aliased_le32_t *entries = (aliased_le32_t*)ctx->chunk_csizes;

		if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
			/* Solid: entries are per-chunk compressed sizes.  */
			for (size_t i = 0; i < actual_num_chunk_entries; i++)
				entries[i] = cpu_to_le32(ctx->chunk_csizes[i]);
		} else {
			/* Non-solid: entries are cumulative offsets of chunks
			 * 1..n-1.  The in-place conversion is safe because the
			 * 4-byte write to entries[i] never reaches past the
			 * 8-byte chunk_csizes[i + 1] read ahead of it.  */
			u32 offset = ctx->chunk_csizes[0];
			for (size_t i = 0; i < actual_num_chunk_entries; i++) {
				u32 next_size = ctx->chunk_csizes[i + 1];
				entries[i] = cpu_to_le32(offset);
				offset += next_size;
			}
		}
	} else {
		/* Same as above, but with 8-byte chunk entries.  */
		aliased_le64_t *entries = (aliased_le64_t*)ctx->chunk_csizes;

		if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
			for (size_t i = 0; i < actual_num_chunk_entries; i++)
				entries[i] = cpu_to_le64(ctx->chunk_csizes[i]);
		} else {
			u64 offset = ctx->chunk_csizes[0];
			for (size_t i = 0; i < actual_num_chunk_entries; i++) {
				u64 next_size = ctx->chunk_csizes[i + 1];
				entries[i] = cpu_to_le64(offset);
				offset += next_size;
			}
		}
	}

	size_t chunk_table_size = actual_num_chunk_entries * chunk_entry_size;
	u64 res_start_offset;
	u64 res_end_offset;

	if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
		/* Pipable: the chunk table is appended after the chunk data
		 * (no seeking backwards allowed).  */
		ret = full_write(ctx->out_fd, ctx->chunk_csizes, chunk_table_size);
		if (ret)
			goto write_error;
		res_end_offset = ctx->out_fd->offset;
		res_start_offset = ctx->chunks_start_offset;
	} else {
		res_end_offset = ctx->out_fd->offset;

		u64 chunk_table_offset;

		/* Non-pipable: overwrite the space that begin_chunk_table()
		 * reserved just before the chunk data.  */
		chunk_table_offset = ctx->chunks_start_offset - chunk_table_size;

		if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
			/* Solid resources additionally get an alternate chunk
			 * table header before the table itself.  */
			struct alt_chunk_table_header_disk hdr;

			hdr.res_usize = cpu_to_le64(res_actual_size);
			hdr.chunk_size = cpu_to_le32(ctx->out_chunk_size);
			hdr.compression_format = cpu_to_le32(ctx->out_ctype);

			/* The on-disk format stores ctype numerically; these
			 * guards pin the API constants to the disk values.  */
			BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_XPRESS != 1);
			BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZX != 2);
			BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZMS != 3);

			ret = full_pwrite(ctx->out_fd, &hdr, sizeof(hdr),
					  chunk_table_offset - sizeof(hdr));
			if (ret)
				goto write_error;
			res_start_offset = chunk_table_offset - sizeof(hdr);
		} else {
			res_start_offset = chunk_table_offset;
		}

		ret = full_pwrite(ctx->out_fd, ctx->chunk_csizes,
				  chunk_table_size, chunk_table_offset);
		if (ret)
			goto write_error;
	}

	*res_start_offset_ret = res_start_offset;
	*res_store_size_ret = res_end_offset - res_start_offset;

	return 0;

write_error:
	ERROR_WITH_ERRNO("Write error");
	return ret;
}
+
+/* Finish writing a WIM resource by writing or updating the chunk table (if not
+ * writing the data uncompressed) and loading its metadata into @out_reshdr.  */
+static int
+end_write_resource(struct write_blobs_ctx *ctx, struct wim_reshdr *out_reshdr)
+{
+       int ret;
+       u64 res_size_in_wim;
+       u64 res_uncompressed_size;
+       u64 res_offset_in_wim;
+
+       wimlib_assert(ctx->cur_write_blob_offset == ctx->cur_write_res_size ||
+                     (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID));
+       res_uncompressed_size = ctx->cur_write_res_size;
+
+       if (ctx->compressor) {
+               ret = end_chunk_table(ctx, res_uncompressed_size,
+                                     &res_offset_in_wim, &res_size_in_wim);
                if (ret)
                        return ret;
+       } else {
+               res_offset_in_wim = ctx->chunks_start_offset;
+               res_size_in_wim = ctx->out_fd->offset - res_offset_in_wim;
+       }
+       out_reshdr->uncompressed_size = res_uncompressed_size;
+       out_reshdr->size_in_wim = res_size_in_wim;
+       out_reshdr->offset_in_wim = res_offset_in_wim;
+       DEBUG("Finished writing resource: %"PRIu64" => %"PRIu64" @ %"PRIu64"",
+             res_uncompressed_size, res_size_in_wim, res_offset_in_wim);
+       return 0;
+}
 
-               /* Was this the last chunk of the stream?  If so, finish
-                * it. */
-               if (list_empty(&cur_lte->msg_list) &&
-                   msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks)
-               {
-                       u64 res_csize;
-                       off_t offset;
+/* Call when no more data from the file at @path is needed.  */
+static int
+done_with_file(const tchar *path, wimlib_progress_func_t progfunc, void *progctx)
+{
+       union wimlib_progress_info info;
 
-                       ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab,
-                                                           ctx->out_fd,
-                                                           &res_csize);
-                       if (ret)
-                               return ret;
+       info.done_with_file.path_to_file = path;
 
-                       list_del(&cur_lte->being_compressed_list);
+       return call_progress(progfunc, WIMLIB_PROGRESS_MSG_DONE_WITH_FILE,
+                            &info, progctx);
+}
 
-                       /* Grab the offset of this stream in the output file
-                        * from the chunk table before we free it. */
-                       offset = ctx->cur_chunk_tab->file_offset;
+static int
+do_done_with_blob(struct blob_descriptor *blob,
+                 wimlib_progress_func_t progfunc, void *progctx)
+{
+       int ret;
+       struct wim_inode *inode;
 
-                       FREE(ctx->cur_chunk_tab);
-                       ctx->cur_chunk_tab = NULL;
+       if (!blob->may_send_done_with_file)
+               return 0;
 
-                       if (res_csize >= wim_resource_size(cur_lte)) {
-                               /* Oops!  We compressed the resource to
-                                * larger than the original size.  Write
-                                * the resource uncompressed instead. */
-                               DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
-                                     "writing uncompressed instead",
-                                     wim_resource_size(cur_lte), res_csize);
-                               ret = seek_and_truncate(ctx->out_fd, offset);
-                               if (ret)
-                                       return ret;
-                               ret = write_wim_resource(cur_lte,
-                                                        ctx->out_fd,
-                                                        WIMLIB_COMPRESSION_TYPE_NONE,
-                                                        &cur_lte->output_resource_entry,
-                                                        ctx->write_resource_flags);
-                               if (ret)
-                                       return ret;
-                       } else {
-                               cur_lte->output_resource_entry.size =
-                                       res_csize;
+       inode = blob->file_inode;
+
+       wimlib_assert(inode != NULL);
+       wimlib_assert(inode->num_remaining_streams > 0);
+       if (--inode->num_remaining_streams > 0)
+               return 0;
 
-                               cur_lte->output_resource_entry.original_size =
-                                       cur_lte->resource_entry.original_size;
+#ifdef __WIN32__
+       /* XXX: This logic really should be somewhere else.  */
 
-                               cur_lte->output_resource_entry.offset =
-                                       offset;
+       /* We want the path to the file, but blob->file_on_disk might actually
+        * refer to a named data stream.  Temporarily strip the named data
+        * stream from the path.  */
+       wchar_t *p_colon = NULL;
+       wchar_t *p_question_mark = NULL;
+       const wchar_t *p_stream_name;
 
-                               cur_lte->output_resource_entry.flags =
-                                       cur_lte->resource_entry.flags |
-                                               WIM_RESHDR_FLAG_COMPRESSED;
-                       }
+       p_stream_name = path_stream_name(blob->file_on_disk);
+       if (unlikely(p_stream_name)) {
+               p_colon = (wchar_t *)(p_stream_name - 1);
+               wimlib_assert(*p_colon == L':');
+               *p_colon = L'\0';
+       }
+
+       /* We also should use a fake Win32 path instead of an NT path.  */
+       if (!wcsncmp(blob->file_on_disk, L"\\??\\", 4)) {
+               p_question_mark = &blob->file_on_disk[1];
+               *p_question_mark = L'\\';
+       }
+#endif
+
+       ret = done_with_file(blob->file_on_disk, progfunc, progctx);
+
+#ifdef __WIN32__
+       if (p_colon)
+               *p_colon = L':';
+       if (p_question_mark)
+               *p_question_mark = L'?';
+#endif
+       return ret;
+}
+
+/* Handle WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES mode.  */
+static inline int
+done_with_blob(struct blob_descriptor *blob, struct write_blobs_ctx *ctx)
+{
+       if (likely(!(ctx->write_resource_flags &
+                    WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE)))
+               return 0;
+       return do_done_with_blob(blob, ctx->progress_data.progfunc,
+                                ctx->progress_data.progctx);
+}
+
+/* Begin processing a blob for writing.  */
+static int
+write_blob_begin_read(struct blob_descriptor *blob, void *_ctx)
+{
+       struct write_blobs_ctx *ctx = _ctx;
+       int ret;
+
+       wimlib_assert(blob->size > 0);
 
-                       do_write_streams_progress(ctx->progress,
-                                                 ctx->progress_func,
-                                                 wim_resource_size(cur_lte),
-                                                 false);
-
-                       /* Since we just finished writing a stream, write any
-                        * streams that have been added to the serial_streams
-                        * list for direct writing by the main thread (e.g.
-                        * resources that don't need to be compressed because
-                        * the desired compression type is the same as the
-                        * previous compression type). */
-                       if (!list_empty(&ctx->serial_streams)) {
-                               ret = do_write_stream_list_serial(&ctx->serial_streams,
-                                                                 ctx->lookup_table,
-                                                                 ctx->out_fd,
-                                                                 ctx->out_ctype,
-                                                                 ctx->write_resource_flags,
-                                                                 ctx->progress_func,
-                                                                 ctx->progress);
+       ctx->cur_read_blob_offset = 0;
+       ctx->cur_read_blob_size = blob->size;
+
+       /* As an optimization, we allow some blobs to be "unhashed", meaning
+        * their SHA-1 message digests are unknown.  This is the case with blobs
+        * that are added by scanning a directory tree with wimlib_add_image(),
+        * for example.  Since WIM uses single-instance blobs, we don't know
+        * whether such each such blob really need to written until it is
+        * actually checksummed, unless it has a unique size.  In such cases we
+        * read and checksum the blob in this function, thereby advancing ahead
+        * of read_blob_list(), which will still provide the data again to
+        * write_blob_process_chunk().  This is okay because an unhashed blob
+        * cannot be in a WIM resource, which might be costly to decompress.  */
+       if (ctx->blob_table != NULL && blob->unhashed && !blob->unique_size) {
+
+               struct blob_descriptor *new_blob;
+
+               ret = hash_unhashed_blob(blob, ctx->blob_table, &new_blob);
+               if (ret)
+                       return ret;
+               if (new_blob != blob) {
+                       /* Duplicate blob detected.  */
+
+                       if (new_blob->will_be_in_output_wim ||
+                           blob_filtered(new_blob, ctx->filter_ctx))
+                       {
+                               /* The duplicate blob is already being included
+                                * in the output WIM, or it would be filtered
+                                * out if it had been.  Skip writing this blob
+                                * (and reading it again) entirely, passing its
+                                * output reference count to the duplicate blob
+                                * in the former case.  */
+                               DEBUG("Discarding duplicate blob of "
+                                     "length %"PRIu64, blob->size);
+                               ret = do_write_blobs_progress(&ctx->progress_data,
+                                                             blob->size, 1, true);
+                               list_del(&blob->write_blobs_list);
+                               list_del(&blob->blob_table_list);
+                               if (new_blob->will_be_in_output_wim)
+                                       new_blob->out_refcnt += blob->out_refcnt;
+                               if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)
+                                       ctx->cur_write_res_size -= blob->size;
+                               if (!ret)
+                                       ret = done_with_blob(blob, ctx);
+                               free_blob_descriptor(blob);
                                if (ret)
                                        return ret;
-                       }
-
-                       /* Advance to the next stream to write. */
-                       if (list_empty(&ctx->outstanding_streams)) {
-                               cur_lte = NULL;
+                               return BEGIN_BLOB_STATUS_SKIP_BLOB;
                        } else {
-                               cur_lte = container_of(ctx->outstanding_streams.next,
-                                                      struct wim_lookup_table_entry,
-                                                      being_compressed_list);
+                               /* The duplicate blob can validly be written,
+                                * but was not marked as such.  Discard the
+                                * current blob descriptor and use the
+                                * duplicate, but actually freeing the current
+                                * blob descriptor must wait until
+                                * read_blob_list() has finished reading its
+                                * data.  */
+                               DEBUG("Blob duplicate, but not already "
+                                     "selected for writing.");
+                               list_replace(&blob->write_blobs_list,
+                                            &new_blob->write_blobs_list);
+                               list_replace(&blob->blob_table_list,
+                                            &new_blob->blob_table_list);
+                               blob->will_be_in_output_wim = 0;
+                               new_blob->out_refcnt = blob->out_refcnt;
+                               new_blob->will_be_in_output_wim = 1;
+                               new_blob->may_send_done_with_file = 0;
+                               blob = new_blob;
                        }
                }
        }
+       list_move_tail(&blob->write_blobs_list, &ctx->blobs_being_compressed);
        return 0;
 }
 
-/* Called when the main thread has read a new chunk of data. */
+/* Rewrite a blob that was just written compressed as uncompressed instead.
+ */
 static int
-main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx)
+write_blob_uncompressed(struct blob_descriptor *blob, struct filedes *out_fd)
 {
-       struct main_writer_thread_ctx *ctx = _ctx;
        int ret;
-       struct message *next_msg;
-       u64 next_chunk_in_msg;
-
-       /* Update SHA1 message digest for the stream currently being read by the
-        * main thread. */
-       sha1_update(&ctx->next_sha_ctx, chunk, chunk_size);
-
-       /* We send chunks of data to the compressor chunks in batches which we
-        * refer to as "messages".  @next_msg is the message that is currently
-        * being prepared to send off.  If it is NULL, that indicates that we
-        * need to start a new message. */
-       next_msg = ctx->next_msg;
-       if (!next_msg) {
-               /* We need to start a new message.  First check to see if there
-                * is a message available in the list of available messages.  If
-                * so, we can just take one.  If not, all the messages (there is
-                * a fixed number of them, proportional to the number of
-                * threads) have been sent off to the compressor threads, so we
-                * receive messages from the compressor threads containing
-                * compressed chunks of data.
-                *
-                * We may need to receive multiple messages before one is
-                * actually available to use because messages received that are
-                * *not* for the very next set of chunks to compress must be
-                * buffered until it's time to write those chunks. */
-               while (list_empty(&ctx->available_msgs)) {
-                       ret = receive_compressed_chunks(ctx);
+       u64 begin_offset = blob->out_reshdr.offset_in_wim;
+       u64 end_offset = out_fd->offset;
+
+       if (filedes_seek(out_fd, begin_offset) == -1)
+               return 0;
+
+       ret = extract_full_blob_to_fd(blob, out_fd);
+       if (ret) {
+               /* Error reading the uncompressed data.  */
+               if (out_fd->offset == begin_offset &&
+                   filedes_seek(out_fd, end_offset) != -1)
+               {
+                       /* Nothing was actually written yet, and we successfully
+                        * seeked to the end of the compressed resource, so
+                        * don't issue a hard error; just keep the compressed
+                        * resource instead.  */
+                       WARNING("Recovered compressed blob of "
+                               "size %"PRIu64", continuing on.", blob->size);
+                       return 0;
+               }
+               return ret;
+       }
+
+       wimlib_assert(out_fd->offset - begin_offset == blob->size);
+
+       if (out_fd->offset < end_offset &&
+           0 != ftruncate(out_fd->fd, out_fd->offset))
+       {
+               ERROR_WITH_ERRNO("Can't truncate output file to "
+                                "offset %"PRIu64, out_fd->offset);
+               return WIMLIB_ERR_WRITE;
+       }
+
+       blob->out_reshdr.size_in_wim = blob->size;
+       blob->out_reshdr.flags &= ~(WIM_RESHDR_FLAG_COMPRESSED |
+                                   WIM_RESHDR_FLAG_SOLID);
+       return 0;
+}
+
+/* Returns true if the specified blob, which was written as a non-solid
+ * resource, should be truncated from the WIM file and re-written uncompressed.
+ * blob->out_reshdr must be filled in from the initial write of the blob.  */
+static bool
+should_rewrite_blob_uncompressed(const struct write_blobs_ctx *ctx,
+                                const struct blob_descriptor *blob)
+{
+       /* If the compressed data is smaller than the uncompressed data, prefer
+        * the compressed data.  */
+       if (blob->out_reshdr.size_in_wim < blob->out_reshdr.uncompressed_size)
+               return false;
+
+       /* If we're not actually writing compressed data, then there's no need
+        * for re-writing.  */
+       if (!ctx->compressor)
+               return false;
+
+       /* If writing a pipable WIM, everything we write to the output is final
+        * (it might actually be a pipe!).  */
+       if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)
+               return false;
+
+       /* If the blob that would need to be re-read is located in a solid
+        * resource in another WIM file, then re-reading it would be costly.  So
+        * don't do it.
+        *
+        * Exception: if the compressed size happens to be *exactly* the same as
+        * the uncompressed size, then the blob *must* be written uncompressed
+        * in order to remain compatible with the Windows Overlay Filesystem
+        * Filter Driver (WOF).
+        *
+        * TODO: we are currently assuming that the optimization for
+        * single-chunk resources in maybe_rewrite_blob_uncompressed() prevents
+        * this case from being triggered too often.  To fully prevent excessive
+        * decompressions in degenerate cases, we really should obtain the
+        * uncompressed data by decompressing the compressed data we wrote to
+        * the output file.
+        */
+       if (blob->blob_location == BLOB_IN_WIM &&
+           blob->size != blob->rdesc->uncompressed_size &&
+           blob->size != blob->out_reshdr.size_in_wim)
+               return false;
+
+       return true;
+}
+
+static int
+maybe_rewrite_blob_uncompressed(struct write_blobs_ctx *ctx,
+                               struct blob_descriptor *blob)
+{
+       if (!should_rewrite_blob_uncompressed(ctx, blob))
+               return 0;
+
+       /* Regular (non-solid) WIM resources with exactly one chunk and
+        * compressed size equal to uncompressed size are exactly the same as
+        * the corresponding compressed data --- since there must be 0 entries
+        * in the chunk table and the only chunk must be stored uncompressed.
+        * In this case, there's no need to rewrite anything.  */
+       if (ctx->chunk_index == 1 &&
+           blob->out_reshdr.size_in_wim == blob->out_reshdr.uncompressed_size)
+       {
+               blob->out_reshdr.flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
+               return 0;
+       }
+
+       return write_blob_uncompressed(blob, ctx->out_fd);
+}
+
+/* Write the next chunk of (typically compressed) data to the output WIM,
+ * handling the writing of the chunk table.  */
+static int
+write_chunk(struct write_blobs_ctx *ctx, const void *cchunk,
+           size_t csize, size_t usize)
+{
+       int ret;
+       struct blob_descriptor *blob;
+       u32 completed_blob_count;
+       u32 completed_size;
+
+       blob = list_entry(ctx->blobs_being_compressed.next,
+                         struct blob_descriptor, write_blobs_list);
+
+       if (ctx->cur_write_blob_offset == 0 &&
+           !(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
+       {
+               /* Starting to write a new blob in non-solid mode.  */
+
+               if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
+                       DEBUG("Writing pipable WIM blob header "
+                             "(offset=%"PRIu64")", ctx->out_fd->offset);
+                       ret = write_pwm_blob_header(blob, ctx->out_fd,
+                                                   ctx->compressor != NULL);
+                       if (ret)
+                               return ret;
+               }
+
+               ret = begin_write_resource(ctx, blob->size);
+               if (ret)
+                       return ret;
+       }
+
+       if (ctx->compressor != NULL) {
+               /* Record the compressed chunk size.  */
+               wimlib_assert(ctx->chunk_index < ctx->num_alloc_chunks);
+               ctx->chunk_csizes[ctx->chunk_index++] = csize;
+
+              /* If writing a pipable WIM, before the chunk data write a chunk
+               * header that provides the compressed chunk size.  */
+               if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
+                       struct pwm_chunk_hdr chunk_hdr = {
+                               .compressed_size = cpu_to_le32(csize),
+                       };
+                       ret = full_write(ctx->out_fd, &chunk_hdr,
+                                        sizeof(chunk_hdr));
+                       if (ret)
+                               goto write_error;
+               }
+       }
+
+       /* Write the chunk data.  */
+       ret = full_write(ctx->out_fd, cchunk, csize);
+       if (ret)
+               goto write_error;
+
+       ctx->cur_write_blob_offset += usize;
+
+       completed_size = usize;
+       completed_blob_count = 0;
+       if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
+               /* Wrote chunk in solid mode.  It may have finished multiple
+                * blobs.  */
+               struct blob_descriptor *next_blob;
+
+               while (blob && ctx->cur_write_blob_offset >= blob->size) {
+
+                       ctx->cur_write_blob_offset -= blob->size;
+
+                       if (ctx->cur_write_blob_offset)
+                               next_blob = list_entry(blob->write_blobs_list.next,
+                                                     struct blob_descriptor,
+                                                     write_blobs_list);
+                       else
+                               next_blob = NULL;
+
+                       ret = done_with_blob(blob, ctx);
                        if (ret)
                                return ret;
+                       list_move_tail(&blob->write_blobs_list, &ctx->blobs_in_solid_resource);
+                       completed_blob_count++;
+
+                       blob = next_blob;
                }
+       } else {
+               /* Wrote chunk in non-solid mode.  It may have finished a
+                * blob.  */
+               if (ctx->cur_write_blob_offset == blob->size) {
+
+                       wimlib_assert(ctx->cur_write_blob_offset ==
+                                     ctx->cur_write_res_size);
+
+                       ret = end_write_resource(ctx, &blob->out_reshdr);
+                       if (ret)
+                               return ret;
+
+                       blob->out_reshdr.flags = reshdr_flags_for_blob(blob);
+                       if (ctx->compressor != NULL)
+                               blob->out_reshdr.flags |= WIM_RESHDR_FLAG_COMPRESSED;
+
+                       ret = maybe_rewrite_blob_uncompressed(ctx, blob);
+                       if (ret)
+                               return ret;
 
-               next_msg = container_of(ctx->available_msgs.next,
-                                       struct message, list);
-               list_del(&next_msg->list);
-               next_msg->complete = false;
-               next_msg->begin_chunk = ctx->next_chunk;
-               next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG,
-                                          ctx->next_num_chunks - ctx->next_chunk);
-               ctx->next_msg = next_msg;
-       }
-
-       /* Fill in the next chunk to compress */
-       next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk;
-
-       next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size;
-       memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg],
-              chunk, chunk_size);
-       ctx->next_chunk++;
-       if (++next_chunk_in_msg == next_msg->num_chunks) {
-               /* Send off an array of chunks to compress */
-               list_add_tail(&next_msg->list, &ctx->next_lte->msg_list);
-               shared_queue_put(ctx->res_to_compress_queue, next_msg);
-               ++ctx->num_outstanding_messages;
-               ctx->next_msg = NULL;
+                       wimlib_assert(blob->out_reshdr.uncompressed_size == blob->size);
+
+                       ctx->cur_write_blob_offset = 0;
+
+                       ret = done_with_blob(blob, ctx);
+                       if (ret)
+                               return ret;
+                       list_del(&blob->write_blobs_list);
+                       completed_blob_count++;
+               }
+       }
+
+       return do_write_blobs_progress(&ctx->progress_data, completed_size,
+                                      completed_blob_count, false);
+
+write_error:
+       ERROR_WITH_ERRNO("Write error");
+       return ret;
+}
+
+static int
+prepare_chunk_buffer(struct write_blobs_ctx *ctx)
+{
+       /* While we are unable to get a new chunk buffer due to too many chunks
+        * already outstanding, retrieve and write the next compressed chunk. */
+       while (!(ctx->cur_chunk_buf =
+                ctx->compressor->get_chunk_buffer(ctx->compressor)))
+       {
+               const void *cchunk;
+               u32 csize;
+               u32 usize;
+               bool bret;
+               int ret;
+
+               bret = ctx->compressor->get_compression_result(ctx->compressor,
+                                                              &cchunk,
+                                                              &csize,
+                                                              &usize);
+               wimlib_assert(bret);
+
+               ret = write_chunk(ctx, cchunk, csize, usize);
+               if (ret)
+                       return ret;
        }
        return 0;
 }
 
+/* Process the next chunk of data to be written to a WIM resource.  */
+static int
+write_blob_process_chunk(const void *chunk, size_t size, void *_ctx)
+{
+       struct write_blobs_ctx *ctx = _ctx;
+       int ret;
+       const u8 *chunkptr, *chunkend;
+
+       wimlib_assert(size != 0);
+
+       if (ctx->compressor == NULL) {
+               /* Write chunk uncompressed.  */
+                ret = write_chunk(ctx, chunk, size, size);
+                if (ret)
+                        return ret;
+                ctx->cur_read_blob_offset += size;
+                return 0;
+       }
+
+       /* Submit the chunk for compression, but take into account that the
+        * @size the chunk was provided in may not correspond to the
+        * @out_chunk_size being used for compression.  */
+       chunkptr = chunk;
+       chunkend = chunkptr + size;
+       do {
+               size_t needed_chunk_size;
+               size_t bytes_consumed;
+
+               if (!ctx->cur_chunk_buf) {
+                       ret = prepare_chunk_buffer(ctx);
+                       if (ret)
+                               return ret;
+               }
+
+               if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
+                       needed_chunk_size = ctx->out_chunk_size;
+               } else {
+                       needed_chunk_size = min(ctx->out_chunk_size,
+                                               ctx->cur_chunk_buf_filled +
+                                                       (ctx->cur_read_blob_size -
+                                                        ctx->cur_read_blob_offset));
+               }
+
+               bytes_consumed = min(chunkend - chunkptr,
+                                    needed_chunk_size - ctx->cur_chunk_buf_filled);
+
+               memcpy(&ctx->cur_chunk_buf[ctx->cur_chunk_buf_filled],
+                      chunkptr, bytes_consumed);
+
+               chunkptr += bytes_consumed;
+               ctx->cur_read_blob_offset += bytes_consumed;
+               ctx->cur_chunk_buf_filled += bytes_consumed;
+
+               if (ctx->cur_chunk_buf_filled == needed_chunk_size) {
+                       ctx->compressor->signal_chunk_filled(ctx->compressor,
+                                                            ctx->cur_chunk_buf_filled);
+                       ctx->cur_chunk_buf = NULL;
+                       ctx->cur_chunk_buf_filled = 0;
+               }
+       } while (chunkptr != chunkend);
+       return 0;
+}
+
+/* Finish processing a blob for writing.  It may not have been completely
+ * written yet, as the chunk_compressor implementation may still have chunks
+ * buffered or being compressed.  */
+static int
+write_blob_end_read(struct blob_descriptor *blob, int status, void *_ctx)
+{
+       struct write_blobs_ctx *ctx = _ctx;
+
+       wimlib_assert(ctx->cur_read_blob_offset == ctx->cur_read_blob_size || status);
+
+       if (!blob->will_be_in_output_wim) {
+               /* The blob was a duplicate.  Now that its data has finished
+                * being read, it is being discarded in favor of the duplicate
+                * entry.  It therefore is no longer needed, and we can fire the
+                * DONE_WITH_FILE callback because the file will not be read
+                * again.
+                *
+                * Note: we can't yet fire DONE_WITH_FILE for non-duplicate
+                * blobs, since it needs to be possible to re-read the file if
+                * it does not compress to less than its original size.  */
+               if (!status)
+                       status = done_with_blob(blob, ctx);
+               free_blob_descriptor(blob);
+       } else if (!status && blob->unhashed && ctx->blob_table != NULL) {
+               /* The blob was not a duplicate and was previously unhashed.
+                * Since we passed COMPUTE_MISSING_BLOB_HASHES to
+                * read_blob_list(), blob->hash is now computed and valid.  So
+                * turn this blob into a "hashed" blob.  */
+               list_del(&blob->unhashed_list);
+               blob_table_insert(ctx->blob_table, blob);
+               blob->unhashed = 0;
+       }
+       return status;
+}
+
+/* Compute statistics about a list of blobs that will be written.
+ *
+ * Assumes the blobs are sorted such that all blobs located in each distinct WIM
+ * (specified by WIMStruct) are together.  */
+static void
+compute_blob_list_stats(struct list_head *blob_list,
+                       struct write_blobs_ctx *ctx)
+{
+       struct blob_descriptor *blob;
+       u64 total_bytes = 0;
+       u64 num_blobs = 0;
+       u64 total_parts = 0;
+       WIMStruct *prev_wim_part = NULL;
+
+       list_for_each_entry(blob, blob_list, write_blobs_list) {
+               num_blobs++;
+               total_bytes += blob->size;
+               if (blob->blob_location == BLOB_IN_WIM) {
+                       if (prev_wim_part != blob->rdesc->wim) {
+                               prev_wim_part = blob->rdesc->wim;
+                               total_parts++;
+                       }
+               }
+       }
+       ctx->progress_data.progress.write_streams.total_bytes       = total_bytes;
+       ctx->progress_data.progress.write_streams.total_streams     = num_blobs;
+       ctx->progress_data.progress.write_streams.completed_bytes   = 0;
+       ctx->progress_data.progress.write_streams.completed_streams = 0;
+       ctx->progress_data.progress.write_streams.compression_type  = ctx->out_ctype;
+       ctx->progress_data.progress.write_streams.total_parts       = total_parts;
+       ctx->progress_data.progress.write_streams.completed_parts   = 0;
+       ctx->progress_data.next_progress = 0;
+}
+
+/* Find blobs in @blob_list that can be copied to the output WIM in raw form
+ * rather than compressed.  Delete these blobs from @blob_list and move them to
+ * @raw_copy_blobs.  Return the total uncompressed size of the blobs that need
+ * to be compressed.  */
+static u64
+find_raw_copy_blobs(struct list_head *blob_list,
+                   int write_resource_flags,
+                   int out_ctype,
+                   u32 out_chunk_size,
+                   struct list_head *raw_copy_blobs)
+{
+       struct blob_descriptor *blob, *tmp;
+       u64 num_bytes_to_compress = 0;
+
+       INIT_LIST_HEAD(raw_copy_blobs);
+
+       /* Initialize temporary raw_copy_ok flag.  */
+       list_for_each_entry(blob, blob_list, write_blobs_list)
+               if (blob->blob_location == BLOB_IN_WIM)
+                       blob->rdesc->raw_copy_ok = 0;
+
+       list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
+               if (blob->blob_location == BLOB_IN_WIM &&
+                   blob->rdesc->raw_copy_ok)
+               {
+                       list_move_tail(&blob->write_blobs_list,
+                                      raw_copy_blobs);
+               } else if (can_raw_copy(blob, write_resource_flags,
+                                       out_ctype, out_chunk_size))
+               {
+                       blob->rdesc->raw_copy_ok = 1;
+                       list_move_tail(&blob->write_blobs_list,
+                                      raw_copy_blobs);
+               } else {
+                       num_bytes_to_compress += blob->size;
+               }
+       }
+
+       return num_bytes_to_compress;
+}
+
+/* Copy a raw compressed resource located in another WIM file to the WIM file
+ * being written.  */
 static int
-main_writer_thread_finish(void *_ctx)
+write_raw_copy_resource(struct wim_resource_descriptor *in_rdesc,
+                       struct filedes *out_fd)
 {
-       struct main_writer_thread_ctx *ctx = _ctx;
+       u64 cur_read_offset;
+       u64 end_read_offset;
+       u8 buf[BUFFER_SIZE];
+       size_t bytes_to_read;
        int ret;
-       while (ctx->num_outstanding_messages != 0) {
-               ret = receive_compressed_chunks(ctx);
+       struct filedes *in_fd;
+       struct blob_descriptor *blob;
+       u64 out_offset_in_wim;
+
+       DEBUG("Copying raw compressed data (size_in_wim=%"PRIu64", "
+             "uncompressed_size=%"PRIu64")",
+             in_rdesc->size_in_wim, in_rdesc->uncompressed_size);
+
+       /* Copy the raw data.  */
+       cur_read_offset = in_rdesc->offset_in_wim;
+       end_read_offset = cur_read_offset + in_rdesc->size_in_wim;
+
+       out_offset_in_wim = out_fd->offset;
+
+       if (in_rdesc->is_pipable) {
+               if (cur_read_offset < sizeof(struct pwm_blob_hdr))
+                       return WIMLIB_ERR_INVALID_PIPABLE_WIM;
+               cur_read_offset -= sizeof(struct pwm_blob_hdr);
+               out_offset_in_wim += sizeof(struct pwm_blob_hdr);
+       }
+       in_fd = &in_rdesc->wim->in_fd;
+       wimlib_assert(cur_read_offset != end_read_offset);
+       do {
+
+               bytes_to_read = min(sizeof(buf), end_read_offset - cur_read_offset);
+
+               ret = full_pread(in_fd, buf, bytes_to_read, cur_read_offset);
+               if (ret)
+                       return ret;
+
+               ret = full_write(out_fd, buf, bytes_to_read);
                if (ret)
                        return ret;
+
+               cur_read_offset += bytes_to_read;
+
+       } while (cur_read_offset != end_read_offset);
+
+       list_for_each_entry(blob, &in_rdesc->blob_list, rdesc_node) {
+               if (blob->will_be_in_output_wim) {
+                       blob_set_out_reshdr_for_reuse(blob);
+                       if (in_rdesc->flags & WIM_RESHDR_FLAG_SOLID)
+                               blob->out_res_offset_in_wim = out_offset_in_wim;
+                       else
+                               blob->out_reshdr.offset_in_wim = out_offset_in_wim;
+
+               }
        }
-       wimlib_assert(list_empty(&ctx->outstanding_streams));
-       return do_write_stream_list_serial(&ctx->serial_streams,
-                                          ctx->lookup_table,
-                                          ctx->out_fd,
-                                          ctx->out_ctype,
-                                          ctx->write_resource_flags,
-                                          ctx->progress_func,
-                                          ctx->progress);
+       return 0;
 }
 
+/* Copy a list of raw compressed resources located in other WIM file(s) to the
+ * WIM file being written.  */
 static int
-submit_stream_for_compression(struct wim_lookup_table_entry *lte,
-                             struct main_writer_thread_ctx *ctx)
+write_raw_copy_resources(struct list_head *raw_copy_blobs,
+                        struct filedes *out_fd,
+                        struct write_blobs_progress_data *progress_data)
 {
+       struct blob_descriptor *blob;
        int ret;
 
-       /* Read the entire stream @lte, feeding its data chunks to the
-        * compressor threads.  Also SHA1-sum the stream; this is required in
-        * the case that @lte is unhashed, and a nice additional verification
-        * when @lte is already hashed. */
-       sha1_init(&ctx->next_sha_ctx);
-       ctx->next_chunk = 0;
-       ctx->next_num_chunks = wim_resource_chunks(lte);
-       ctx->next_lte = lte;
-       INIT_LIST_HEAD(&lte->msg_list);
-       list_add_tail(&lte->being_compressed_list, &ctx->outstanding_streams);
-       ret = read_resource_prefix(lte, wim_resource_size(lte),
-                                  main_writer_thread_cb, ctx, 0);
-       if (ret == 0) {
-               wimlib_assert(ctx->next_chunk == ctx->next_num_chunks);
-               ret = finalize_and_check_sha1(&ctx->next_sha_ctx, lte);
+       list_for_each_entry(blob, raw_copy_blobs, write_blobs_list)
+               blob->rdesc->raw_copy_ok = 1;
+
+       list_for_each_entry(blob, raw_copy_blobs, write_blobs_list) {
+               if (blob->rdesc->raw_copy_ok) {
+                       /* Write each solid resource only one time.  */
+                       ret = write_raw_copy_resource(blob->rdesc, out_fd);
+                       if (ret)
+                               return ret;
+                       blob->rdesc->raw_copy_ok = 0;
+               }
+               ret = do_write_blobs_progress(progress_data, blob->size,
+                                             1, false);
+               if (ret)
+                       return ret;
        }
-       return ret;
+       return 0;
 }
 
+/* Wait for and write all chunks pending in the compressor.  */
 static int
-main_thread_process_next_stream(struct wim_lookup_table_entry *lte, void *_ctx)
+finish_remaining_chunks(struct write_blobs_ctx *ctx)
 {
-       struct main_writer_thread_ctx *ctx = _ctx;
+       const void *cdata;
+       u32 csize;
+       u32 usize;
        int ret;
 
-       if (wim_resource_size(lte) < 1000 ||
-           ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
-           (lte->resource_location == RESOURCE_IN_WIM &&
-            !(ctx->write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) &&
-            lte->wim->compression_type == ctx->out_ctype))
+       if (ctx->compressor == NULL)
+               return 0;
+
+       if (ctx->cur_chunk_buf_filled != 0) {
+               ctx->compressor->signal_chunk_filled(ctx->compressor,
+                                                    ctx->cur_chunk_buf_filled);
+       }
+
+       while (ctx->compressor->get_compression_result(ctx->compressor, &cdata,
+                                                      &csize, &usize))
        {
-               /* Stream is too small or isn't being compressed.  Process it by
-                * the main thread when we have a chance.  We can't necessarily
-                * process it right here, as the main thread could be in the
-                * middle of writing a different stream. */
-               list_add_tail(&lte->write_streams_list, &ctx->serial_streams);
-               lte->deferred = 1;
-               ret = 0;
-       } else {
-               ret = submit_stream_for_compression(lte, ctx);
+               ret = write_chunk(ctx, cdata, csize, usize);
+               if (ret)
+                       return ret;
        }
-       lte->no_progress = 1;
-       return ret;
+       return 0;
 }
 
-static long
-get_default_num_threads(void)
+static void
+remove_empty_blobs(struct list_head *blob_list)
 {
+       struct blob_descriptor *blob, *tmp;
+
+       list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
+               wimlib_assert(blob->will_be_in_output_wim);
+               if (blob->size == 0) {
+                       list_del(&blob->write_blobs_list);
+                       blob->out_reshdr.offset_in_wim = 0;
+                       blob->out_reshdr.size_in_wim = 0;
+                       blob->out_reshdr.uncompressed_size = 0;
+                       blob->out_reshdr.flags = reshdr_flags_for_blob(blob);
+               }
+       }
+}
+
+static inline bool
+blob_is_in_file(const struct blob_descriptor *blob)
+{
+       return blob->blob_location == BLOB_IN_FILE_ON_DISK
 #ifdef __WIN32__
-       return win32_get_number_of_processors();
-#else
-       return sysconf(_SC_NPROCESSORS_ONLN);
+           || blob->blob_location == BLOB_IN_WINNT_FILE_ON_DISK
+           || blob->blob_location == BLOB_WIN32_ENCRYPTED
 #endif
+          ;
 }
 
-/* Equivalent to write_stream_list_serial(), except this takes a @num_threads
- * parameter and will perform compression using that many threads.  Falls
- * back to write_stream_list_serial() on certain errors, such as a failure to
- * create the number of threads requested.
- *
- * High level description of the algorithm for writing compressed streams in
- * parallel:  We perform compression on chunks of size WIM_CHUNK_SIZE bytes
- * rather than on full files.  The currently executing thread becomes the main
- * thread and is entirely in charge of reading the data to compress (which may
- * be in any location understood by the resource code--- such as in an external
- * file being captured, or in another WIM file from which an image is being
- * exported) and actually writing the compressed data to the output file.
- * Additional threads are "compressor threads" and all execute the
- * compressor_thread_proc, where they repeatedly retrieve buffers of data from
- * the main thread, compress them, and hand them back to the main thread.
- *
- * Certain streams, such as streams that do not need to be compressed (e.g.
- * input compression type same as output compression type) or streams of very
- * small size are placed in a list (main_writer_thread_ctx.serial_list) and
- * handled entirely by the main thread at an appropriate time.
- *
- * At any given point in time, multiple streams may be having chunks compressed
- * concurrently.  The stream that the main thread is currently *reading* may be
- * later in the list that the stream that the main thread is currently
- * *writing*.
- */
-static int
-write_stream_list_parallel(struct list_head *stream_list,
-                          struct wim_lookup_table *lookup_table,
-                          int out_fd,
-                          int out_ctype,
-                          int write_resource_flags,
-                          wimlib_progress_func_t progress_func,
-                          union wimlib_progress_info *progress,
-                          unsigned num_threads)
+static void
+init_done_with_file_info(struct list_head *blob_list)
 {
-       int ret;
-       struct shared_queue res_to_compress_queue;
-       struct shared_queue compressed_res_queue;
-       pthread_t *compressor_threads = NULL;
-
-       if (num_threads == 0) {
-               long nthreads = get_default_num_threads();
-               if (nthreads < 1 || nthreads > UINT_MAX) {
-                       WARNING("Could not determine number of processors! Assuming 1");
-                       goto out_serial;
-               } else if (nthreads == 1) {
-                       goto out_serial_quiet;
+       struct blob_descriptor *blob;
+
+       list_for_each_entry(blob, blob_list, write_blobs_list) {
+               if (blob_is_in_file(blob)) {
+                       blob->file_inode->num_remaining_streams = 0;
+                       blob->may_send_done_with_file = 1;
                } else {
-                       num_threads = nthreads;
+                       blob->may_send_done_with_file = 0;
                }
        }
 
-       DEBUG("Writing stream list (parallel version, num_threads=%u)",
-             num_threads);
+       list_for_each_entry(blob, blob_list, write_blobs_list)
+               if (blob->may_send_done_with_file)
+                       blob->file_inode->num_remaining_streams++;
+}
+
+/*
+ * Write a list of blobs to the output WIM file.
+ *
+ * @blob_list
+ *     The list of blobs to write, specified by a list of 'struct blob_descriptor' linked
+ *     by the 'write_blobs_list' member.
+ *
+ * @out_fd
+ *     The file descriptor, opened for writing, to which to write the blobs.
+ *
+ * @write_resource_flags
+ *     Flags to modify how the blobs are written:
+ *
+ *     WRITE_RESOURCE_FLAG_RECOMPRESS:
+ *             Force compression of all resources, even if they could otherwise
+ *             be re-used by copying the raw data, due to being located in a WIM
+ *             file with compatible compression parameters.
+ *
+ *     WRITE_RESOURCE_FLAG_PIPABLE:
+ *             Write the resources in the wimlib-specific pipable format, and
+ *             furthermore do so in such a way that no seeking backwards in
+ *             @out_fd will be performed (so it may be a pipe).
+ *
+ *     WRITE_RESOURCE_FLAG_SOLID:
+ *             Combine all the blobs into a single resource rather than writing
+ *             them in separate resources.  This flag is only valid if the WIM
+ *             version number has been, or will be, set to WIM_VERSION_SOLID.
+ *             This flag may not be combined with WRITE_RESOURCE_FLAG_PIPABLE.
+ *
+ * @out_ctype
+ *     Compression format to use in the output resources, specified as one of
+ *     the WIMLIB_COMPRESSION_TYPE_* constants.  WIMLIB_COMPRESSION_TYPE_NONE
+ *     is allowed.
+ *
+ * @out_chunk_size
+ *     Compression chunk size to use in the output resources.  It must be a
+ *     valid chunk size for the specified compression format @out_ctype, unless
+ *     @out_ctype is WIMLIB_COMPRESSION_TYPE_NONE, in which case this parameter
+ *     is ignored.
+ *
+ * @num_threads
+ *     Number of threads to use to compress data.  If 0, a default number of
+ *     threads will be chosen.  The number of threads still may be decreased
+ *     from the specified value if insufficient memory is detected.
+ *
+ * @blob_table
+ *     If on-the-fly deduplication of unhashed blobs is desired, this parameter
+ *     must be a pointer to the blob table for the WIMStruct on whose behalf the
+ *     blobs are being written.  Otherwise, this parameter can be NULL.
+ *
+ * @filter_ctx
+ *     If on-the-fly deduplication of unhashed blobs is desired, this parameter
+ *     can be a pointer to a context for blob filtering used to detect whether
+ *     the duplicate blob has been hard-filtered or not.  If no blobs are
+ *     hard-filtered or no blobs are unhashed, this parameter can be NULL.
+ *
+ * This function will write the blobs in @blob_list to resources in
+ * consecutive positions in the output WIM file, or to a single solid resource
+ * if WRITE_RESOURCE_FLAG_SOLID was specified in @write_resource_flags.  In both
+ * cases, the @out_reshdr of the `struct blob_descriptor' for each blob written will be
+ * updated to specify its location, size, and flags in the output WIM.  In the
+ * solid resource case, WIM_RESHDR_FLAG_SOLID will be set in the @flags field of
+ * each @out_reshdr, and furthermore @out_res_offset_in_wim and
+ * @out_res_size_in_wim of each @out_reshdr will be set to the offset and size,
+ * respectively, in the output WIM of the solid resource containing the
+ * corresponding blob.
+ *
+ * Each of the blobs to write may be in any location supported by the
+ * resource-handling code (specifically, read_blob_list()), such as the contents
+ * of an external file that has been logically added to the output WIM, or a blob
+ * in another WIM file that has been imported, or even a blob in the "same" WIM
+ * file of which a modified copy is being written.  In the case that a blob is
+ * already in a WIM file and uses compatible compression parameters, by default
+ * this function will re-use the raw data instead of decompressing it, then
+ * recompressing it; however, with WRITE_RESOURCE_FLAG_RECOMPRESS
+ * specified in @write_resource_flags, this is not done.
+ *
+ * As a further requirement, this function requires that the
+ * @will_be_in_output_wim member be set to 1 on all blobs in @blob_list as well
+ * as any other blobs not in @blob_list that will be in the output WIM file, but
+ * set to 0 on any other blobs in the output WIM's blob table or sharing a solid
+ * resource with a blob in @blob_list.  Still furthermore, if on-the-fly
+ * deduplication of blobs is possible, then all blobs in @blob_list must also be
+ * linked by @blob_table_list along with any other blobs that have
+ * @will_be_in_output_wim set.
+ *
+ * This function handles on-the-fly deduplication of blobs for which SHA-1
+ * message digests have not yet been calculated.  Such blobs may or may not need
+ * to be written.  If @blob_table is non-NULL, then each blob in @blob_list that
+ * has @unhashed set but not @unique_size set is checksummed immediately before
+ * it would otherwise be read for writing in order to determine if it is
+ * identical to another blob already being written or one that would be filtered
+ * out of the output WIM using blob_filtered() with the context @filter_ctx.
+ * Each such duplicate blob will be removed from @blob_list, its reference count
+ * transferred to the pre-existing duplicate blob, its memory freed, and will not
+ * be written.  Alternatively, if a blob in @blob_list is a duplicate with any
+ * blob in @blob_table that has not been marked for writing or would not be
+ * hard-filtered, it is freed and the pre-existing duplicate is written instead,
+ * taking ownership of the reference count and slot in the @blob_table_list.
+ *
+ * Returns 0 if every blob was either written successfully or did not need to be
+ * written; otherwise returns a non-zero error code.
+ */
+static int
+write_blob_list(struct list_head *blob_list,
+               struct filedes *out_fd,
+               int write_resource_flags,
+               int out_ctype,
+               u32 out_chunk_size,
+               unsigned num_threads,
+               struct blob_table *blob_table,
+               struct filter_context *filter_ctx,
+               wimlib_progress_func_t progfunc,
+               void *progctx)
+{
+       int ret;
+       struct write_blobs_ctx ctx;
+       struct list_head raw_copy_blobs;
 
-       progress->write_streams.num_threads = num_threads;
+       wimlib_assert((write_resource_flags &
+                      (WRITE_RESOURCE_FLAG_SOLID |
+                       WRITE_RESOURCE_FLAG_PIPABLE)) !=
+                               (WRITE_RESOURCE_FLAG_SOLID |
+                                WRITE_RESOURCE_FLAG_PIPABLE));
 
-       static const size_t MESSAGES_PER_THREAD = 2;
-       size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD);
+       remove_empty_blobs(blob_list);
 
-       DEBUG("Initializing shared queues (queue_size=%zu)", queue_size);
+       if (list_empty(blob_list)) {
+               DEBUG("No blobs to write.");
+               return 0;
+       }
 
-       ret = shared_queue_init(&res_to_compress_queue, queue_size);
+       /* If needed, set auxiliary information so that we can detect when the
+        * library has finished using each external file.  */
+       if (unlikely(write_resource_flags & WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE))
+               init_done_with_file_info(blob_list);
+
+       memset(&ctx, 0, sizeof(ctx));
+
+       ctx.out_fd = out_fd;
+       ctx.blob_table = blob_table;
+       ctx.out_ctype = out_ctype;
+       ctx.out_chunk_size = out_chunk_size;
+       ctx.write_resource_flags = write_resource_flags;
+       ctx.filter_ctx = filter_ctx;
+
+       /*
+        * We normally sort the blobs to write by a "sequential" order that is
+        * optimized for reading.  But when using solid compression, we instead
+        * sort the blobs by file extension and file name (when applicable; and
+        * we don't do this for blobs from solid resources) so that similar
+        * files are grouped together, which improves the compression ratio.
+        * This is somewhat of a hack since a blob does not necessarily
+        * correspond one-to-one with a filename, nor is there any guarantee
+        * that two files with similar names or extensions are actually similar
+        * in content.  A potential TODO is to sort the blobs based on some
+        * measure of similarity of their actual contents.
+        */
+
+       ret = sort_blob_list_by_sequential_order(blob_list,
+                                                offsetof(struct blob_descriptor,
+                                                         write_blobs_list));
        if (ret)
-               goto out_serial;
+               return ret;
 
-       ret = shared_queue_init(&compressed_res_queue, queue_size);
-       if (ret)
-               goto out_destroy_res_to_compress_queue;
-
-       struct compressor_thread_params params;
-       params.res_to_compress_queue = &res_to_compress_queue;
-       params.compressed_res_queue = &compressed_res_queue;
-       params.compress = get_compress_func(out_ctype);
-
-       compressor_threads = MALLOC(num_threads * sizeof(pthread_t));
-       if (!compressor_threads) {
-               ret = WIMLIB_ERR_NOMEM;
-               goto out_destroy_compressed_res_queue;
-       }
-
-       for (unsigned i = 0; i < num_threads; i++) {
-               DEBUG("pthread_create thread %u of %u", i + 1, num_threads);
-               ret = pthread_create(&compressor_threads[i], NULL,
-                                    compressor_thread_proc, &params);
-               if (ret != 0) {
-                       ret = -1;
-                       ERROR_WITH_ERRNO("Failed to create compressor "
-                                        "thread %u of %u",
-                                        i + 1, num_threads);
-                       num_threads = i;
-                       goto out_join;
+       compute_blob_list_stats(blob_list, &ctx);
+
+       if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID_SORT) {
+               ret = sort_blob_list_for_solid_compression(blob_list);
+               if (unlikely(ret))
+                       WARNING("Failed to sort blobs for solid compression. Continuing anyways.");
+       }
+
+       ctx.progress_data.progfunc = progfunc;
+       ctx.progress_data.progctx = progctx;
+
+       ctx.num_bytes_to_compress = find_raw_copy_blobs(blob_list,
+                                                       write_resource_flags,
+                                                       out_ctype,
+                                                       out_chunk_size,
+                                                       &raw_copy_blobs);
+
+       DEBUG("Writing blob list "
+             "(offset = %"PRIu64", write_resource_flags=0x%08x, "
+             "out_ctype=%d, out_chunk_size=%u, num_threads=%u, "
+             "total_bytes=%"PRIu64", num_bytes_to_compress=%"PRIu64")",
+             out_fd->offset, write_resource_flags,
+             out_ctype, out_chunk_size, num_threads,
+             ctx.progress_data.progress.write_streams.total_bytes,
+             ctx.num_bytes_to_compress);
+
+       if (ctx.num_bytes_to_compress == 0) {
+               DEBUG("No compression needed; skipping to raw copy!");
+               goto out_write_raw_copy_resources;
+       }
+
+       /* Unless uncompressed output was required, allocate a chunk_compressor
+        * to do compression.  There are serial and parallel implementations of
+        * the chunk_compressor interface.  We default to parallel using the
+        * specified number of threads, unless the upper bound on the number
+        * bytes needing to be compressed is less than a heuristic value.  */
+       if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
+
+       #ifdef ENABLE_MULTITHREADED_COMPRESSION
+               if (ctx.num_bytes_to_compress > max(2000000, out_chunk_size)) {
+                       ret = new_parallel_chunk_compressor(out_ctype,
+                                                           out_chunk_size,
+                                                           num_threads, 0,
+                                                           &ctx.compressor);
+                       if (ret > 0) {
+                               WARNING("Couldn't create parallel chunk compressor: %"TS".\n"
+                                       "          Falling back to single-threaded compression.",
+                                       wimlib_get_error_string(ret));
+                       }
+               }
+       #endif
+
+               if (ctx.compressor == NULL) {
+                       ret = new_serial_chunk_compressor(out_ctype, out_chunk_size,
+                                                         &ctx.compressor);
+                       if (ret)
+                               goto out_destroy_context;
                }
        }
 
-       if (progress_func)
-               progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
-
-       struct main_writer_thread_ctx ctx;
-       ctx.stream_list           = stream_list;
-       ctx.lookup_table          = lookup_table;
-       ctx.out_fd                = out_fd;
-       ctx.out_ctype             = out_ctype;
-       ctx.res_to_compress_queue = &res_to_compress_queue;
-       ctx.compressed_res_queue  = &compressed_res_queue;
-       ctx.num_messages          = queue_size;
-       ctx.write_resource_flags  = write_resource_flags;
-       ctx.progress_func         = progress_func;
-       ctx.progress              = progress;
-       ret = main_writer_thread_init_ctx(&ctx);
+       if (ctx.compressor)
+               ctx.progress_data.progress.write_streams.num_threads = ctx.compressor->num_threads;
+       else
+               ctx.progress_data.progress.write_streams.num_threads = 1;
+
+       DEBUG("Actually using %u threads",
+             ctx.progress_data.progress.write_streams.num_threads);
+
+       INIT_LIST_HEAD(&ctx.blobs_being_compressed);
+       INIT_LIST_HEAD(&ctx.blobs_in_solid_resource);
+
+       ret = call_progress(ctx.progress_data.progfunc,
+                           WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
+                           &ctx.progress_data.progress,
+                           ctx.progress_data.progctx);
+       if (ret)
+               goto out_destroy_context;
+
+       if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
+               ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress);
+               if (ret)
+                       goto out_destroy_context;
+       }
+
+       /* Read the list of blobs needing to be compressed, using the specified
+        * callbacks to execute processing of the data.  */
+
+       struct read_blob_list_callbacks cbs = {
+               .begin_blob             = write_blob_begin_read,
+               .begin_blob_ctx         = &ctx,
+               .consume_chunk          = write_blob_process_chunk,
+               .consume_chunk_ctx      = &ctx,
+               .end_blob               = write_blob_end_read,
+               .end_blob_ctx           = &ctx,
+       };
+
+       ret = read_blob_list(blob_list,
+                            offsetof(struct blob_descriptor, write_blobs_list),
+                            &cbs,
+                            BLOB_LIST_ALREADY_SORTED |
+                               VERIFY_BLOB_HASHES |
+                               COMPUTE_MISSING_BLOB_HASHES);
+
        if (ret)
-               goto out_join;
-       ret = do_write_stream_list(stream_list, lookup_table,
-                                  main_thread_process_next_stream,
-                                  &ctx, progress_func, progress);
+               goto out_destroy_context;
+
+       ret = finish_remaining_chunks(&ctx);
        if (ret)
-               goto out_destroy_ctx;
-
-       /* The main thread has finished reading all streams that are going to be
-        * compressed in parallel, and it now needs to wait for all remaining
-        * chunks to be compressed so that the remaining streams can actually be
-        * written to the output file.  Furthermore, any remaining streams that
-        * had processing deferred to the main thread need to be handled.  These
-        * tasks are done by the main_writer_thread_finish() function. */
-       ret = main_writer_thread_finish(&ctx);
-out_destroy_ctx:
-       main_writer_thread_destroy_ctx(&ctx);
-out_join:
-       for (unsigned i = 0; i < num_threads; i++)
-               shared_queue_put(&res_to_compress_queue, NULL);
-
-       for (unsigned i = 0; i < num_threads; i++) {
-               if (pthread_join(compressor_threads[i], NULL)) {
-                       WARNING_WITH_ERRNO("Failed to join compressor "
-                                          "thread %u of %u",
-                                          i + 1, num_threads);
+               goto out_destroy_context;
+
+       if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
+               struct wim_reshdr reshdr;
+               struct blob_descriptor *blob;
+               u64 offset_in_res;
+
+               ret = end_write_resource(&ctx, &reshdr);
+               if (ret)
+                       goto out_destroy_context;
+
+               DEBUG("Ending solid resource: %lu %lu %lu.",
+                     reshdr.offset_in_wim,
+                     reshdr.size_in_wim,
+                     reshdr.uncompressed_size);
+
+               offset_in_res = 0;
+               list_for_each_entry(blob, &ctx.blobs_in_solid_resource, write_blobs_list) {
+                       blob->out_reshdr.size_in_wim = blob->size;
+                       blob->out_reshdr.flags = reshdr_flags_for_blob(blob) |
+                                                WIM_RESHDR_FLAG_SOLID;
+                       blob->out_reshdr.uncompressed_size = 0;
+                       blob->out_reshdr.offset_in_wim = offset_in_res;
+                       blob->out_res_offset_in_wim = reshdr.offset_in_wim;
+                       blob->out_res_size_in_wim = reshdr.size_in_wim;
+                       blob->out_res_uncompressed_size = reshdr.uncompressed_size;
+                       offset_in_res += blob->size;
                }
+               wimlib_assert(offset_in_res == reshdr.uncompressed_size);
        }
-       FREE(compressor_threads);
-out_destroy_compressed_res_queue:
-       shared_queue_destroy(&compressed_res_queue);
-out_destroy_res_to_compress_queue:
-       shared_queue_destroy(&res_to_compress_queue);
-       if (ret >= 0 && ret != WIMLIB_ERR_NOMEM)
-               return ret;
-out_serial:
-       WARNING("Falling back to single-threaded compression");
-out_serial_quiet:
-       return write_stream_list_serial(stream_list,
-                                       lookup_table,
-                                       out_fd,
-                                       out_ctype,
-                                       write_resource_flags,
-                                       progress_func,
-                                       progress);
 
+out_write_raw_copy_resources:
+       /* Copy any compressed resources for which the raw data can be reused
+        * without decompression.  */
+       ret = write_raw_copy_resources(&raw_copy_blobs, ctx.out_fd,
+                                      &ctx.progress_data);
+
+out_destroy_context:
+       FREE(ctx.chunk_csizes);
+       if (ctx.compressor)
+               ctx.compressor->destroy(ctx.compressor);
+       DEBUG("Done (ret=%d)", ret);
+       return ret;
 }
-#endif
 
-/*
- * Write a list of streams to a WIM (@out_fd) using the compression type
- * @out_ctype and up to @num_threads compressor threads.
- */
+
 static int
-write_stream_list(struct list_head *stream_list,
-                 struct wim_lookup_table *lookup_table,
-                 int out_fd, int out_ctype, int write_flags,
-                 unsigned num_threads, wimlib_progress_func_t progress_func)
+wim_write_blob_list(WIMStruct *wim,
+                   struct list_head *blob_list,
+                   int write_flags,
+                   unsigned num_threads,
+                   struct filter_context *filter_ctx)
 {
-       struct wim_lookup_table_entry *lte;
-       size_t num_streams = 0;
-       u64 total_bytes = 0;
-       u64 total_compression_bytes = 0;
-       union wimlib_progress_info progress;
-       int ret;
+       int out_ctype;
+       u32 out_chunk_size;
        int write_resource_flags;
 
-       if (list_empty(stream_list))
-               return 0;
-
        write_resource_flags = write_flags_to_resource_flags(write_flags);
 
-       /* Calculate the total size of the streams to be written.  Note: this
-        * will be the uncompressed size, as we may not know the compressed size
-        * yet, and also this will assume that every unhashed stream will be
-        * written (which will not necessarily be the case). */
-       list_for_each_entry(lte, stream_list, write_streams_list) {
-               num_streams++;
-               total_bytes += wim_resource_size(lte);
-               if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
-                      && (wim_resource_compression_type(lte) != out_ctype ||
-                          (write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS)))
-               {
-                       total_compression_bytes += wim_resource_size(lte);
-               }
+       /* wimlib v1.7.0: create a solid WIM file by default if the WIM version
+        * has been set to WIM_VERSION_SOLID and at least one blob in the WIM's
+        * blob table is located in a solid resource (may be the same WIM, or a
+        * different one in the case of export).  */
+       if (wim->hdr.wim_version == WIM_VERSION_SOLID &&
+           wim_has_solid_resources(wim))
+       {
+               write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID;
        }
-       progress.write_streams.total_bytes       = total_bytes;
-       progress.write_streams.total_streams     = num_streams;
-       progress.write_streams.completed_bytes   = 0;
-       progress.write_streams.completed_streams = 0;
-       progress.write_streams.num_threads       = num_threads;
-       progress.write_streams.compression_type  = out_ctype;
-       progress.write_streams._private          = 0;
-
-#ifdef ENABLE_MULTITHREADED_COMPRESSION
-       if (total_compression_bytes >= 2000000 && num_threads != 1)
-               ret = write_stream_list_parallel(stream_list,
-                                                lookup_table,
-                                                out_fd,
-                                                out_ctype,
-                                                write_resource_flags,
-                                                progress_func,
-                                                &progress,
-                                                num_threads);
-       else
-#endif
-               ret = write_stream_list_serial(stream_list,
-                                              lookup_table,
-                                              out_fd,
-                                              out_ctype,
-                                              write_resource_flags,
-                                              progress_func,
-                                              &progress);
-       return ret;
+
+       if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
+               out_chunk_size = wim->out_solid_chunk_size;
+               out_ctype = wim->out_solid_compression_type;
+       } else {
+               out_chunk_size = wim->out_chunk_size;
+               out_ctype = wim->out_compression_type;
+       }
+
+       return write_blob_list(blob_list,
+                              &wim->out_fd,
+                              write_resource_flags,
+                              out_ctype,
+                              out_chunk_size,
+                              num_threads,
+                              wim->blob_table,
+                              filter_ctx,
+                              wim->progfunc,
+                              wim->progctx);
+}
+
+/* Write the contents of the specified blob as a WIM resource.  */
+static int
+write_wim_resource(struct blob_descriptor *blob,
+                  struct filedes *out_fd,
+                  int out_ctype,
+                  u32 out_chunk_size,
+                  int write_resource_flags)
+{
+       LIST_HEAD(blob_list);
+       list_add(&blob->write_blobs_list, &blob_list);
+       blob->will_be_in_output_wim = 1;
+       return write_blob_list(&blob_list,
+                              out_fd,
+                              write_resource_flags & ~WRITE_RESOURCE_FLAG_SOLID,
+                              out_ctype,
+                              out_chunk_size,
+                              1,
+                              NULL,
+                              NULL,
+                              NULL,
+                              NULL);
+}
+
+/* Write the contents of the specified buffer as a WIM resource.  */
+int
+write_wim_resource_from_buffer(const void *buf,
+                              size_t buf_size,
+                              bool is_metadata,
+                              struct filedes *out_fd,
+                              int out_ctype,
+                              u32 out_chunk_size,
+                              struct wim_reshdr *out_reshdr,
+                              u8 *hash_ret,
+                              int write_resource_flags)
+{
+       int ret;
+       struct blob_descriptor blob;
+
+       blob_set_is_located_in_attached_buffer(&blob, (void *)buf, buf_size);
+       sha1_buffer(buf, buf_size, blob.hash);
+       blob.unhashed = 0;
+       blob.is_metadata = is_metadata;
+
+       ret = write_wim_resource(&blob, out_fd, out_ctype, out_chunk_size,
+                                write_resource_flags);
+       if (ret)
+               return ret;
+
+       copy_reshdr(out_reshdr, &blob.out_reshdr);
+
+       if (hash_ret)
+               copy_hash(hash_ret, blob.hash);
+       return 0;
 }
 
-struct stream_size_table {
+struct blob_size_table {
        struct hlist_head *array;
        size_t num_entries;
        size_t capacity;
 };
 
 static int
-init_stream_size_table(struct stream_size_table *tab, size_t capacity)
+init_blob_size_table(struct blob_size_table *tab, size_t capacity)
 {
        tab->array = CALLOC(capacity, sizeof(tab->array[0]));
-       if (!tab->array)
+       if (tab->array == NULL)
                return WIMLIB_ERR_NOMEM;
        tab->num_entries = 0;
        tab->capacity = capacity;
@@ -1478,521 +1823,1265 @@ init_stream_size_table(struct stream_size_table *tab, size_t capacity)
 }
 
 static void
-destroy_stream_size_table(struct stream_size_table *tab)
+destroy_blob_size_table(struct blob_size_table *tab)
 {
        FREE(tab->array);
 }
 
 static int
-stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab)
+blob_size_table_insert(struct blob_descriptor *blob, void *_tab)
 {
-       struct stream_size_table *tab = _tab;
+       struct blob_size_table *tab = _tab;
        size_t pos;
-       struct wim_lookup_table_entry *same_size_lte;
+       struct blob_descriptor *same_size_blob;
        struct hlist_node *tmp;
 
-       pos = hash_u64(wim_resource_size(lte)) % tab->capacity;
-       lte->unique_size = 1;
-       hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) {
-               if (wim_resource_size(same_size_lte) == wim_resource_size(lte)) {
-                       lte->unique_size = 0;
-                       same_size_lte->unique_size = 0;
+       pos = hash_u64(blob->size) % tab->capacity;
+       blob->unique_size = 1;
+       hlist_for_each_entry(same_size_blob, tmp, &tab->array[pos], hash_list_2) {
+               if (same_size_blob->size == blob->size) {
+                       blob->unique_size = 0;
+                       same_size_blob->unique_size = 0;
                        break;
                }
        }
 
-       hlist_add_head(&lte->hash_list_2, &tab->array[pos]);
+       hlist_add_head(&blob->hash_list_2, &tab->array[pos]);
        tab->num_entries++;
        return 0;
 }
 
-
-struct lte_overwrite_prepare_args {
+struct find_blobs_ctx {
        WIMStruct *wim;
-       off_t end_offset;
-       struct list_head stream_list;
-       struct stream_size_table stream_size_tab;
+       int write_flags;
+       struct list_head blob_list;
+       struct blob_size_table blob_size_tab;
 };
 
-/* First phase of preparing streams for an in-place overwrite.  This is called
- * on all streams, both hashed and unhashed, except the metadata resources. */
+/*
+ * Ensure that @blob is on @blob_list (linked via its write_blobs_list node)
+ * and add @nref to its output reference count.  The first time a blob is
+ * referenced, its out_refcnt is reset to 0 and its will_be_in_output_wim
+ * flag is set.
+ */
+static void
+reference_blob_for_write(struct blob_descriptor *blob,
+                        struct list_head *blob_list, u32 nref)
+{
+       if (!blob->will_be_in_output_wim) {
+               blob->out_refcnt = 0;
+               list_add_tail(&blob->write_blobs_list, blob_list);
+               blob->will_be_in_output_wim = 1;
+       }
+       blob->out_refcnt += nref;
+}
+
 static int
-lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *_args)
+fully_reference_blob_for_write(struct blob_descriptor *blob, void *_blob_list)
 {
-       struct lte_overwrite_prepare_args *args = _args;
+       struct list_head *blob_list = _blob_list;
+       blob->will_be_in_output_wim = 0;
+       reference_blob_for_write(blob, blob_list, blob->refcnt);
+       return 0;
+}
 
-       wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA));
-       if (lte->resource_location != RESOURCE_IN_WIM || lte->wim != args->wim)
-               list_add_tail(&lte->write_streams_list, &args->stream_list);
-       lte->out_refcnt = lte->refcnt;
-       stream_size_table_insert(lte, &args->stream_size_tab);
+/*
+ * Reference, for writing, each blob referenced by a stream of @inode,
+ * counting inode->i_nlink references per stream.  A stream with a nonzero
+ * hash whose blob cannot be resolved through @table causes
+ * WIMLIB_ERR_RESOURCE_NOT_FOUND to be returned.
+ */
+static int
+inode_find_blobs_to_reference(const struct wim_inode *inode,
+                             const struct blob_table *table,
+                             struct list_head *blob_list)
+{
+       wimlib_assert(inode->i_nlink > 0);
+
+       for (unsigned i = 0; i < inode->i_num_streams; i++) {
+               struct blob_descriptor *blob;
+
+               blob = stream_blob(&inode->i_streams[i], table);
+               if (blob)
+                       reference_blob_for_write(blob, blob_list, inode->i_nlink);
+               else if (!is_zero_hash(stream_hash(&inode->i_streams[i])))
+                       return WIMLIB_ERR_RESOURCE_NOT_FOUND;
+       }
        return 0;
 }
 
-/* Second phase of preparing streams for an in-place overwrite.  This is called
- * on existing metadata resources and hashed streams, but not unhashed streams.
- *
- * NOTE: lte->output_resource_entry is in union with lte->hash_list_2, so
- * lte_overwrite_prepare_2() must be called after lte_overwrite_prepare(), as
- * the latter uses lte->hash_list_2, while the former expects to set
- * lte->output_resource_entry. */
 static int
-lte_overwrite_prepare_2(struct wim_lookup_table_entry *lte, void *_args)
+do_blob_set_not_in_output_wim(struct blob_descriptor *blob, void *_ignore)
 {
-       struct lte_overwrite_prepare_args *args = _args;
+       blob->will_be_in_output_wim = 0;
+       return 0;
+}
 
-       if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == args->wim) {
-               /* We can't do an in place overwrite on the WIM if there are
-                * streams after the XML data. */
-               if (lte->resource_entry.offset +
-                   lte->resource_entry.size > args->end_offset)
-               {
-                       if (wimlib_print_errors) {
-                               ERROR("The following resource is after the XML data:");
-                               print_lookup_table_entry(lte, stderr);
-                       }
-                       return WIMLIB_ERR_RESOURCE_ORDER;
-               }
-               copy_resource_entry(&lte->output_resource_entry,
-                                   &lte->resource_entry);
+static int
+image_find_blobs_to_reference(WIMStruct *wim)
+{
+       struct wim_image_metadata *imd;
+       struct wim_inode *inode;
+       struct blob_descriptor *blob;
+       struct list_head *blob_list;
+       int ret;
+
+       imd = wim_get_current_image_metadata(wim);
+
+       image_for_each_unhashed_blob(blob, imd)
+               blob->will_be_in_output_wim = 0;
+
+       blob_list = wim->private;
+       image_for_each_inode(inode, imd) {
+               ret = inode_find_blobs_to_reference(inode,
+                                                   wim->blob_table,
+                                                   blob_list);
+               if (ret)
+                       return ret;
        }
        return 0;
 }
 
-/* Given a WIM that we are going to overwrite in place with zero or more
- * additional streams added, construct a list the list of new unique streams
- * ('struct wim_lookup_table_entry's) that must be written, plus any unhashed
- * streams that need to be added but may be identical to other hashed or
- * unhashed streams.  These unhashed streams are checksummed while the streams
- * are being written.  To aid this process, the member @unique_size is set to 1
- * on streams that have a unique size and therefore must be written.
- *
- * The out_refcnt member of each 'struct wim_lookup_table_entry' is set to
- * indicate the number of times the stream is referenced in only the streams
- * that are being written; this may still be adjusted later when unhashed
- * streams are being resolved.
- */
 static int
-prepare_streams_for_overwrite(WIMStruct *wim, off_t end_offset,
-                             struct list_head *stream_list)
+prepare_unfiltered_list_of_blobs_in_output_wim(WIMStruct *wim,
+                                              int image,
+                                              int blobs_ok,
+                                              struct list_head *blob_list_ret)
 {
        int ret;
-       struct lte_overwrite_prepare_args args;
-       unsigned i;
 
-       args.wim = wim;
-       args.end_offset = end_offset;
-       ret = init_stream_size_table(&args.stream_size_tab,
-                                    wim->lookup_table->capacity);
-       if (ret)
-               return ret;
+       INIT_LIST_HEAD(blob_list_ret);
 
-       INIT_LIST_HEAD(&args.stream_list);
-       for (i = 0; i < wim->hdr.image_count; i++) {
+       if (blobs_ok && (image == WIMLIB_ALL_IMAGES ||
+                        (image == 1 && wim->hdr.image_count == 1)))
+       {
+               /* Fast case:  Assume that all blobs are being written and that
+                * the reference counts are correct.  */
+               struct blob_descriptor *blob;
                struct wim_image_metadata *imd;
-               struct wim_lookup_table_entry *lte;
+               unsigned i;
 
-               imd = wim->image_metadata[i];
-               image_for_each_unhashed_stream(lte, imd)
-                       lte_overwrite_prepare(lte, &args);
-       }
-       for_lookup_table_entry(wim->lookup_table, lte_overwrite_prepare, &args);
-       list_transfer(&args.stream_list, stream_list);
+               for_blob_in_table(wim->blob_table,
+                                 fully_reference_blob_for_write,
+                                 blob_list_ret);
 
-       for (i = 0; i < wim->hdr.image_count; i++) {
-               ret = lte_overwrite_prepare_2(wim->image_metadata[i]->metadata_lte,
-                                             &args);
+               for (i = 0; i < wim->hdr.image_count; i++) {
+                       imd = wim->image_metadata[i];
+                       image_for_each_unhashed_blob(blob, imd)
+                               fully_reference_blob_for_write(blob, blob_list_ret);
+               }
+       } else {
+               /* Slow case:  Walk through the images being written and
+                * determine the blobs referenced.  */
+               for_blob_in_table(wim->blob_table,
+                                 do_blob_set_not_in_output_wim, NULL);
+               wim->private = blob_list_ret;
+               ret = for_image(wim, image, image_find_blobs_to_reference);
                if (ret)
-                       goto out_destroy_stream_size_table;
+                       return ret;
        }
-       ret = for_lookup_table_entry(wim->lookup_table,
-                                    lte_overwrite_prepare_2, &args);
-out_destroy_stream_size_table:
-       destroy_stream_size_table(&args.stream_size_tab);
-       return ret;
-}
 
+       return 0;
+}
 
-struct find_streams_ctx {
-       struct list_head stream_list;
-       struct stream_size_table stream_size_tab;
+struct insert_other_if_hard_filtered_ctx {
+       struct blob_size_table *tab;
+       struct filter_context *filter_ctx;
 };
 
+/*
+ * for_blob_in_table() callback: insert @blob into the blob size table if it
+ * will not be in the output WIM but is "hard-filtered", so that such blobs
+ * still participate in the size-uniquity calculation.
+ */
+static int
+insert_other_if_hard_filtered(struct blob_descriptor *blob, void *_ctx)
+{
+       struct insert_other_if_hard_filtered_ctx *ctx = _ctx;
+
+       if (!blob->will_be_in_output_wim &&
+           blob_hard_filtered(blob, ctx->filter_ctx))
+               blob_size_table_insert(blob, ctx->tab);
+       return 0;
+}
+
+/*
+ * Set @unique_size on each blob in @blob_list whose size is unique among the
+ * blobs in @blob_list plus any hard-filtered blobs in the blob table @lt.
+ *
+ * Returns 0 on success, or WIMLIB_ERR_NOMEM if the temporary size table
+ * could not be allocated.
+ */
+static int
+determine_blob_size_uniquity(struct list_head *blob_list,
+                            struct blob_table *lt,
+                            struct filter_context *filter_ctx)
+{
+       int ret;
+       struct blob_size_table tab;
+       struct blob_descriptor *blob;
+
+       /* NOTE(review): fixed table capacity; 9001 appears arbitrary —
+        * confirm the intended sizing.  */
+       ret = init_blob_size_table(&tab, 9001);
+       if (ret)
+               return ret;
+
+       if (may_hard_filter_blobs(filter_ctx)) {
+               struct insert_other_if_hard_filtered_ctx ctx = {
+                       .tab = &tab,
+                       .filter_ctx = filter_ctx,
+               };
+               for_blob_in_table(lt, insert_other_if_hard_filtered, &ctx);
+       }
+
+       list_for_each_entry(blob, blob_list, write_blobs_list)
+               blob_size_table_insert(blob, &tab);
+
+       destroy_blob_size_table(&tab);
+       return 0;
+}
+
 static void
-inode_find_streams_to_write(struct wim_inode *inode,
-                           struct wim_lookup_table *table,
-                           struct list_head *stream_list,
-                           struct stream_size_table *tab)
-{
-       struct wim_lookup_table_entry *lte;
-       for (unsigned i = 0; i <= inode->i_num_ads; i++) {
-               lte = inode_stream_lte(inode, i, table);
-               if (lte) {
-                       if (lte->out_refcnt == 0) {
-                               if (lte->unhashed)
-                                       stream_size_table_insert(lte, tab);
-                               list_add_tail(&lte->write_streams_list, stream_list);
+filter_blob_list_for_write(struct list_head *blob_list,
+                          struct filter_context *filter_ctx)
+{
+       struct blob_descriptor *blob, *tmp;
+
+       list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
+               int status = blob_filtered(blob, filter_ctx);
+
+               if (status == 0) {
+                       /* Not filtered.  */
+                       continue;
+               } else {
+                       if (status > 0) {
+                               /* Soft filtered.  */
+                       } else {
+                               /* Hard filtered.  */
+                               blob->will_be_in_output_wim = 0;
+                               list_del(&blob->blob_table_list);
                        }
-                       lte->out_refcnt += inode->i_nlink;
+                       list_del(&blob->write_blobs_list);
                }
        }
 }
 
+/*
+ * prepare_blob_list_for_write() -
+ *
+ * Prepare the list of blobs to write for writing a WIM containing the specified
+ * image(s) with the specified write flags.
+ *
+ * @wim
+ *     The WIMStruct on whose behalf the write is occurring.
+ *
+ * @image
+ *     Image(s) from the WIM to write; may be WIMLIB_ALL_IMAGES.
+ *
+ * @write_flags
+ *     WIMLIB_WRITE_FLAG_* flags for the write operation:
+ *
+ *     STREAMS_OK:  For writes of all images, assume that all blobs in the blob
+ *     table of @wim and the per-image lists of unhashed blobs should be taken
+ *     as-is, and image metadata should not be searched for references.  This
+ *     does not exclude filtering with OVERWRITE and SKIP_EXTERNAL_WIMS, below.
+ *
+ *     OVERWRITE:  Blobs already present in @wim shall not be returned in
+ *     @blob_list_ret.
+ *
+ *     SKIP_EXTERNAL_WIMS:  Blobs already present in a WIM file, but not @wim,
+ *     shall be returned in neither @blob_list_ret nor @blob_table_list_ret.
+ *
+ * @blob_list_ret
+ *     List of blobs, linked by write_blobs_list, that need to be written will
+ *     be returned here.
+ *
+ *     Note that this function assumes that unhashed blobs will be written; it
+ *     does not take into account that they may become duplicates when actually
+ *     hashed.
+ *
+ * @blob_table_list_ret
+ *     List of blobs, linked by blob_table_list, that need to be included in
+ *     the WIM's blob table will be returned here.  This will be a superset of
+ *     the blobs in @blob_list_ret.
+ *
+ *     This list will be a proper superset of @blob_list_ret if and only if
+ *     WIMLIB_WRITE_FLAG_OVERWRITE was specified in @write_flags and some of
+ *     the blobs that would otherwise need to be written were already located
+ *     in the WIM file.
+ *
+ *     All blobs in this list will have @out_refcnt set to the number of
+ *     references to the blob in the output WIM.  If
+ *     WIMLIB_WRITE_FLAG_STREAMS_OK was specified in @write_flags, @out_refcnt
+ *     may be as low as 0.
+ *
+ * @filter_ctx_ret
+ *     A context for queries of blob filter status with blob_filtered() is
+ *     returned in this location.
+ *
+ * In addition, @will_be_in_output_wim will be set to 1 in all blobs inserted
+ * into @blob_table_list_ret and to 0 in all blobs in the blob table of @wim not
+ * inserted into @blob_table_list_ret.
+ *
+ * Still furthermore, @unique_size will be set to 1 on all blobs in
+ * @blob_list_ret that have unique size among all blobs in @blob_list_ret and
+ * among all blobs in the blob table of @wim that are ineligible for being
+ * written due to filtering.
+ *
+ * Returns 0 on success; nonzero on read error, memory allocation error, or
+ * otherwise.
+ */
 static int
-image_find_streams_to_write(WIMStruct *w)
+prepare_blob_list_for_write(WIMStruct *wim, int image,
+                           int write_flags,
+                           struct list_head *blob_list_ret,
+                           struct list_head *blob_table_list_ret,
+                           struct filter_context *filter_ctx_ret)
 {
-       struct find_streams_ctx *ctx;
-       struct wim_image_metadata *imd;
-       struct wim_inode *inode;
-       struct wim_lookup_table_entry *lte;
+       int ret;
+       struct blob_descriptor *blob;
 
-       ctx = w->private;
-       imd = wim_get_current_image_metadata(w);
+       filter_ctx_ret->write_flags = write_flags;
+       filter_ctx_ret->wim = wim;
 
-       image_for_each_unhashed_stream(lte, imd)
-               lte->out_refcnt = 0;
+       ret = prepare_unfiltered_list_of_blobs_in_output_wim(
+                               wim,
+                               image,
+                               write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK,
+                               blob_list_ret);
+       if (ret)
+               return ret;
+
+       INIT_LIST_HEAD(blob_table_list_ret);
+       list_for_each_entry(blob, blob_list_ret, write_blobs_list)
+               list_add_tail(&blob->blob_table_list, blob_table_list_ret);
+
+       ret = determine_blob_size_uniquity(blob_list_ret, wim->blob_table,
+                                          filter_ctx_ret);
+       if (ret)
+               return ret;
+
+       if (may_filter_blobs(filter_ctx_ret))
+               filter_blob_list_for_write(blob_list_ret, filter_ctx_ret);
 
-       /* Go through this image's inodes to find any streams that have not been
-        * found yet. */
-       image_for_each_inode(inode, imd) {
-               inode_find_streams_to_write(inode, w->lookup_table,
-                                           &ctx->stream_list,
-                                           &ctx->stream_size_tab);
-       }
        return 0;
 }
 
-/* Given a WIM that from which one or all of the images is being written, build
- * the list of unique streams ('struct wim_lookup_table_entry's) that must be
- * written, plus any unhashed streams that need to be written but may be
- * identical to other hashed or unhashed streams being written.  These unhashed
- * streams are checksummed while the streams are being written.  To aid this
- * process, the member @unique_size is set to 1 on streams that have a unique
- * size and therefore must be written.
- *
- * The out_refcnt member of each 'struct wim_lookup_table_entry' is set to
- * indicate the number of times the stream is referenced in only the streams
- * that are being written; this may still be adjusted later when unhashed
- * streams are being resolved.
- */
 static int
-prepare_stream_list(WIMStruct *wim, int image, struct list_head *stream_list)
+write_file_blobs(WIMStruct *wim, int image, int write_flags,
+                unsigned num_threads,
+                struct list_head *blob_list_override,
+                struct list_head *blob_table_list_ret)
+{
+       int ret;
+       struct list_head _blob_list;
+       struct list_head *blob_list;
+       struct blob_descriptor *blob;
+       struct filter_context _filter_ctx;
+       struct filter_context *filter_ctx;
+
+       if (blob_list_override == NULL) {
+               /* Normal case: prepare blob list from image(s) being written.
+                */
+               blob_list = &_blob_list;
+               filter_ctx = &_filter_ctx;
+               ret = prepare_blob_list_for_write(wim, image, write_flags,
+                                                 blob_list,
+                                                 blob_table_list_ret,
+                                                 filter_ctx);
+               if (ret)
+                       return ret;
+       } else {
+               /* Currently only as a result of wimlib_split() being called:
+                * use blob list already explicitly provided.  Use existing
+                * reference counts.  */
+               blob_list = blob_list_override;
+               filter_ctx = NULL;
+               INIT_LIST_HEAD(blob_table_list_ret);
+               list_for_each_entry(blob, blob_list, write_blobs_list) {
+                       blob->out_refcnt = blob->refcnt;
+                       blob->will_be_in_output_wim = 1;
+                       blob->unique_size = 0;
+                       list_add_tail(&blob->blob_table_list, blob_table_list_ret);
+               }
+       }
+
+       return wim_write_blob_list(wim,
+                                  blob_list,
+                                  write_flags,
+                                  num_threads,
+                                  filter_ctx);
+}
+
+static int
+write_metadata_resources(WIMStruct *wim, int image, int write_flags)
 {
        int ret;
-       struct find_streams_ctx ctx;
+       int start_image;
+       int end_image;
+       int write_resource_flags;
+
+       if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA) {
+               DEBUG("Not writing any metadata resources.");
+               return 0;
+       }
 
-       for_lookup_table_entry(wim->lookup_table, lte_zero_out_refcnt, NULL);
-       ret = init_stream_size_table(&ctx.stream_size_tab,
-                                    wim->lookup_table->capacity);
+       write_resource_flags = write_flags_to_resource_flags(write_flags);
+
+       write_resource_flags &= ~WRITE_RESOURCE_FLAG_SOLID;
+
+       DEBUG("Writing metadata resources (offset=%"PRIu64")",
+             wim->out_fd.offset);
+
+       ret = call_progress(wim->progfunc,
+                           WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN,
+                           NULL, wim->progctx);
        if (ret)
                return ret;
-       for_lookup_table_entry(wim->lookup_table, stream_size_table_insert,
-                              &ctx.stream_size_tab);
-       INIT_LIST_HEAD(&ctx.stream_list);
-       wim->private = &ctx;
-       ret = for_image(wim, image, image_find_streams_to_write);
-       destroy_stream_size_table(&ctx.stream_size_tab);
-       if (ret == 0)
-               list_transfer(&ctx.stream_list, stream_list);
+
+       if (image == WIMLIB_ALL_IMAGES) {
+               start_image = 1;
+               end_image = wim->hdr.image_count;
+       } else {
+               start_image = image;
+               end_image = image;
+       }
+
+       for (int i = start_image; i <= end_image; i++) {
+               struct wim_image_metadata *imd;
+
+               imd = wim->image_metadata[i - 1];
+               /* Build a new metadata resource only if image was modified from
+                * the original (or was newly added).  Otherwise just copy the
+                * existing one.  */
+               if (imd->modified) {
+                       DEBUG("Image %u was modified; building and writing new "
+                             "metadata resource", i);
+                       ret = write_metadata_resource(wim, i,
+                                                     write_resource_flags);
+               } else if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
+                       DEBUG("Image %u was not modified; re-using existing "
+                             "metadata resource.", i);
+                       blob_set_out_reshdr_for_reuse(imd->metadata_blob);
+                       ret = 0;
+               } else {
+                       DEBUG("Image %u was not modified; copying existing "
+                             "metadata resource.", i);
+                       ret = write_wim_resource(imd->metadata_blob,
+                                                &wim->out_fd,
+                                                wim->out_compression_type,
+                                                wim->out_chunk_size,
+                                                write_resource_flags);
+               }
+               if (ret)
+                       return ret;
+       }
+
+       return call_progress(wim->progfunc,
+                            WIMLIB_PROGRESS_MSG_WRITE_METADATA_END,
+                            NULL, wim->progctx);
+}
+
+/*
+ * Open the file at @path for writing the output WIM and initialize
+ * wim->out_fd with the resulting file descriptor.  O_BINARY is OR'ed into
+ * @open_flags for platforms that distinguish text and binary modes.
+ * Returns 0 on success or WIMLIB_ERR_OPEN on failure.
+ */
+static int
+open_wim_writable(WIMStruct *wim, const tchar *path, int open_flags)
+{
+       int raw_fd;
+       DEBUG("Opening \"%"TS"\" for writing.", path);
+
+       raw_fd = topen(path, open_flags | O_BINARY, 0644);
+       if (raw_fd < 0) {
+               ERROR_WITH_ERRNO("Failed to open \"%"TS"\" for writing", path);
+               return WIMLIB_ERR_OPEN;
+       }
+       filedes_init(&wim->out_fd, raw_fd);
+       return 0;
+}
+
+/*
+ * Close the output WIM file descriptor, unless it was supplied by the caller
+ * (WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR set in @write_flags); in either case
+ * wim->out_fd is marked invalid afterwards.  Returns 0 on success, or
+ * WIMLIB_ERR_WRITE if closing the descriptor failed.
+ */
+static int
+close_wim_writable(WIMStruct *wim, int write_flags)
+{
+       int ret = 0;
+
+       if (!(write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)) {
+               DEBUG("Closing WIM file.");
+               if (filedes_valid(&wim->out_fd))
+                       if (filedes_close(&wim->out_fd))
+                               ret = WIMLIB_ERR_WRITE;
+       }
+       filedes_invalidate(&wim->out_fd);
        return ret;
 }
 
-/* Writes the streams for the specified @image in @wim to @wim->out_fd.
- */
 static int
-write_wim_streams(WIMStruct *wim, int image, int write_flags,
-                 unsigned num_threads,
-                 wimlib_progress_func_t progress_func)
+cmp_blobs_by_out_rdesc(const void *p1, const void *p2)
+{
+       const struct blob_descriptor *blob1, *blob2;
+
+       blob1 = *(const struct blob_descriptor**)p1;
+       blob2 = *(const struct blob_descriptor**)p2;
+
+       if (blob1->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) {
+               if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) {
+                       if (blob1->out_res_offset_in_wim != blob2->out_res_offset_in_wim)
+                               return cmp_u64(blob1->out_res_offset_in_wim,
+                                              blob2->out_res_offset_in_wim);
+               } else {
+                       return 1;
+               }
+       } else {
+               if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID)
+                       return -1;
+       }
+       return cmp_u64(blob1->out_reshdr.offset_in_wim,
+                      blob2->out_reshdr.offset_in_wim);
+}
+
+static int
+write_blob_table(WIMStruct *wim, int image, int write_flags,
+                struct wim_reshdr *out_reshdr,
+                struct list_head *blob_table_list)
 {
        int ret;
-       struct list_head stream_list;
 
-       ret = prepare_stream_list(wim, image, &stream_list);
+       /* Set output resource metadata for blobs already present in WIM.  */
+       if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
+               struct blob_descriptor *blob;
+               list_for_each_entry(blob, blob_table_list, blob_table_list) {
+                       if (blob->blob_location == BLOB_IN_WIM &&
+                           blob->rdesc->wim == wim)
+                       {
+                               blob_set_out_reshdr_for_reuse(blob);
+                       }
+               }
+       }
+
+       ret = sort_blob_list(blob_table_list,
+                            offsetof(struct blob_descriptor, blob_table_list),
+                            cmp_blobs_by_out_rdesc);
        if (ret)
                return ret;
-       return write_stream_list(&stream_list,
-                                wim->lookup_table,
-                                wim->out_fd,
-                                wim->compression_type,
-                                write_flags,
-                                num_threads,
-                                progress_func);
+
+       /* Add entries for metadata resources.  */
+       if (!(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)) {
+               int start_image;
+               int end_image;
+
+               if (image == WIMLIB_ALL_IMAGES) {
+                       start_image = 1;
+                       end_image = wim->hdr.image_count;
+               } else {
+                       start_image = image;
+                       end_image = image;
+               }
+
+               /* Push metadata blob table entries onto the front of the list
+                * in reverse order, so that they're written in order.
+                */
+               for (int i = end_image; i >= start_image; i--) {
+                       struct blob_descriptor *metadata_blob;
+
+                       metadata_blob = wim->image_metadata[i - 1]->metadata_blob;
+                       wimlib_assert(metadata_blob->out_reshdr.flags & WIM_RESHDR_FLAG_METADATA);
+                       metadata_blob->out_refcnt = 1;
+                       list_add(&metadata_blob->blob_table_list, blob_table_list);
+               }
+       }
+
+       return write_blob_table_from_blob_list(blob_table_list,
+                                              &wim->out_fd,
+                                              wim->hdr.part_number,
+                                              out_reshdr,
+                                              write_flags_to_resource_flags(write_flags));
 }
 
 /*
- * Finish writing a WIM file: write the lookup table, xml data, and integrity
- * table (optional), then overwrite the WIM header.
+ * finish_write():
  *
- * write_flags is a bitwise OR of the following:
+ * Finish writing a WIM file: write the blob table, xml data, and integrity
+ * table, then overwrite the WIM header.  By default, closes the WIM file
+ * descriptor (@wim->out_fd) if successful.
  *
- *     (public)  WIMLIB_WRITE_FLAG_CHECK_INTEGRITY:
- *             Include an integrity table.
+ * write_flags is a bitwise OR of the following:
  *
- *     (private) WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE:
- *             Don't write the lookup table.
+ *     (public) WIMLIB_WRITE_FLAG_CHECK_INTEGRITY:
+ *             Include an integrity table.
  *
- *     (private) WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE:
- *             When (if) writing the integrity table, re-use entries from the
- *             existing integrity table, if possible.
+ *     (public) WIMLIB_WRITE_FLAG_FSYNC:
+ *             fsync() the output file before closing it.
  *
- *     (private) WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML:
- *             After writing the XML data but before writing the integrity
- *             table, write a temporary WIM header and flush the stream so that
- *             the WIM is less likely to become corrupted upon abrupt program
- *             termination.
+ *     (public) WIMLIB_WRITE_FLAG_PIPABLE:
+ *             Writing a pipable WIM, possibly to a pipe; include pipable WIM
+ *             blob headers before the blob table and XML data, and also write
+ *             the WIM header at the end instead of seeking to the beginning.
+ *             Can't be combined with WIMLIB_WRITE_FLAG_CHECK_INTEGRITY.
  *
- *     (private) WIMLIB_WRITE_FLAG_FSYNC:
- *             fsync() the output file before closing it.
+ *     (private) WIMLIB_WRITE_FLAG_NO_BLOB_TABLE:
+ *             Don't write the blob table.
  *
+ *     (private) WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML:
+ *             After writing the XML data but before writing the integrity
+ *             table, write a temporary WIM header and flush the file
+ *             descriptor so that the WIM is less likely to become corrupted
+ *             upon abrupt program termination.
+ *     (private) WIMLIB_WRITE_FLAG_HEADER_AT_END:
+ *             Instead of overwriting the WIM header at the beginning of the
+ *             file, simply append it to the end of the file.  (Used when
+ *             writing to pipe.)
+ *     (private) WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR:
+ *             Do not close the file descriptor @wim->out_fd on either success
+ *             or failure.
+ *     (private) WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES:
+ *             Use the existing <TOTALBYTES> stored in the in-memory XML
+ *             information, rather than setting it to the offset of the XML
+ *             data being written.
+ *     (private) WIMLIB_WRITE_FLAG_OVERWRITE
+ *             The existing WIM file is being updated in-place.  The entries
+ *             from its integrity table may be re-used.
  */
-int
-finish_write(WIMStruct *w, int image, int write_flags,
-            wimlib_progress_func_t progress_func)
+static int
+finish_write(WIMStruct *wim, int image, int write_flags,
+            struct list_head *blob_table_list)
 {
        int ret;
-       struct wim_header hdr;
+       off_t hdr_offset;
+       int write_resource_flags;
+       off_t old_blob_table_end = 0;
+       off_t new_blob_table_end;
+       u64 xml_totalbytes;
+       struct integrity_table *old_integrity_table = NULL;
 
-       /* @hdr will be the header for the new WIM.  First copy all the data
-        * from the header in the WIMStruct; then set all the fields that may
-        * have changed, including the resource entries, boot index, and image
-        * count.  */
-       memcpy(&hdr, &w->hdr, sizeof(struct wim_header));
+       DEBUG("image=%d, write_flags=%08x", image, write_flags);
 
-       /* Set image count and boot index correctly for single image writes */
-       if (image != WIMLIB_ALL_IMAGES) {
-               hdr.image_count = 1;
-               if (hdr.boot_idx == image)
-                       hdr.boot_idx = 1;
-               else
-                       hdr.boot_idx = 0;
-       }
+       write_resource_flags = write_flags_to_resource_flags(write_flags);
 
        /* In the WIM header, there is room for the resource entry for a
         * metadata resource labeled as the "boot metadata".  This entry should
         * be zeroed out if there is no bootable image (boot_idx 0).  Otherwise,
         * it should be a copy of the resource entry for the image that is
         * marked as bootable.  This is not well documented...  */
-       if (hdr.boot_idx == 0) {
-               zero_resource_entry(&hdr.boot_metadata_res_entry);
+       if (wim->hdr.boot_idx == 0) {
+               zero_reshdr(&wim->hdr.boot_metadata_reshdr);
        } else {
-               copy_resource_entry(&hdr.boot_metadata_res_entry,
-                           &w->image_metadata[ hdr.boot_idx- 1
-                                       ]->metadata_lte->output_resource_entry);
+               copy_reshdr(&wim->hdr.boot_metadata_reshdr,
+                           &wim->image_metadata[
+                               wim->hdr.boot_idx - 1]->metadata_blob->out_reshdr);
+       }
+
+       /* If overwriting the WIM file containing an integrity table in-place,
+        * we'd like to re-use the information in the old integrity table
+        * instead of recalculating it.  But we might overwrite the old
+        * integrity table when we expand the XML data.  Read it into memory
+        * just in case.  */
+       if ((write_flags & (WIMLIB_WRITE_FLAG_OVERWRITE |
+                           WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)) ==
+               (WIMLIB_WRITE_FLAG_OVERWRITE |
+                WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
+           && wim_has_integrity_table(wim))
+       {
+               old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
+                                      wim->hdr.blob_table_reshdr.size_in_wim;
+               (void)read_integrity_table(wim,
+                                          old_blob_table_end - WIM_HEADER_DISK_SIZE,
+                                          &old_integrity_table);
+               /* If we couldn't read the old integrity table, we can still
+                * re-calculate the full integrity table ourselves.  Hence the
+                * ignoring of the return value.  */
+       }
+
+       /* Write blob table.  */
+       if (!(write_flags & WIMLIB_WRITE_FLAG_NO_BLOB_TABLE)) {
+               ret = write_blob_table(wim, image, write_flags,
+                                      &wim->hdr.blob_table_reshdr,
+                                      blob_table_list);
+               if (ret) {
+                       free_integrity_table(old_integrity_table);
+                       return ret;
+               }
        }
 
-       if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
-               ret = write_lookup_table(w, image, &hdr.lookup_table_res_entry);
-               if (ret)
-                       goto out_close_wim;
+       /* Write XML data.  */
+       xml_totalbytes = wim->out_fd.offset;
+       if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES)
+               xml_totalbytes = WIM_TOTALBYTES_USE_EXISTING;
+       ret = write_wim_xml_data(wim, image, xml_totalbytes,
+                                &wim->hdr.xml_data_reshdr,
+                                write_resource_flags);
+       if (ret) {
+               free_integrity_table(old_integrity_table);
+               return ret;
        }
 
-       ret = write_xml_data(w->wim_info, image, w->out_fd,
-                            (write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE) ?
-                             wim_info_get_total_bytes(w->wim_info) : 0,
-                            &hdr.xml_res_entry);
-       if (ret)
-               goto out_close_wim;
-
+       /* Write integrity table (optional).  */
        if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
                if (write_flags & WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) {
                        struct wim_header checkpoint_hdr;
-                       memcpy(&checkpoint_hdr, &hdr, sizeof(struct wim_header));
-                       zero_resource_entry(&checkpoint_hdr.integrity);
+                       memcpy(&checkpoint_hdr, &wim->hdr, sizeof(struct wim_header));
+                       zero_reshdr(&checkpoint_hdr.integrity_table_reshdr);
                        checkpoint_hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
-                       ret = write_header(&checkpoint_hdr, w->out_fd);
-                       if (ret)
-                               goto out_close_wim;
+                       ret = write_wim_header_at_offset(&checkpoint_hdr,
+                                                        &wim->out_fd, 0);
+                       if (ret) {
+                               free_integrity_table(old_integrity_table);
+                               return ret;
+                       }
                }
 
-               off_t old_lookup_table_end;
-               off_t new_lookup_table_end;
-               if (write_flags & WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE) {
-                       old_lookup_table_end = w->hdr.lookup_table_res_entry.offset +
-                                              w->hdr.lookup_table_res_entry.size;
-               } else {
-                       old_lookup_table_end = 0;
-               }
-               new_lookup_table_end = hdr.lookup_table_res_entry.offset +
-                                      hdr.lookup_table_res_entry.size;
-
-               ret = write_integrity_table(w->out_fd,
-                                           &hdr.integrity,
-                                           new_lookup_table_end,
-                                           old_lookup_table_end,
-                                           progress_func);
+               new_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
+                                    wim->hdr.blob_table_reshdr.size_in_wim;
+
+               ret = write_integrity_table(wim,
+                                           new_blob_table_end,
+                                           old_blob_table_end,
+                                           old_integrity_table);
+               free_integrity_table(old_integrity_table);
                if (ret)
-                       goto out_close_wim;
+                       return ret;
        } else {
-               zero_resource_entry(&hdr.integrity);
-       }
-
-       hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
-       ret = write_header(&hdr, w->out_fd);
+               /* No integrity table.  */
+               zero_reshdr(&wim->hdr.integrity_table_reshdr);
+       }
+
+       /* Now that all information in the WIM header has been determined, the
+        * preliminary header written earlier can be overwritten, the header of
+        * the existing WIM file can be overwritten, or the final header can be
+        * written to the end of the pipable WIM.  */
+       wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
+       hdr_offset = 0;
+       if (write_flags & WIMLIB_WRITE_FLAG_HEADER_AT_END)
+               hdr_offset = wim->out_fd.offset;
+       DEBUG("Writing new header @ %"PRIu64".", hdr_offset);
+       ret = write_wim_header_at_offset(&wim->hdr, &wim->out_fd, hdr_offset);
        if (ret)
-               goto out_close_wim;
+               return ret;
 
+       /* Possibly sync file data to disk before closing.  On POSIX systems, it
+        * is necessary to do this before using rename() to overwrite an
+        * existing file with a new file.  Otherwise, data loss would occur if
+        * the system is abruptly terminated when the metadata for the rename
+        * operation has been written to disk, but the new file data has not.
+        */
        if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
-               if (fsync(w->out_fd)) {
+               DEBUG("Syncing WIM file.");
+               if (fsync(wim->out_fd.fd)) {
                        ERROR_WITH_ERRNO("Error syncing data to WIM file");
-                       ret = WIMLIB_ERR_WRITE;
+                       return WIMLIB_ERR_WRITE;
                }
        }
-out_close_wim:
-       if (close(w->out_fd)) {
+
+       if (close_wim_writable(wim, write_flags)) {
                ERROR_WITH_ERRNO("Failed to close the output WIM file");
-               if (ret == 0)
-                       ret = WIMLIB_ERR_WRITE;
+               return WIMLIB_ERR_WRITE;
        }
-       w->out_fd = -1;
-       return ret;
+
+       return 0;
 }
 
 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
-int
-lock_wim(WIMStruct *w, int fd)
-{
-       int ret = 0;
-       if (fd != -1 && !w->wim_locked) {
-               ret = flock(fd, LOCK_EX | LOCK_NB);
-               if (ret != 0) {
-                       if (errno == EWOULDBLOCK) {
-                               ERROR("`%"TS"' is already being modified or has been "
-                                     "mounted read-write\n"
-                                     "        by another process!", w->filename);
-                               ret = WIMLIB_ERR_ALREADY_LOCKED;
-                       } else {
-                               WARNING_WITH_ERRNO("Failed to lock `%"TS"'",
-                                                  w->filename);
-                               ret = 0;
-                       }
-               } else {
-                       w->wim_locked = 1;
-               }
-       }
-       return ret;
-}
-#endif
 
-static int
-open_wim_writable(WIMStruct *w, const tchar *path, int open_flags)
+/* Set advisory lock on WIM file (if not already done so)  */
+int
+lock_wim_for_append(WIMStruct *wim)
 {
-       w->out_fd = topen(path, open_flags | O_BINARY, 0644);
-       if (w->out_fd == -1) {
-               ERROR_WITH_ERRNO("Failed to open `%"TS"' for writing", path);
-               return WIMLIB_ERR_OPEN;
+       if (wim->locked_for_append)
+               return 0;
+       if (!flock(wim->in_fd.fd, LOCK_EX | LOCK_NB)) {
+               wim->locked_for_append = 1;
+               return 0;
        }
-       return 0;
+       if (errno != EWOULDBLOCK)
+               return 0;
+       return WIMLIB_ERR_ALREADY_LOCKED;
 }
 
-
+/* Remove advisory lock on WIM file (if present)  */
 void
-close_wim_writable(WIMStruct *w)
+unlock_wim_for_append(WIMStruct *wim)
 {
-       if (w->out_fd != -1) {
-               if (close(w->out_fd))
-                       WARNING_WITH_ERRNO("Failed to close output WIM");
-               w->out_fd = -1;
+       if (wim->locked_for_append) {
+               flock(wim->in_fd.fd, LOCK_UN);
+               wim->locked_for_append = 0;
        }
 }
+#endif
 
-/* Open file stream and write dummy header for WIM. */
-int
-begin_write(WIMStruct *w, const tchar *path, int write_flags)
+/*
+ * write_pipable_wim():
+ *
+ * Perform the intermediate stages of creating a "pipable" WIM (i.e. a WIM
+ * capable of being applied from a pipe).
+ *
+ * Pipable WIMs are a wimlib-specific modification of the WIM format such that
+ * images can be applied from them sequentially when the file data is sent over
+ * a pipe.  In addition, a pipable WIM can be written sequentially to a pipe.
+ * The modifications made to the WIM format for pipable WIMs are:
+ *
+ * - Magic characters in header are "WLPWM\0\0\0" (wimlib pipable WIM) instead
+ *   of "MSWIM\0\0\0".  This lets wimlib know that the WIM is pipable and also
+ *   stops other software from trying to read the file as a normal WIM.
+ *
+ * - The header at the beginning of the file does not contain all the normal
+ *   information; in particular it will have all 0's for the blob table and XML
+ *   data resource entries.  This is because this information cannot be
+ *   determined until the blob table and XML data have been written.
+ *   Consequently, wimlib will write the full header at the very end of the
+ *   file.  The header at the end, however, is only used when reading the WIM
+ *   from a seekable file (not a pipe).
+ *
+ * - An extra copy of the XML data is placed directly after the header.  This
+ *   allows image names and sizes to be determined at an appropriate time when
+ *   reading the WIM from a pipe.  This copy of the XML data is ignored if the
+ *   WIM is read from a seekable file (not a pipe).
+ *
+ * - The format of resources, or blobs, has been modified to allow them to be
+ *   used before the "blob table" has been read.  Each blob is prefixed with a
+ *   `struct pwm_blob_hdr' that is basically an abbreviated form of `struct
+ *   blob_descriptor_disk' that only contains the SHA-1 message digest,
+ *   uncompressed blob size, and flags that indicate whether the blob is
+ *   compressed.  The data of uncompressed blobs then follows literally, while
+ *   the data of compressed blobs follows in a modified format.  Compressed
+ *   blobs do not begin with a chunk table, since the chunk table cannot be
+ *   written until all chunks have been compressed.  Instead, each compressed
+ *   chunk is prefixed by a `struct pwm_chunk_hdr' that gives its size.
+ *   Furthermore, the chunk table is written at the end of the resource instead
+ *   of the start.  Note: chunk offsets are given in the chunk table as if the
+ *   `struct pwm_chunk_hdr's were not present; also, the chunk table is only
+ *   used if the WIM is being read from a seekable file (not a pipe).
+ *
+ * - Metadata blobs always come before non-metadata blobs.  (This does not by
+ *   itself constitute an incompatibility with normal WIMs, since this is valid
+ *   in normal WIMs.)
+ *
+ * - At least up to the end of the blobs, all components must be packed as
+ *   tightly as possible; there cannot be any "holes" in the WIM.  (This does
+ *   not by itself constitute an incompatibility with normal WIMs, since this
+ *   is valid in normal WIMs.)
+ *
+ * Note: the blob table, XML data, and header at the end are not used when
+ * applying from a pipe.  They exist to support functionality such as image
+ * application and export when the WIM is *not* read from a pipe.
+ *
+ *   Layout of pipable WIM:
+ *
+ * +--------+----------+--------------------+----------------+--------------+-----------+--------+
+ * | Header | XML data | Metadata resources | File resources |  Blob table  | XML data  | Header |
+ * +--------+----------+--------------------+----------------+--------------+-----------+--------+
+ *
+ *   Layout of normal WIM:
+ *
+ * +--------+-----------------------------+--------------+----------+
+ * | Header | File and metadata resources |  Blob table  | XML data |
+ * +--------+-----------------------------+--------------+----------+
+ *
+ * An optional integrity table can follow the final XML data in both normal and
+ * pipable WIMs.  However, due to implementation details, wimlib currently can
+ * only include an integrity table in a pipable WIM when writing it to a
+ * seekable file (not a pipe).
+ *
+ * Do note that since pipable WIMs are not supported by Microsoft's software,
+ * wimlib does not create them unless explicitly requested (with
+ * WIMLIB_WRITE_FLAG_PIPABLE) and as stated above they use different magic
+ * characters to identify the file.
+ */
+static int
+write_pipable_wim(WIMStruct *wim, int image, int write_flags,
+                 unsigned num_threads,
+                 struct list_head *blob_list_override,
+                 struct list_head *blob_table_list_ret)
 {
        int ret;
-       int open_flags = O_TRUNC | O_CREAT;
-       if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
-               open_flags |= O_RDWR;
-       else
-               open_flags |= O_WRONLY;
-       ret = open_wim_writable(w, path, open_flags);
+       struct wim_reshdr xml_reshdr;
+
+       WARNING("Creating a pipable WIM, which will "
+               "be incompatible\n"
+               "          with Microsoft's software (wimgapi/imagex/Dism).");
+
+       /* At this point, the header at the beginning of the file has already
+        * been written.  */
+
+       /* For efficiency, when wimlib adds an image to the WIM with
+        * wimlib_add_image(), the SHA-1 message digests of files are not
+        * calculated; instead, they are calculated while the files are being
+        * written.  However, this does not work when writing a pipable WIM,
+        * since when writing a blob to a pipable WIM, its SHA-1 message digest
+        * needs to be known before the blob data is written.  Therefore, before
+        * getting much farther, we need to pre-calculate the SHA-1 message
+        * digests of all blobs that will be written.  */
+       ret = wim_checksum_unhashed_blobs(wim);
+       if (ret)
+               return ret;
+
+       /* Write extra copy of the XML data.  */
+       ret = write_wim_xml_data(wim, image, WIM_TOTALBYTES_OMIT,
+                                &xml_reshdr, WRITE_RESOURCE_FLAG_PIPABLE);
        if (ret)
                return ret;
-       /* Write dummy header. It will be overwritten later. */
-       w->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
-       ret = write_header(&w->hdr, w->out_fd);
-       w->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
+
+       /* Write metadata resources for the image(s) being included in the
+        * output WIM.  */
+       ret = write_metadata_resources(wim, image, write_flags);
        if (ret)
                return ret;
-       if (lseek(w->out_fd, WIM_HEADER_DISK_SIZE, SEEK_SET) == -1) {
-               ERROR_WITH_ERRNO("Failed to seek to end of WIM header");
-               return WIMLIB_ERR_WRITE;
-       }
-       return 0;
+
+       /* Write blobs needed for the image(s) being included in the output WIM,
+        * or blobs needed for the split WIM part.  */
+       return write_file_blobs(wim, image, write_flags,
+                               num_threads, blob_list_override,
+                               blob_table_list_ret);
+
+       /* The blob table, XML data, and header at end are handled by
+        * finish_write().  */
 }
 
-/* Writes a stand-alone WIM to a file.  */
-WIMLIBAPI int
-wimlib_write(WIMStruct *w, const tchar *path,
-            int image, int write_flags, unsigned num_threads,
-            wimlib_progress_func_t progress_func)
+/* Write a standalone WIM or split WIM (SWM) part to a new file or to a file
+ * descriptor.  */
+int
+write_wim_part(WIMStruct *wim,
+              const void *path_or_fd,
+              int image,
+              int write_flags,
+              unsigned num_threads,
+              unsigned part_number,
+              unsigned total_parts,
+              struct list_head *blob_list_override,
+              const u8 *guid)
 {
        int ret;
+       struct wim_header hdr_save;
+       struct list_head blob_table_list;
 
-       if (!path)
-               return WIMLIB_ERR_INVALID_PARAM;
+       if (total_parts == 1)
+               DEBUG("Writing standalone WIM.");
+       else
+               DEBUG("Writing split WIM part %u/%u", part_number, total_parts);
+       if (image == WIMLIB_ALL_IMAGES)
+               DEBUG("Including all images.");
+       else
+               DEBUG("Including image %d only.", image);
+       if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)
+               DEBUG("File descriptor: %d", *(const int*)path_or_fd);
+       else
+               DEBUG("Path: \"%"TS"\"", (const tchar*)path_or_fd);
+       DEBUG("Write flags: 0x%08x", write_flags);
+
+       if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
+               DEBUG("\tCHECK_INTEGRITY");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)
+               DEBUG("\tNO_CHECK_INTEGRITY");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
+               DEBUG("\tPIPABLE");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_NOT_PIPABLE)
+               DEBUG("\tNOT_PIPABLE");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
+               DEBUG("\tRECOMPRESS");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_FSYNC)
+               DEBUG("\tFSYNC");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_REBUILD)
+               DEBUG("\tREBUILD");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE)
+               DEBUG("\tSOFT_DELETE");
 
-       write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
+       if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
+               DEBUG("\tIGNORE_READONLY_FLAG");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS)
+               DEBUG("\tSKIP_EXTERNAL_WIMS");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK)
+               DEBUG("\tSTREAMS_OK");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_RETAIN_GUID)
+               DEBUG("\tRETAIN_GUID");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
+               DEBUG("\tSOLID");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)
+               DEBUG("\tFILE_DESCRIPTOR");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)
+               DEBUG("\tNO_METADATA");
+
+       if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES)
+               DEBUG("\tUSE_EXISTING_TOTALBYTES");
 
+       if (num_threads == 0)
+               DEBUG("Number of threads: autodetect");
+       else
+               DEBUG("Number of threads: %u", num_threads);
+       DEBUG("Progress function: %s", (wim->progfunc ? "yes" : "no"));
+       DEBUG("Blob list:         %s", (blob_list_override ? "specified" : "autodetect"));
+       DEBUG("GUID:              %s", (write_flags &
+                                       WIMLIB_WRITE_FLAG_RETAIN_GUID) ? "retain"
+                                               : guid ? "explicit" : "generate new");
+
+       /* Internally, this is always called with a valid part number and total
+        * parts.  */
+       wimlib_assert(total_parts >= 1);
+       wimlib_assert(part_number >= 1 && part_number <= total_parts);
+
+       /* A valid image (or all images) must be specified.  */
        if (image != WIMLIB_ALL_IMAGES &&
-            (image < 1 || image > w->hdr.image_count))
+            (image < 1 || image > wim->hdr.image_count))
                return WIMLIB_ERR_INVALID_IMAGE;
 
-       if (w->hdr.total_parts != 1) {
-               ERROR("Cannot call wimlib_write() on part of a split WIM");
-               return WIMLIB_ERR_SPLIT_UNSUPPORTED;
+       /* If we need to write metadata resources, make sure the ::WIMStruct has
+        * the needed information attached (e.g. is not a resource-only WIM,
+        * such as a non-first part of a split WIM).  */
+       if (!wim_has_metadata(wim) &&
+           !(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA))
+               return WIMLIB_ERR_METADATA_NOT_FOUND;
+
+       /* Check for contradictory flags.  */
+       if ((write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
+                           WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
+                               == (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
+                                   WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
+               return WIMLIB_ERR_INVALID_PARAM;
+
+       if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
+                           WIMLIB_WRITE_FLAG_NOT_PIPABLE))
+                               == (WIMLIB_WRITE_FLAG_PIPABLE |
+                                   WIMLIB_WRITE_FLAG_NOT_PIPABLE))
+               return WIMLIB_ERR_INVALID_PARAM;
+
+       /* Save previous header, then start initializing the new one.  */
+       memcpy(&hdr_save, &wim->hdr, sizeof(struct wim_header));
+
+       /* Set default integrity, pipable, and solid flags.  */
+       if (!(write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
+                            WIMLIB_WRITE_FLAG_NOT_PIPABLE)))
+               if (wim_is_pipable(wim)) {
+                       DEBUG("WIM is pipable; default to PIPABLE.");
+                       write_flags |= WIMLIB_WRITE_FLAG_PIPABLE;
+               }
+
+       if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
+                            WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)))
+               if (wim_has_integrity_table(wim)) {
+                       DEBUG("Integrity table present; default to CHECK_INTEGRITY.");
+                       write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
+               }
+
+       if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
+                           WIMLIB_WRITE_FLAG_SOLID))
+                                   == (WIMLIB_WRITE_FLAG_PIPABLE |
+                                       WIMLIB_WRITE_FLAG_SOLID))
+       {
+               ERROR("Cannot specify both PIPABLE and SOLID!");
+               return WIMLIB_ERR_INVALID_PARAM;
        }
 
-       ret = begin_write(w, path, write_flags);
-       if (ret)
-               goto out_close_wim;
+       /* Set appropriate magic number.  */
+       if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
+               wim->hdr.magic = PWM_MAGIC;
+       else
+               wim->hdr.magic = WIM_MAGIC;
 
-       ret = write_wim_streams(w, image, write_flags, num_threads,
-                               progress_func);
-       if (ret)
-               goto out_close_wim;
+       /* Set appropriate version number.  */
+       if ((write_flags & WIMLIB_WRITE_FLAG_SOLID) ||
+           wim->out_compression_type == WIMLIB_COMPRESSION_TYPE_LZMS)
+               wim->hdr.wim_version = WIM_VERSION_SOLID;
+       else
+               wim->hdr.wim_version = WIM_VERSION_DEFAULT;
+
+       /* Clear header flags that will be set automatically.  */
+       wim->hdr.flags &= ~(WIM_HDR_FLAG_METADATA_ONLY          |
+                           WIM_HDR_FLAG_RESOURCE_ONLY          |
+                           WIM_HDR_FLAG_SPANNED                |
+                           WIM_HDR_FLAG_WRITE_IN_PROGRESS);
+
+       /* Set SPANNED header flag if writing part of a split WIM.  */
+       if (total_parts != 1)
+               wim->hdr.flags |= WIM_HDR_FLAG_SPANNED;
+
+       /* Set part number and total parts of split WIM.  This will be 1 and 1
+        * if the WIM is standalone.  */
+       wim->hdr.part_number = part_number;
+       wim->hdr.total_parts = total_parts;
+
+       /* Set compression type if different.  */
+       if (wim->compression_type != wim->out_compression_type) {
+               ret = set_wim_hdr_cflags(wim->out_compression_type, &wim->hdr);
+               wimlib_assert(ret == 0);
+       }
+
+       /* Set chunk size if different.  */
+       wim->hdr.chunk_size = wim->out_chunk_size;
 
-       if (progress_func)
-               progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN, NULL);
+       /* Set GUID.  */
+       if (!(write_flags & WIMLIB_WRITE_FLAG_RETAIN_GUID)) {
+               if (guid)
+                       memcpy(wim->hdr.guid, guid, WIMLIB_GUID_LEN);
+               else
+                       randomize_byte_array(wim->hdr.guid, WIMLIB_GUID_LEN);
+       }
+
+       /* Clear references to resources that have not been written yet.  */
+       zero_reshdr(&wim->hdr.blob_table_reshdr);
+       zero_reshdr(&wim->hdr.xml_data_reshdr);
+       zero_reshdr(&wim->hdr.boot_metadata_reshdr);
+       zero_reshdr(&wim->hdr.integrity_table_reshdr);
+
+       /* Set image count and boot index correctly for single image writes.  */
+       if (image != WIMLIB_ALL_IMAGES) {
+               wim->hdr.image_count = 1;
+               if (wim->hdr.boot_idx == image)
+                       wim->hdr.boot_idx = 1;
+               else
+                       wim->hdr.boot_idx = 0;
+       }
+
+       /* Split WIMs can't be bootable.  */
+       if (total_parts != 1)
+               wim->hdr.boot_idx = 0;
+
+       /* Initialize output file descriptor.  */
+       if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) {
+               /* File descriptor was explicitly provided.  Return error if
+                * file descriptor is not seekable, unless writing a pipable WIM
+                * was requested.  */
+               wim->out_fd.fd = *(const int*)path_or_fd;
+               wim->out_fd.offset = 0;
+               if (!filedes_is_seekable(&wim->out_fd)) {
+                       ret = WIMLIB_ERR_INVALID_PARAM;
+                       if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
+                               goto out_restore_hdr;
+                       if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
+                               ERROR("Can't include integrity check when "
+                                     "writing pipable WIM to pipe!");
+                               goto out_restore_hdr;
+                       }
+               }
 
-       ret = for_image(w, image, write_metadata_resource);
+       } else {
+               /* Filename of WIM to write was provided; open file descriptor
+                * to it.  */
+               ret = open_wim_writable(wim, (const tchar*)path_or_fd,
+                                       O_TRUNC | O_CREAT | O_RDWR);
+               if (ret)
+                       goto out_restore_hdr;
+       }
+
+       /* Write initial header.  This is merely a "dummy" header since it
+        * doesn't have all the information yet, so it will be overwritten later
+        * (unless writing a pipable WIM).  */
+       if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
+               wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
+       ret = write_wim_header(&wim->hdr, &wim->out_fd);
+       wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
        if (ret)
-               goto out_close_wim;
+               goto out_restore_hdr;
+
+       /* Write file blobs and metadata resources.  */
+       if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE)) {
+               /* Default case: create a normal (non-pipable) WIM.  */
+               ret = write_file_blobs(wim, image, write_flags,
+                                      num_threads,
+                                      blob_list_override,
+                                      &blob_table_list);
+               if (ret)
+                       goto out_restore_hdr;
+
+               ret = write_metadata_resources(wim, image, write_flags);
+               if (ret)
+                       goto out_restore_hdr;
+       } else {
+               /* Non-default case: create pipable WIM.  */
+               ret = write_pipable_wim(wim, image, write_flags, num_threads,
+                                       blob_list_override,
+                                       &blob_table_list);
+               if (ret)
+                       goto out_restore_hdr;
+               write_flags |= WIMLIB_WRITE_FLAG_HEADER_AT_END;
+       }
 
-       if (progress_func)
-               progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_END, NULL);
 
-       ret = finish_write(w, image, write_flags, progress_func);
-       /* finish_write() closed the WIM for us */
-       goto out;
-out_close_wim:
-       close_wim_writable(w);
-out:
-       DEBUG("wimlib_write(path=%"TS") = %d", path, ret);
+       /* Write blob table, XML data, and (optional) integrity table.  */
+       ret = finish_write(wim, image, write_flags, &blob_table_list);
+out_restore_hdr:
+       memcpy(&wim->hdr, &hdr_save, sizeof(struct wim_header));
+       (void)close_wim_writable(wim, write_flags);
+       DEBUG("ret=%d", ret);
        return ret;
 }
 
+/* Write a standalone WIM to a file or file descriptor.  */
+static int
+write_standalone_wim(WIMStruct *wim, const void *path_or_fd,
+                    int image, int write_flags, unsigned num_threads)
+{
+       return write_wim_part(wim, path_or_fd, image, write_flags,
+                             num_threads, 1, 1, NULL, NULL);
+}
+
+/* API function documented in wimlib.h  */
+WIMLIBAPI int
+wimlib_write(WIMStruct *wim, const tchar *path,
+            int image, int write_flags, unsigned num_threads)
+{
+       if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
+               return WIMLIB_ERR_INVALID_PARAM;
+
+       if (path == NULL || path[0] == T('\0'))
+               return WIMLIB_ERR_INVALID_PARAM;
+
+       return write_standalone_wim(wim, path, image, write_flags, num_threads);
+}
+
+/* API function documented in wimlib.h  */
+WIMLIBAPI int
+wimlib_write_to_fd(WIMStruct *wim, int fd,
+                  int image, int write_flags, unsigned num_threads)
+{
+       if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
+               return WIMLIB_ERR_INVALID_PARAM;
+
+       if (fd < 0)
+               return WIMLIB_ERR_INVALID_PARAM;
+
+       write_flags |= WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR;
+
+       return write_standalone_wim(wim, &fd, image, write_flags, num_threads);
+}
+
 static bool
-any_images_modified(WIMStruct *w)
+any_images_modified(WIMStruct *wim)
 {
-       for (int i = 0; i < w->hdr.image_count; i++)
-               if (w->image_metadata[i]->modified)
+       for (int i = 0; i < wim->hdr.image_count; i++)
+               if (wim->image_metadata[i]->modified)
                        return true;
        return false;
 }
 
+static int
+check_resource_offset(struct blob_descriptor *blob, void *_wim)
+{
+       const WIMStruct *wim = _wim;
+       off_t end_offset = *(const off_t*)wim->private;
+
+       if (blob->blob_location == BLOB_IN_WIM &&
+           blob->rdesc->wim == wim &&
+           blob->rdesc->offset_in_wim + blob->rdesc->size_in_wim > end_offset)
+               return WIMLIB_ERR_RESOURCE_ORDER;
+       return 0;
+}
+
+/* Make sure no file or metadata resources are located after the XML data (or
+ * integrity table if present)--- otherwise we can't safely overwrite the WIM in
+ * place and we return WIMLIB_ERR_RESOURCE_ORDER.  */
+static int
+check_resource_offsets(WIMStruct *wim, off_t end_offset)
+{
+       int ret;
+       unsigned i;
+
+       wim->private = &end_offset;
+       ret = for_blob_in_table(wim->blob_table, check_resource_offset, wim);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < wim->hdr.image_count; i++) {
+               ret = check_resource_offset(wim->image_metadata[i]->metadata_blob, wim);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
 /*
- * Overwrite a WIM, possibly appending streams to it.
+ * Overwrite a WIM, possibly appending new resources to it.
  *
  * A WIM looks like (or is supposed to look like) the following:
  *
  *                   Header (212 bytes)
- *                   Streams and metadata resources (variable size)
- *                   Lookup table (variable size)
+ *                   Resources for metadata and files (variable size)
+ *                   Blob table (variable size)
  *                   XML data (variable size)
  *                   Integrity table (optional) (variable size)
  *
- * If we are not adding any streams or metadata resources, the lookup table is
+ * If we are not adding any new files or metadata, then the blob table is
  * unchanged--- so we only need to overwrite the XML data, integrity table, and
  * header.  This operation is potentially unsafe if the program is abruptly
  * terminated while the XML data or integrity table are being overwritten, but
@@ -2003,256 +3092,296 @@ any_images_modified(WIMStruct *w)
  * the program is terminated while the integrity table is being calculated (but
  * no guarantees, due to write re-ordering...).
  *
- * If we are adding new streams or images (metadata resources), the lookup table
- * needs to be changed, and those streams need to be written.  In this case, we
- * try to perform a safe update of the WIM file by writing the streams *after*
- * the end of the previous WIM, then writing the new lookup table, XML data, and
- * (optionally) integrity table following the new streams.  This will produce a
- * layout like the following:
+ * If we are adding new blobs, including new file data as well as any metadata
+ * for any new images, then the blob table needs to be changed, and those blobs
+ * need to be written.  In this case, we try to perform a safe update of the WIM
+ * file by writing the blobs *after* the end of the previous WIM, then writing
+ * the new blob table, XML data, and (optionally) integrity table following the
+ * new blobs.  This will produce a layout like the following:
  *
  *                   Header (212 bytes)
- *                   (OLD) Streams and metadata resources (variable size)
- *                   (OLD) Lookup table (variable size)
+ *                   (OLD) Resources for metadata and files (variable size)
+ *                   (OLD) Blob table (variable size)
  *                   (OLD) XML data (variable size)
  *                   (OLD) Integrity table (optional) (variable size)
- *                   (NEW) Streams and metadata resources (variable size)
- *                   (NEW) Lookup table (variable size)
+ *                   (NEW) Resources for metadata and files (variable size)
+ *                   (NEW) Blob table (variable size)
  *                   (NEW) XML data (variable size)
  *                   (NEW) Integrity table (optional) (variable size)
  *
  * At all points, the WIM is valid as nothing points to the new data yet.  Then,
- * the header is overwritten to point to the new lookup table, XML data, and
+ * the header is overwritten to point to the new blob table, XML data, and
  * integrity table, to produce the following layout:
  *
  *                   Header (212 bytes)
- *                   Streams and metadata resources (variable size)
+ *                   Resources for metadata and files (variable size)
  *                   Nothing (variable size)
- *                   More Streams and metadata resources (variable size)
- *                   Lookup table (variable size)
+ *                   Resources for metadata and files (variable size)
+ *                   Blob table (variable size)
  *                   XML data (variable size)
  *                   Integrity table (optional) (variable size)
  *
  * This method allows an image to be appended to a large WIM very quickly, and
- * is is crash-safe except in the case of write re-ordering, but the
- * disadvantage is that a small hole is left in the WIM where the old lookup
- * table, xml data, and integrity table were.  (These usually only take up a
- * small amount of space compared to the streams, however.)
+ * is crash-safe except in the case of write re-ordering, but the disadvantage
+ * is that a small hole is left in the WIM where the old blob table, xml data,
+ * and integrity table were.  (These usually only take up a small amount of
+ * space compared to the blobs, however.)
  */
 static int
-overwrite_wim_inplace(WIMStruct *w, int write_flags,
-                     unsigned num_threads,
-                     wimlib_progress_func_t progress_func)
+overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads)
 {
        int ret;
-       struct list_head stream_list;
        off_t old_wim_end;
-       u64 old_lookup_table_end, old_xml_begin, old_xml_end;
-       int open_flags;
-
-       DEBUG("Overwriting `%"TS"' in-place", w->filename);
-
-       /* Make sure that the integrity table (if present) is after the XML
-        * data, and that there are no stream resources, metadata resources, or
-        * lookup tables after the XML data.  Otherwise, these data would be
-        * overwritten. */
-       old_xml_begin = w->hdr.xml_res_entry.offset;
-       old_xml_end = old_xml_begin + w->hdr.xml_res_entry.size;
-       old_lookup_table_end = w->hdr.lookup_table_res_entry.offset +
-                              w->hdr.lookup_table_res_entry.size;
-       if (w->hdr.integrity.offset != 0 && w->hdr.integrity.offset < old_xml_end) {
-               ERROR("Didn't expect the integrity table to be before the XML data");
-               return WIMLIB_ERR_RESOURCE_ORDER;
-       }
-
-       if (old_lookup_table_end > old_xml_begin) {
-               ERROR("Didn't expect the lookup table to be after the XML data");
-               return WIMLIB_ERR_RESOURCE_ORDER;
+       u64 old_blob_table_end, old_xml_begin, old_xml_end;
+       struct wim_header hdr_save;
+       struct list_head blob_list;
+       struct list_head blob_table_list;
+       struct filter_context filter_ctx;
+
+       DEBUG("Overwriting `%"TS"' in-place", wim->filename);
+
+       /* Save original header so it can be restored in case of error  */
+       memcpy(&hdr_save, &wim->hdr, sizeof(struct wim_header));
+
+       /* Set default integrity flag.  */
+       if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
+                            WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)))
+               if (wim_has_integrity_table(wim))
+                       write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
+
+       /* Set WIM version if writing solid resources.  */
+       if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
+               wim->hdr.wim_version = WIM_VERSION_SOLID;
+
+       /* Set additional flags for overwrite.  */
+       write_flags |= WIMLIB_WRITE_FLAG_OVERWRITE |
+                      WIMLIB_WRITE_FLAG_STREAMS_OK;
+
+       /* Make sure there is no data after the XML data, except possibly an
+        * integrity table.  If this were the case, then this data would be
+        * overwritten.  */
+       old_xml_begin = wim->hdr.xml_data_reshdr.offset_in_wim;
+       old_xml_end = old_xml_begin + wim->hdr.xml_data_reshdr.size_in_wim;
+       old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
+                            wim->hdr.blob_table_reshdr.size_in_wim;
+       if (wim->hdr.integrity_table_reshdr.offset_in_wim != 0 &&
+           wim->hdr.integrity_table_reshdr.offset_in_wim < old_xml_end) {
+               WARNING("Didn't expect the integrity table to be before the XML data");
+               ret = WIMLIB_ERR_RESOURCE_ORDER;
+               goto out_restore_memory_hdr;
+       }
+
+       if (old_blob_table_end > old_xml_begin) {
+               WARNING("Didn't expect the blob table to be after the XML data");
+               ret = WIMLIB_ERR_RESOURCE_ORDER;
+               goto out_restore_memory_hdr;
        }
 
        /* Set @old_wim_end, which indicates the point beyond which we don't
         * allow any file and metadata resources to appear without returning
         * WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we would otherwise
         * overwrite these resources). */
-       if (!w->deletion_occurred && !any_images_modified(w)) {
+       if (!wim->image_deletion_occurred && !any_images_modified(wim)) {
                /* If no images have been modified and no images have been
-                * deleted, a new lookup table does not need to be written.  We
+                * deleted, a new blob table does not need to be written.  We
                 * shall write the new XML data and optional integrity table
-                * immediately after the lookup table.  Note that this may
+                * immediately after the blob table.  Note that this may
                 * overwrite an existing integrity table. */
-               DEBUG("Skipping writing lookup table "
+               DEBUG("Skipping writing blob table "
                      "(no images modified or deleted)");
-               old_wim_end = old_lookup_table_end;
-               write_flags |= WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE |
+               old_wim_end = old_blob_table_end;
+               write_flags |= WIMLIB_WRITE_FLAG_NO_BLOB_TABLE |
                               WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML;
-       } else if (w->hdr.integrity.offset) {
-               /* Old WIM has an integrity table; begin writing new streams
-                * after it. */
-               old_wim_end = w->hdr.integrity.offset + w->hdr.integrity.size;
+       } else if (wim->hdr.integrity_table_reshdr.offset_in_wim != 0) {
+               /* Old WIM has an integrity table; begin writing new blobs after
+                * it. */
+               old_wim_end = wim->hdr.integrity_table_reshdr.offset_in_wim +
+                             wim->hdr.integrity_table_reshdr.size_in_wim;
        } else {
-               /* No existing integrity table; begin writing new streams after
+               /* No existing integrity table; begin writing new blobs after
                 * the old XML data. */
                old_wim_end = old_xml_end;
        }
 
-       ret = prepare_streams_for_overwrite(w, old_wim_end, &stream_list);
+       ret = check_resource_offsets(wim, old_wim_end);
        if (ret)
-               return ret;
+               goto out_restore_memory_hdr;
 
-       open_flags = 0;
-       if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
-               open_flags |= O_RDWR;
-       else
-               open_flags |= O_WRONLY;
-       ret = open_wim_writable(w, w->filename, open_flags);
+       ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES, write_flags,
+                                         &blob_list, &blob_table_list,
+                                         &filter_ctx);
        if (ret)
-               return ret;
+               goto out_restore_memory_hdr;
 
-       ret = lock_wim(w, w->out_fd);
-       if (ret) {
-               close_wim_writable(w);
-               return ret;
-       }
+       ret = open_wim_writable(wim, wim->filename, O_RDWR);
+       if (ret)
+               goto out_restore_memory_hdr;
+
+       ret = lock_wim_for_append(wim);
+       if (ret)
+               goto out_close_wim;
 
        /* Set WIM_HDR_FLAG_WRITE_IN_PROGRESS flag in header. */
-       ret = write_header_flags(w->hdr.flags | WIM_HDR_FLAG_WRITE_IN_PROGRESS,
-                                w->out_fd);
+       wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
+       ret = write_wim_header_flags(wim->hdr.flags, &wim->out_fd);
        if (ret) {
                ERROR_WITH_ERRNO("Error updating WIM header flags");
-               close_wim_writable(w);
                goto out_unlock_wim;
        }
 
-       if (lseek(w->out_fd, old_wim_end, SEEK_SET) == -1) {
+       if (filedes_seek(&wim->out_fd, old_wim_end) == -1) {
                ERROR_WITH_ERRNO("Can't seek to end of WIM");
-               close_wim_writable(w);
                ret = WIMLIB_ERR_WRITE;
-               goto out_unlock_wim;
+               goto out_restore_physical_hdr;
        }
 
-       DEBUG("Writing newly added streams (offset = %"PRIu64")",
-             old_wim_end);
-       ret = write_stream_list(&stream_list,
-                               w->lookup_table,
-                               w->out_fd,
-                               w->compression_type,
-                               write_flags,
-                               num_threads,
-                               progress_func);
+       ret = wim_write_blob_list(wim, &blob_list, write_flags,
+                                 num_threads, &filter_ctx);
        if (ret)
                goto out_truncate;
 
-       for (int i = 0; i < w->hdr.image_count; i++) {
-               if (w->image_metadata[i]->modified) {
-                       select_wim_image(w, i + 1);
-                       ret = write_metadata_resource(w);
-                       if (ret)
-                               goto out_truncate;
-               }
-       }
-       write_flags |= WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE;
-       ret = finish_write(w, WIMLIB_ALL_IMAGES, write_flags,
-                          progress_func);
+       ret = write_metadata_resources(wim, WIMLIB_ALL_IMAGES, write_flags);
+       if (ret)
+               goto out_truncate;
+
+       ret = finish_write(wim, WIMLIB_ALL_IMAGES, write_flags,
+                          &blob_table_list);
+       if (ret)
+               goto out_truncate;
+
+       unlock_wim_for_append(wim);
+       return 0;
+
 out_truncate:
-       close_wim_writable(w);
-       if (ret != 0 && !(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
+       if (!(write_flags & WIMLIB_WRITE_FLAG_NO_BLOB_TABLE)) {
                WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)",
-                       w->filename, old_wim_end);
-               /* Return value of truncate() is ignored because this is already
-                * an error path. */
-               (void)ttruncate(w->filename, old_wim_end);
+                       wim->filename, old_wim_end);
+               /* Return value of ftruncate() is ignored because this is
+                * already an error path.  */
+               (void)ftruncate(wim->out_fd.fd, old_wim_end);
        }
+out_restore_physical_hdr:
+       (void)write_wim_header_flags(hdr_save.flags, &wim->out_fd);
 out_unlock_wim:
-       w->wim_locked = 0;
+       unlock_wim_for_append(wim);
+out_close_wim:
+       (void)close_wim_writable(wim, write_flags);
+out_restore_memory_hdr:
+       memcpy(&wim->hdr, &hdr_save, sizeof(struct wim_header));
        return ret;
 }
 
 static int
-overwrite_wim_via_tmpfile(WIMStruct *w, int write_flags,
-                         unsigned num_threads,
-                         wimlib_progress_func_t progress_func)
+overwrite_wim_via_tmpfile(WIMStruct *wim, int write_flags, unsigned num_threads)
 {
        size_t wim_name_len;
        int ret;
 
-       DEBUG("Overwriting `%"TS"' via a temporary file", w->filename);
+       DEBUG("Overwriting `%"TS"' via a temporary file", wim->filename);
 
        /* Write the WIM to a temporary file in the same directory as the
         * original WIM. */
-       wim_name_len = tstrlen(w->filename);
+       wim_name_len = tstrlen(wim->filename);
        tchar tmpfile[wim_name_len + 10];
-       tmemcpy(tmpfile, w->filename, wim_name_len);
+       tmemcpy(tmpfile, wim->filename, wim_name_len);
        randomize_char_array_with_alnum(tmpfile + wim_name_len, 9);
        tmpfile[wim_name_len + 9] = T('\0');
 
-       ret = wimlib_write(w, tmpfile, WIMLIB_ALL_IMAGES,
-                          write_flags | WIMLIB_WRITE_FLAG_FSYNC,
-                          num_threads, progress_func);
+       ret = wimlib_write(wim, tmpfile, WIMLIB_ALL_IMAGES,
+                          write_flags |
+                               WIMLIB_WRITE_FLAG_FSYNC |
+                               WIMLIB_WRITE_FLAG_RETAIN_GUID,
+                          num_threads);
        if (ret) {
-               ERROR("Failed to write the WIM file `%"TS"'", tmpfile);
-               goto out_unlink;
+               tunlink(tmpfile);
+               return ret;
        }
 
-       close_wim(w);
+       if (filedes_valid(&wim->in_fd)) {
+               filedes_close(&wim->in_fd);
+               filedes_invalidate(&wim->in_fd);
+       }
 
-       DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, w->filename);
-       /* Rename the new file to the old file .*/
-       if (trename(tmpfile, w->filename) != 0) {
+       /* Rename the new WIM file to the original WIM file.  Note: on Windows
+        * this actually calls win32_rename_replacement(), not _wrename(), so
+        * that removing the existing destination file can be handled.  */
+       DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, wim->filename);
+       ret = trename(tmpfile, wim->filename);
+       if (ret) {
                ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'",
-                                tmpfile, w->filename);
-               ret = WIMLIB_ERR_RENAME;
-               goto out_unlink;
-       }
-
-       if (progress_func) {
-               union wimlib_progress_info progress;
-               progress.rename.from = tmpfile;
-               progress.rename.to = w->filename;
-               progress_func(WIMLIB_PROGRESS_MSG_RENAME, &progress);
-       }
-       goto out;
-out_unlink:
-       /* Remove temporary file. */
-       if (tunlink(tmpfile) != 0)
-               WARNING_WITH_ERRNO("Failed to remove `%"TS"'", tmpfile);
-out:
-       return ret;
+                                tmpfile, wim->filename);
+       #ifdef __WIN32__
+               if (ret < 0)
+       #endif
+               {
+                       tunlink(tmpfile);
+               }
+               return WIMLIB_ERR_RENAME;
+       }
+
+       union wimlib_progress_info progress;
+       progress.rename.from = tmpfile;
+       progress.rename.to = wim->filename;
+       return call_progress(wim->progfunc, WIMLIB_PROGRESS_MSG_RENAME,
+                            &progress, wim->progctx);
 }
 
-/*
- * Writes a WIM file to the original file that it was read from, overwriting it.
- */
+/* Determine if the specified WIM file may be updated by appending in-place
+ * rather than writing and replacing it with an entirely new file.  */
+static bool
+can_overwrite_wim_inplace(const WIMStruct *wim, int write_flags)
+{
+       /* REBUILD flag forces full rebuild.  */
+       if (write_flags & WIMLIB_WRITE_FLAG_REBUILD)
+               return false;
+
+       /* Image deletions cause full rebuild by default.  */
+       if (wim->image_deletion_occurred &&
+           !(write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
+               return false;
+
+       /* Pipable WIMs cannot be updated in place, nor can a non-pipable WIM be
+        * turned into a pipable WIM in-place.  */
+       if (wim_is_pipable(wim) || (write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
+               return false;
+
+       /* The default compression type and compression chunk size selected for
+        * the output WIM must be the same as those currently used for the WIM.
+        */
+       if (wim->compression_type != wim->out_compression_type)
+               return false;
+       if (wim->chunk_size != wim->out_chunk_size)
+               return false;
+
+       return true;
+}
+
+/* API function documented in wimlib.h  */
 WIMLIBAPI int
-wimlib_overwrite(WIMStruct *w, int write_flags,
-                unsigned num_threads,
-                wimlib_progress_func_t progress_func)
+wimlib_overwrite(WIMStruct *wim, int write_flags, unsigned num_threads)
 {
        int ret;
        u32 orig_hdr_flags;
 
-       write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
+       if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
+               return WIMLIB_ERR_INVALID_PARAM;
 
-       if (!w->filename)
+       if (!wim->filename)
                return WIMLIB_ERR_NO_FILENAME;
 
-       orig_hdr_flags = w->hdr.flags;
+       orig_hdr_flags = wim->hdr.flags;
        if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
-               w->hdr.flags &= ~WIM_HDR_FLAG_READONLY;
-       ret = can_modify_wim(w);
-       w->hdr.flags = orig_hdr_flags;
+               wim->hdr.flags &= ~WIM_HDR_FLAG_READONLY;
+       ret = can_modify_wim(wim);
+       wim->hdr.flags = orig_hdr_flags;
        if (ret)
                return ret;
 
-       if ((!w->deletion_occurred || (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
-           && !(write_flags & WIMLIB_WRITE_FLAG_REBUILD))
-       {
-               int ret;
-               ret = overwrite_wim_inplace(w, write_flags, num_threads,
-                                           progress_func);
-               if (ret == WIMLIB_ERR_RESOURCE_ORDER)
-                       WARNING("Falling back to re-building entire WIM");
-               else
+       if (can_overwrite_wim_inplace(wim, write_flags)) {
+               ret = overwrite_wim_inplace(wim, write_flags, num_threads);
+               if (ret != WIMLIB_ERR_RESOURCE_ORDER)
                        return ret;
+               WARNING("Falling back to re-building entire WIM");
        }
-       return overwrite_wim_via_tmpfile(w, write_flags, num_threads,
-                                        progress_func);
+       return overwrite_wim_via_tmpfile(wim, write_flags, num_threads);
 }