+#ifdef HAVE_ALLOCA_H
+# include <alloca.h>
+#endif
+
+
+#ifndef __WIN32__
+# include <sys/uio.h> /* for `struct iovec' */
+#endif
+
/* Chunk table that's located at the beginning of each compressed resource in
 * the WIM.  (This is not the on-disk format; the on-disk format just has an
 * array of offsets.) */
struct chunk_table {
	/* Uncompressed size of the resource this table describes.  */
	u64 original_resource_size;
	/* Number of compressed chunks the resource is divided into.  */
	u64 num_chunks;
	/* Number of bytes the table occupies on disk:
	 * bytes_per_chunk_entry * (num_chunks - 1), since the implicit
	 * first offset of 0 is not stored.  */
	u64 table_disk_size;
	/* Size of each on-disk offset entry: 4 or 8 bytes.  */
	unsigned bytes_per_chunk_entry;
	/* Position in @offsets where the next entry will be stored.  */
	void *cur_offset_p;
	/* Running offset of the next chunk, kept in whichever width matches
	 * bytes_per_chunk_entry.  */
	union {
		u32 cur_offset_u32;
		u64 cur_offset_u64;
	};
	/* Beginning of chunk offsets, in either 32-bit or 64-bit little endian
	 * integers, including the first offset of 0, which will not be written.
	 * */
	u8 offsets[] _aligned_attribute(8);
};
+
/* Allocate and initializes a chunk table, then reserve space for it in the
 * output file unless writing a pipable resource.
 *
 * @lte:            Stream being written; supplies the uncompressed size and
 *                  chunk count.
 * @out_fd:         Output file descriptor, positioned at the start of the
 *                  resource.
 * @chunk_tab_ret:  On success, receives the new table (caller frees it).
 * @resource_flags: WIMLIB_WRITE_RESOURCE_FLAG_* flags.
 *
 * Returns 0, WIMLIB_ERR_NOMEM, or an error code from full_write().  */
static int
begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte,
			     struct filedes *out_fd,
			     struct chunk_table **chunk_tab_ret,
			     int resource_flags)
{
	u64 size;
	u64 num_chunks;
	unsigned bytes_per_chunk_entry;
	size_t alloc_size;
	struct chunk_table *chunk_tab;
	int ret;

	size = wim_resource_size(lte);
	num_chunks = wim_resource_chunks(lte);
	/* 64-bit offset entries are needed only when the uncompressed size
	 * exceeds 4 GiB.  */
	bytes_per_chunk_entry = (size > (1ULL << 32)) ? 8 : 4;
	/* Always allocate 8 bytes per in-memory entry; this harmlessly
	 * overallocates when 4-byte entries are in use.  */
	alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64);
	chunk_tab = CALLOC(1, alloc_size);

	if (!chunk_tab) {
		ERROR("Failed to allocate chunk table for %"PRIu64" byte "
		      "resource", size);
		return WIMLIB_ERR_NOMEM;
	}
	chunk_tab->num_chunks = num_chunks;
	chunk_tab->original_resource_size = size;
	chunk_tab->bytes_per_chunk_entry = bytes_per_chunk_entry;
	/* The implicit first offset of 0 is not stored on disk.  */
	chunk_tab->table_disk_size = chunk_tab->bytes_per_chunk_entry *
				     (num_chunks - 1);
	chunk_tab->cur_offset_p = chunk_tab->offsets;

	/* We don't know the correct offsets yet; so just write zeroes to
	 * reserve space for the table, so we can go back to it later after
	 * we've written the compressed chunks following it.
	 * (chunk_tab was just calloc'ed, so offsets[] is zero-filled.)
	 *
	 * Special case: if writing a pipable WIM, compressed resources are in a
	 * modified format (see comment above write_pipable_wim()) and do not
	 * have a chunk table at the beginning, so don't reserve any space for
	 * one. */
	if (!(resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE)) {
		ret = full_write(out_fd, chunk_tab->offsets,
				 chunk_tab->table_disk_size);
		if (ret) {
			ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
					 "file resource");
			FREE(chunk_tab);
			return ret;
		}
	}
	*chunk_tab_ret = chunk_tab;
	return 0;
}
+
/* Add the offset for the next chunk to the chunk table being constructed for a
 * compressed stream.
 *
 * Note the order of operations: the *current* running offset (the offset at
 * which this chunk begins) is stored first, and only then is the chunk's
 * compressed size added to the running total for the next call.  */
static void
chunk_tab_record_chunk(struct chunk_table *chunk_tab, unsigned out_chunk_size)
{
	if (chunk_tab->bytes_per_chunk_entry == 4) {
		/* 32-bit entries (uncompressed size <= 4 GiB).  */
		*(le32*)chunk_tab->cur_offset_p = cpu_to_le32(chunk_tab->cur_offset_u32);
		chunk_tab->cur_offset_p = (le32*)chunk_tab->cur_offset_p + 1;
		chunk_tab->cur_offset_u32 += out_chunk_size;
	} else {
		/* 64-bit entries.  */
		*(le64*)chunk_tab->cur_offset_p = cpu_to_le64(chunk_tab->cur_offset_u64);
		chunk_tab->cur_offset_p = (le64*)chunk_tab->cur_offset_p + 1;
		chunk_tab->cur_offset_u64 += out_chunk_size;
	}
}
+
/*
 * compress_func_t - Pointer to a function that compresses a chunk of a WIM
 * resource.  This may be either wimlib_xpress_compress()
 * (xpress-compress.c) or wimlib_lzx_compress() (lzx-compress.c).
 *
 * @chunk:	Uncompressed data of the chunk.
 * @chunk_size:	Size of the uncompressed chunk, in bytes.
 * @out:	Pointer to output buffer of size at least (@chunk_size - 1) bytes.
 *
 * Returns the size of the compressed data written to @out in bytes, or 0 if the
 * data could not be compressed to (@chunk_size - 1) bytes or fewer.
 *
 * As a special requirement, the compression code is optimized for the WIM
 * format and therefore requires (@chunk_size <= 32768).
 *
 * As another special requirement, the compression code will read up to 8 bytes
 * off the end of the @chunk array for performance reasons.  The values of these
 * bytes will not affect the output of the compression, but the calling code
 * must make sure that the buffer holding the uncompressed chunk is actually at
 * least (@chunk_size + 8) bytes, or at least that these extra bytes are in
 * mapped memory that will not cause a memory access violation if accessed.
 */
typedef unsigned (*compress_func_t)(const void *chunk, unsigned chunk_size,
				    void *out);
+
+static compress_func_t
+get_compress_func(int out_ctype)
+{
+ if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX)
+ return wimlib_lzx_compress;
+ else
+ return wimlib_xpress_compress;
+}
+
/* Finishes a WIM chunk table and writes it to the output file at the correct
 * offset.
 *
 * The on-disk table omits the implicit first offset of 0, so the write begins
 * at (offsets + bytes_per_chunk_entry) and covers table_disk_size bytes.
 *
 * For a pipable resource the table is appended at the current file offset
 * (after the chunk data); otherwise it is written back at @res_start_offset,
 * into the space reserved by begin_wim_resource_chunk_tab().  */
static int
finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab,
			      struct filedes *out_fd,
			      off_t res_start_offset,
			      int write_resource_flags)
{
	int ret;

	if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
		ret = full_write(out_fd,
				 chunk_tab->offsets +
					 chunk_tab->bytes_per_chunk_entry,
				 chunk_tab->table_disk_size);
	} else {
		ret = full_pwrite(out_fd,
				  chunk_tab->offsets +
					  chunk_tab->bytes_per_chunk_entry,
				  chunk_tab->table_disk_size,
				  res_start_offset);
	}
	if (ret) {
		ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
				 "file resource");
	}
	return ret;
}
+
/* Write the header for a stream in a pipable WIM.
 *
 * @lte:  Stream whose header to write; supplies the uncompressed size, hash,
 *        and resource flags.
 * @out_fd:  Output file descriptor (possibly a pipe).
 * @additional_reshdr_flags:  Extra flag bits to OR into the header flags.
 *        If PWM_RESHDR_FLAG_UNHASHED is among them, the hash field is zeroed
 *        because the SHA1 is not yet known.
 *
 * Returns 0 on success or an error code from full_write().  */
static int
write_pwm_stream_header(const struct wim_lookup_table_entry *lte,
			struct filedes *out_fd,
			int additional_reshdr_flags)
{
	struct pwm_stream_hdr stream_hdr;
	u32 reshdr_flags;
	int ret;

	stream_hdr.magic = PWM_STREAM_MAGIC;
	stream_hdr.uncompressed_size = cpu_to_le64(lte->resource_entry.original_size);
	if (additional_reshdr_flags & PWM_RESHDR_FLAG_UNHASHED) {
		zero_out_hash(stream_hdr.hash);
	} else {
		wimlib_assert(!lte->unhashed);
		copy_hash(stream_hdr.hash, lte->hash);
	}

	/* Strip the COMPRESSED flag from the input; the caller passes it back
	 * in via @additional_reshdr_flags when appropriate for the output.  */
	reshdr_flags = lte->resource_entry.flags & ~WIM_RESHDR_FLAG_COMPRESSED;
	reshdr_flags |= additional_reshdr_flags;
	stream_hdr.flags = cpu_to_le32(reshdr_flags);
	ret = full_write(out_fd, &stream_hdr, sizeof(stream_hdr));
	if (ret)
		ERROR_WITH_ERRNO("Error writing stream header");
	return ret;
}
+
+static int
+seek_and_truncate(struct filedes *out_fd, off_t offset)
+{
+ if (filedes_seek(out_fd, offset) == -1 ||
+ ftruncate(out_fd->fd, offset))
+ {
+ ERROR_WITH_ERRNO("Failed to truncate output WIM file");
+ return WIMLIB_ERR_WRITE;
+ }
+ return 0;
+}
+
+static int
+finalize_and_check_sha1(SHA_CTX *sha_ctx, struct wim_lookup_table_entry *lte)
+{
+ u8 md[SHA1_HASH_SIZE];
+
+ sha1_final(md, sha_ctx);
+ if (lte->unhashed) {
+ copy_hash(lte->hash, md);
+ } else if (!hashes_equal(md, lte->hash)) {
+ ERROR("WIM resource has incorrect hash!");
+ if (lte_filename_valid(lte)) {
+ ERROR("We were reading it from \"%"TS"\"; maybe "
+ "it changed while we were reading it.",
+ lte->file_on_disk);
+ }
+ return WIMLIB_ERR_INVALID_RESOURCE_HASH;
+ }
+ return 0;
+}
+
/* Per-stream context passed to write_resource_cb().  */
struct write_resource_ctx {
	compress_func_t compress;	/* compression function, or NULL for no compression */
	struct chunk_table *chunk_tab;	/* chunk table being built, or NULL if none */
	struct filedes *out_fd;		/* output file descriptor */
	SHA_CTX sha_ctx;		/* running SHA1 state; valid iff doing_sha */
	bool doing_sha;			/* whether to checksum the uncompressed data */
	int resource_flags;		/* WIMLIB_WRITE_RESOURCE_FLAG_* */
};
+
/* Callback for read_resource_prefix(): receives one uncompressed chunk of the
 * stream being written, optionally compresses it, updates SHA1 and chunk-table
 * state, and writes the (possibly compressed) chunk to the output file.
 *
 * @chunk:      Uncompressed chunk data.
 * @chunk_size: Its size in bytes (bounded by the WIM chunk size; see the
 *              compress_func_t contract above).
 * @_ctx:       The struct write_resource_ctx for this stream.
 *
 * Returns 0 on success or an error code from full_write().  */
static int
write_resource_cb(const void *chunk, size_t chunk_size, void *_ctx)
{
	struct write_resource_ctx *ctx = _ctx;
	const void *out_chunk;
	unsigned out_chunk_size;
	int ret;

	if (ctx->doing_sha)
		sha1_update(&ctx->sha_ctx, chunk, chunk_size);

	out_chunk = chunk;
	out_chunk_size = chunk_size;
	if (ctx->compress) {
		void *compressed_chunk;
		unsigned compressed_size;

		/* Compress the chunk.  (Stack allocation is bounded because
		 * chunk_size is at most the WIM chunk size per the
		 * compress_func_t contract.)  */
		compressed_chunk = alloca(chunk_size);
		compressed_size = (*ctx->compress)(chunk, chunk_size,
						   compressed_chunk);

		/* Use compressed data if compression to less than input size
		 * was successful; compressed_size == 0 means the chunk was
		 * incompressible and is written uncompressed.  */
		if (compressed_size) {
			out_chunk = compressed_chunk;
			out_chunk_size = compressed_size;
		}
	}

	if (ctx->chunk_tab) {
		/* Update chunk table accounting.  */
		chunk_tab_record_chunk(ctx->chunk_tab, out_chunk_size);

		/* If writing compressed chunks to a pipable WIM, before the
		 * chunk data write a chunk header that provides the compressed
		 * chunk size.  */
		if (ctx->resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
			struct pwm_chunk_hdr chunk_hdr = {
				.compressed_size = cpu_to_le32(out_chunk_size),
			};
			ret = full_write(ctx->out_fd, &chunk_hdr,
					 sizeof(chunk_hdr));
			if (ret)
				goto error;
		}
	}

	/* Write the chunk data.  */
	ret = full_write(ctx->out_fd, out_chunk, out_chunk_size);
	if (ret)
		goto error;
	return 0;

error:
	ERROR_WITH_ERRNO("Failed to write WIM resource chunk");
	return ret;
}
+
/*
 * write_wim_resource()-
 *
 * Write a resource to an output WIM.
 *
 * @lte:
 *	Lookup table entry for the resource, which could be in another WIM, in
 *	an external file, or in another location.
 *
 * @out_fd:
 *	File descriptor opened to the output WIM.
 *
 * @out_ctype:
 *	One of the WIMLIB_COMPRESSION_TYPE_* constants to indicate which
 *	compression algorithm to use.
 *
 * @out_res_entry:
 *	On success, this is filled in with the offset, flags, compressed size,
 *	and uncompressed size of the resource in the output WIM.
 *
 * @resource_flags:
 *	* WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS to force data to be recompressed even
 *	  if it could otherwise be copied directly from the input;
 *	* WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE if writing a resource for a pipable WIM
 *	  (and the output file descriptor may be a pipe).
 *
 * Additional notes: The SHA1 message digest of the uncompressed data is
 * calculated (except when doing a raw copy --- see below).  If the @unhashed
 * flag is set on the lookup table entry, this message digest is simply copied
 * to it; otherwise, the message digest is compared with the existing one, and
 * the function will fail if they do not match.
 */
int
write_wim_resource(struct wim_lookup_table_entry *lte,
		   struct filedes *out_fd, int out_ctype,
		   struct resource_entry *out_res_entry,
		   int resource_flags)
{
	struct write_resource_ctx write_ctx;
	off_t res_start_offset;
	u64 read_size;
	int ret;

	/* Mask out any irrelevant flags, since this function also uses this
	 * variable to store WIMLIB_READ_RESOURCE flags. */
	resource_flags &= WIMLIB_WRITE_RESOURCE_MASK;

	/* Get current position in output WIM. */
	res_start_offset = out_fd->offset;

	/* If we are not forcing the data to be recompressed, and the input
	 * resource is located in a WIM with the same compression type as that
	 * desired other than no compression, we can simply copy the compressed
	 * data without recompressing it.  This also means we must skip
	 * calculating the SHA1, as we never will see the uncompressed data. */
	if (lte->resource_location == RESOURCE_IN_WIM &&
	    out_ctype == wim_resource_compression_type(lte) &&
	    out_ctype != WIMLIB_COMPRESSION_TYPE_NONE &&
	    !(resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS))
	{
		/* Normally we can request a RAW_FULL read, but if we're reading
		 * from a pipable resource and writing a non-pipable resource or
		 * vice versa, then a RAW_CHUNKS read needs to be requested so
		 * that the written resource can be appropriately formatted.
		 * However, in neither case is any actual decompression needed.
		 */
		if (lte->is_pipable == !!(resource_flags &
					  WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE))
			resource_flags |= WIMLIB_READ_RESOURCE_FLAG_RAW_FULL;
		else
			resource_flags |= WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS;
		write_ctx.doing_sha = false;
		/* Raw copy: read the on-disk (compressed) size.  */
		read_size = lte->resource_entry.size;
	} else {
		write_ctx.doing_sha = true;
		sha1_init(&write_ctx.sha_ctx);
		/* Normal path: read the full uncompressed data.  */
		read_size = lte->resource_entry.original_size;
	}

	/* If the output resource is to be compressed, initialize the chunk
	 * table and set the function to use for chunk compression.  Exceptions:
	 * no compression function is needed if doing a raw copy; also, no chunk
	 * table is needed if doing a *full* (not per-chunk) raw copy. */
	write_ctx.compress = NULL;
	write_ctx.chunk_tab = NULL;
	if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
		if (!(resource_flags & WIMLIB_READ_RESOURCE_FLAG_RAW))
			write_ctx.compress = get_compress_func(out_ctype);
		if (!(resource_flags & WIMLIB_READ_RESOURCE_FLAG_RAW_FULL)) {
			ret = begin_wim_resource_chunk_tab(lte, out_fd,
							   &write_ctx.chunk_tab,
							   resource_flags);
			if (ret)
				goto out;
		}
	}

	/* If writing a pipable resource, write the stream header and update
	 * @res_start_offset to be the end of the stream header. */
	if (resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
		int reshdr_flags = 0;
		if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE)
			reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED;
		ret = write_pwm_stream_header(lte, out_fd, reshdr_flags);
		if (ret)
			goto out_free_chunk_tab;
		res_start_offset = out_fd->offset;
	}

	/* Write the entire resource by reading the entire resource and feeding
	 * the data through the write_resource_cb function. */
	write_ctx.out_fd = out_fd;
	write_ctx.resource_flags = resource_flags;
try_write_again:
	ret = read_resource_prefix(lte, read_size,
				   write_resource_cb, &write_ctx, resource_flags);
	if (ret)
		goto out_free_chunk_tab;

	/* Verify SHA1 message digest of the resource, or set the hash for the
	 * first time. */
	if (write_ctx.doing_sha) {
		ret = finalize_and_check_sha1(&write_ctx.sha_ctx, lte);
		if (ret)
			goto out_free_chunk_tab;
	}

	/* Write chunk table if needed. */
	if (write_ctx.chunk_tab) {
		ret = finish_wim_resource_chunk_tab(write_ctx.chunk_tab,
						    out_fd,
						    res_start_offset,
						    resource_flags);
		if (ret)
			goto out_free_chunk_tab;
	}

	/* Fill in out_res_entry with information about the newly written
	 * resource. */
	out_res_entry->size = out_fd->offset - res_start_offset;
	out_res_entry->flags = lte->resource_entry.flags;
	if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
		out_res_entry->flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
	else
		out_res_entry->flags |= WIM_RESHDR_FLAG_COMPRESSED;
	out_res_entry->offset = res_start_offset;
	out_res_entry->original_size = wim_resource_size(lte);

	/* Check for resources compressed to greater than their original size
	 * and write them uncompressed instead.  (But never do this if writing
	 * to a pipe, and don't bother if we did a raw copy.) */
	if (out_res_entry->size > out_res_entry->original_size &&
	    !(resource_flags & (WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE |
				WIMLIB_READ_RESOURCE_FLAG_RAW)))
	{
		DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
		      "writing uncompressed instead",
		      out_res_entry->original_size, out_res_entry->size);
		/* Rewind to the start of the resource and redo the write
		 * without compression.  */
		ret = seek_and_truncate(out_fd, res_start_offset);
		if (ret)
			goto out_free_chunk_tab;
		out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
		FREE(write_ctx.chunk_tab);
		write_ctx.compress = NULL;
		write_ctx.chunk_tab = NULL;
		/* No need to redo the SHA1; it was already verified (or
		 * recorded) during the first pass.  */
		write_ctx.doing_sha = false;
		goto try_write_again;
	}
	if (resource_flags & (WIMLIB_READ_RESOURCE_FLAG_RAW)) {
		DEBUG("Copied raw compressed data "
		      "(%"PRIu64" => %"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)",
		      out_res_entry->original_size, out_res_entry->size,
		      out_res_entry->offset, out_res_entry->flags);
	} else if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
		DEBUG("Wrote compressed resource "
		      "(%"PRIu64" => %"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)",
		      out_res_entry->original_size, out_res_entry->size,
		      out_res_entry->offset, out_res_entry->flags);
	} else {
		DEBUG("Wrote uncompressed resource "
		      "(%"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)",
		      out_res_entry->original_size,
		      out_res_entry->offset, out_res_entry->flags);
	}
	ret = 0;
out_free_chunk_tab:
	FREE(write_ctx.chunk_tab);
out:
	return ret;
}
+
+/* Like write_wim_resource(), but the resource is specified by a buffer of
+ * uncompressed data rather a lookup table entry; also writes the SHA1 hash of
+ * the buffer to @hash_ret. */
+int
+write_wim_resource_from_buffer(const void *buf, size_t buf_size,
+ int reshdr_flags, struct filedes *out_fd,
+ int out_ctype,
+ struct resource_entry *out_res_entry,
+ u8 *hash_ret, int write_resource_flags)
+{
+ /* Set up a temporary lookup table entry to provide to
+ * write_wim_resource(). */
+ struct wim_lookup_table_entry lte;
+ int ret;
+
+ lte.resource_location = RESOURCE_IN_ATTACHED_BUFFER;
+ lte.attached_buffer = (void*)buf;
+ lte.resource_entry.original_size = buf_size;
+ lte.resource_entry.flags = reshdr_flags;
+
+ if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
+ sha1_buffer(buf, buf_size, lte.hash);
+ lte.unhashed = 0;
+ } else {
+ lte.unhashed = 1;
+ }
+
+ ret = write_wim_resource(<e, out_fd, out_ctype, out_res_entry,
+ write_resource_flags);
+ if (ret)
+ return ret;
+ if (hash_ret)
+ copy_hash(hash_ret, lte.hash);
+ return 0;
+}
+
+
+#ifdef ENABLE_MULTITHREADED_COMPRESSION
+
/* Blocking shared queue (solves the producer-consumer problem) */
struct shared_queue {
	unsigned size;		/* capacity of @array */
	unsigned front;		/* index of the next slot to dequeue from */
	unsigned back;		/* index of the most recently enqueued slot */
	unsigned filled_slots;	/* number of occupied slots */
	void **array;		/* circular buffer of queued pointers */
	pthread_mutex_t lock;	/* protects all of the above */
	pthread_cond_t msg_avail_cond;	 /* signaled when the queue becomes non-empty */
	pthread_cond_t space_avail_cond; /* signaled when the queue becomes non-full */
};
+
+static int
+shared_queue_init(struct shared_queue *q, unsigned size)
+{
+ wimlib_assert(size != 0);
+ q->array = CALLOC(sizeof(q->array[0]), size);
+ if (!q->array)
+ goto err;
+ q->filled_slots = 0;
+ q->front = 0;
+ q->back = size - 1;
+ q->size = size;
+ if (pthread_mutex_init(&q->lock, NULL)) {
+ ERROR_WITH_ERRNO("Failed to initialize mutex");
+ goto err;
+ }
+ if (pthread_cond_init(&q->msg_avail_cond, NULL)) {
+ ERROR_WITH_ERRNO("Failed to initialize condition variable");
+ goto err_destroy_lock;
+ }
+ if (pthread_cond_init(&q->space_avail_cond, NULL)) {
+ ERROR_WITH_ERRNO("Failed to initialize condition variable");
+ goto err_destroy_msg_avail_cond;
+ }
+ return 0;
+err_destroy_msg_avail_cond:
+ pthread_cond_destroy(&q->msg_avail_cond);
+err_destroy_lock:
+ pthread_mutex_destroy(&q->lock);
+err:
+ return WIMLIB_ERR_NOMEM;
+}
+
+static void
+shared_queue_destroy(struct shared_queue *q)
+{
+ FREE(q->array);
+ pthread_mutex_destroy(&q->lock);
+ pthread_cond_destroy(&q->msg_avail_cond);
+ pthread_cond_destroy(&q->space_avail_cond);
+}
+
/* Enqueue @obj, blocking while the queue is full.  */
static void
shared_queue_put(struct shared_queue *q, void *obj)
{
	pthread_mutex_lock(&q->lock);
	/* Wait (re-checking the predicate on each wakeup) until a slot is
	 * free.  */
	while (q->filled_slots == q->size)
		pthread_cond_wait(&q->space_avail_cond, &q->lock);

	q->back = (q->back + 1) % q->size;
	q->array[q->back] = obj;
	q->filled_slots++;

	/* Broadcast (not signal) so that every waiting consumer re-checks
	 * the predicate.  */
	pthread_cond_broadcast(&q->msg_avail_cond);
	pthread_mutex_unlock(&q->lock);
}
+
/* Dequeue and return the oldest object in the queue, blocking while the queue
 * is empty.  */
static void *
shared_queue_get(struct shared_queue *q)
{
	void *obj;

	pthread_mutex_lock(&q->lock);
	/* Wait (re-checking the predicate on each wakeup) until an object is
	 * available.  */
	while (q->filled_slots == 0)
		pthread_cond_wait(&q->msg_avail_cond, &q->lock);

	obj = q->array[q->front];
	q->array[q->front] = NULL;	/* clear slot to aid debugging */
	q->front = (q->front + 1) % q->size;
	q->filled_slots--;

	/* Broadcast (not signal) so that every waiting producer re-checks
	 * the predicate.  */
	pthread_cond_broadcast(&q->space_avail_cond);
	pthread_mutex_unlock(&q->lock);
	return obj;
}
+
/* Parameters passed to each compressor thread.  */
struct compressor_thread_params {
	struct shared_queue *res_to_compress_queue;	/* input: messages to compress */
	struct shared_queue *compressed_res_queue;	/* output: finished messages */
	compress_func_t compress;			/* chunk compression function */
};

/* Maximum number of chunks carried in a single message.  */
#define MAX_CHUNKS_PER_MSG 2

/* Unit of work passed between the main thread and the compressor threads.  */
struct message {
	struct wim_lookup_table_entry *lte;	/* stream the chunks belong to */
	u8 *uncompressed_chunks[MAX_CHUNKS_PER_MSG];	/* input buffers */
	u8 *compressed_chunks[MAX_CHUNKS_PER_MSG];	/* scratch output buffers */
	unsigned uncompressed_chunk_sizes[MAX_CHUNKS_PER_MSG];
	/* For each chunk, the data that should actually be written: either the
	 * compressed or the original buffer, whichever is smaller (filled in
	 * by compress_chunks()).  */
	struct iovec out_chunks[MAX_CHUNKS_PER_MSG];
	unsigned num_chunks;	/* number of valid chunks in this message */
	struct list_head list;	/* link in the available or per-stream message list */
	bool complete;		/* set once the message has been compressed */
	u64 begin_chunk;	/* index of the first chunk in this message */
};
+
+static void
+compress_chunks(struct message *msg, compress_func_t compress)
+{
+ for (unsigned i = 0; i < msg->num_chunks; i++) {
+ unsigned len = compress(msg->uncompressed_chunks[i],
+ msg->uncompressed_chunk_sizes[i],
+ msg->compressed_chunks[i]);
+ void *out_chunk;
+ unsigned out_len;
+ if (len) {
+ /* To be written compressed */
+ out_chunk = msg->compressed_chunks[i];
+ out_len = len;
+ } else {
+ /* To be written uncompressed */
+ out_chunk = msg->uncompressed_chunks[i];
+ out_len = msg->uncompressed_chunk_sizes[i];
+ }
+ msg->out_chunks[i].iov_base = out_chunk;
+ msg->out_chunks[i].iov_len = out_len;
+ }
+}
+
+/* Compressor thread routine. This is a lot simpler than the main thread
+ * routine: just repeatedly get a group of chunks from the
+ * res_to_compress_queue, compress them, and put them in the
+ * compressed_res_queue. A NULL pointer indicates that the thread should stop.
+ * */
+static void *
+compressor_thread_proc(void *arg)
+{
+ struct compressor_thread_params *params = arg;
+ struct shared_queue *res_to_compress_queue = params->res_to_compress_queue;
+ struct shared_queue *compressed_res_queue = params->compressed_res_queue;
+ compress_func_t compress = params->compress;
+ struct message *msg;
+
+ DEBUG("Compressor thread ready");
+ while ((msg = shared_queue_get(res_to_compress_queue)) != NULL) {
+ compress_chunks(msg, compress);
+ shared_queue_put(compressed_res_queue, msg);
+ }
+ DEBUG("Compressor thread terminating");
+ return NULL;
+}
+#endif /* ENABLE_MULTITHREADED_COMPRESSION */
+
/* Bookkeeping for reporting WIMLIB_PROGRESS_MSG_WRITE_STREAMS progress.  */
struct write_streams_progress_data {
	wimlib_progress_func_t progress_func;	/* user callback, or NULL */
	union wimlib_progress_info progress;	/* current progress counters */
	uint64_t next_progress;	/* completed-bytes threshold that triggers the
				 * next report; ~0 means "no more reports" */
	WIMStruct *prev_wim_part;	/* last split-WIM part streams came from */
};
+
/* Update progress accounting after one stream has been processed, and invoke
 * the user's progress function if the next reporting threshold was reached or
 * streams from a new split-WIM part have begun.
 *
 * @stream_discarded:  true if @lte turned out to be a duplicate and was not
 *                     written; its size is then removed from the total rather
 *                     than added to the completed count.  */
static void
do_write_streams_progress(struct write_streams_progress_data *progress_data,
			  struct wim_lookup_table_entry *lte,
			  bool stream_discarded)
{
	union wimlib_progress_info *progress = &progress_data->progress;
	bool new_wim_part;

	if (stream_discarded) {
		progress->write_streams.total_bytes -= wim_resource_size(lte);
		/* Shrinking the total may put the next report threshold past
		 * the end; clamp it back down (unless reporting is done).  */
		if (progress_data->next_progress != ~(uint64_t)0 &&
		    progress_data->next_progress > progress->write_streams.total_bytes)
		{
			progress_data->next_progress = progress->write_streams.total_bytes;
		}
	} else {
		progress->write_streams.completed_bytes += wim_resource_size(lte);
	}
	/* Detect a transition to reading streams from a different WIM part of
	 * a split WIM.  */
	new_wim_part = false;
	if (lte->resource_location == RESOURCE_IN_WIM &&
	    lte->wim != progress_data->prev_wim_part)
	{
		if (progress_data->prev_wim_part) {
			new_wim_part = true;
			progress->write_streams.completed_parts++;
		}
		progress_data->prev_wim_part = lte->wim;
	}
	progress->write_streams.completed_streams++;
	if (progress_data->progress_func
	    && (progress->write_streams.completed_bytes >= progress_data->next_progress
		|| new_wim_part))
	{
		progress_data->progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
					     progress);
		if (progress_data->next_progress == progress->write_streams.total_bytes) {
			/* Final report delivered; disable further reports.  */
			progress_data->next_progress = ~(uint64_t)0;
		} else {
			/* Report again after about another 1% of the total,
			 * but never past the end.  */
			progress_data->next_progress =
				min(progress->write_streams.total_bytes,
				    progress->write_streams.completed_bytes +
					progress->write_streams.total_bytes / 100);
		}
	}
}
+
/* Context for serial_write_stream().  */
struct serial_write_stream_ctx {
	struct filedes *out_fd;		/* output WIM file */
	int out_ctype;			/* WIMLIB_COMPRESSION_TYPE_* */
	int write_resource_flags;	/* WIMLIB_WRITE_RESOURCE_FLAG_* */
};
+
+static int
+serial_write_stream(struct wim_lookup_table_entry *lte, void *_ctx)
+{
+ struct serial_write_stream_ctx *ctx = _ctx;
+ return write_wim_resource(lte, ctx->out_fd,
+ ctx->out_ctype, <e->output_resource_entry,
+ ctx->write_resource_flags);
+}
+
+
/* Write a list of streams, taking into account that some streams may be
 * duplicates that are checksummed and discarded on the fly, and also delegating
 * the actual writing of a stream to a function @write_stream_cb, which is
 * passed the context @write_stream_ctx.
 *
 * Streams are consumed from the head of @stream_list until it is empty.
 * Returns 0 on success, or the first error from hashing or from
 * @write_stream_cb (any remaining streams are then left unwritten).  */
static int
do_write_stream_list(struct list_head *stream_list,
		     struct wim_lookup_table *lookup_table,
		     int (*write_stream_cb)(struct wim_lookup_table_entry *, void *),
		     void *write_stream_ctx,
		     struct write_streams_progress_data *progress_data)
{
	int ret = 0;
	struct wim_lookup_table_entry *lte;
	bool stream_discarded;

	/* For each stream in @stream_list ... */
	while (!list_empty(stream_list)) {
		stream_discarded = false;
		lte = container_of(stream_list->next,
				   struct wim_lookup_table_entry,
				   write_streams_list);
		list_del(&lte->write_streams_list);
		if (lte->unhashed && !lte->unique_size) {
			/* Unhashed stream that shares a size with some other
			 * stream in the WIM we are writing.  The stream must be
			 * checksummed to know if we need to write it or not. */
			struct wim_lookup_table_entry *tmp;
			u32 orig_refcnt = lte->out_refcnt;

			ret = hash_unhashed_stream(lte, lookup_table, &tmp);
			if (ret)
				break;
			if (tmp != lte) {
				lte = tmp;
				/* We found a duplicate stream.  A changed
				 * out_refcnt means the duplicate is (or will
				 * be) written, so this copy is redundant.  */
				if (orig_refcnt != tmp->out_refcnt) {
					/* We have already written, or are going
					 * to write, the duplicate stream.  So
					 * just skip to the next stream. */
					DEBUG("Discarding duplicate stream of length %"PRIu64,
					      wim_resource_size(lte));
					lte->no_progress = 0;
					stream_discarded = true;
					goto skip_to_progress;
				}
			}
		}

		/* Here, @lte is either a hashed stream or an unhashed stream
		 * with a unique size.  In either case we know that the stream
		 * has to be written.  In either case the SHA1 message digest
		 * will be calculated over the stream while writing it; however,
		 * in the former case this is done merely to check the data,
		 * while in the latter case this is done because we do not have
		 * the SHA1 message digest yet.  */
		wimlib_assert(lte->out_refcnt != 0);
		lte->deferred = 0;
		lte->no_progress = 0;
		ret = (*write_stream_cb)(lte, write_stream_ctx);
		if (ret)
			break;
		/* In parallel mode, some streams are deferred for later,
		 * serialized processing; ignore them here. */
		if (lte->deferred)
			continue;
		if (lte->unhashed) {
			/* The stream's hash is now known; move it into the
			 * lookup table proper.  */
			list_del(&lte->unhashed_list);
			lookup_table_insert(lookup_table, lte);
			lte->unhashed = 0;
		}
	skip_to_progress:
		if (!lte->no_progress) {
			do_write_streams_progress(progress_data,
						  lte, stream_discarded);
		}
	}
	return ret;
}
+
+static int
+do_write_stream_list_serial(struct list_head *stream_list,
+ struct wim_lookup_table *lookup_table,
+ struct filedes *out_fd,
+ int out_ctype,
+ int write_resource_flags,
+ struct write_streams_progress_data *progress_data)
+{
+ struct serial_write_stream_ctx ctx = {
+ .out_fd = out_fd,
+ .out_ctype = out_ctype,
+ .write_resource_flags = write_resource_flags,
+ };
+ return do_write_stream_list(stream_list,
+ lookup_table,
+ serial_write_stream,
+ &ctx,
+ progress_data);
+}
+
+static inline int
+write_flags_to_resource_flags(int write_flags)
+{
+ int resource_flags = 0;
+
+ if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
+ resource_flags |= WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS;
+ if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
+ resource_flags |= WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE;
+ return resource_flags;
+}
+
/* Write a list of streams using a single thread, issuing an initial
 * WRITE_STREAMS progress message before starting.  */
static int
write_stream_list_serial(struct list_head *stream_list,
			 struct wim_lookup_table *lookup_table,
			 struct filedes *out_fd,
			 int out_ctype,
			 int write_resource_flags,
			 struct write_streams_progress_data *progress_data)
{
	union wimlib_progress_info *progress = &progress_data->progress;

	DEBUG("Writing stream list of size %"PRIu64" (serial version)",
	      progress->write_streams.total_streams);
	/* Only the main thread does any work in this path.  */
	progress->write_streams.num_threads = 1;
	if (progress_data->progress_func) {
		progress_data->progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
					     progress);
	}
	return do_write_stream_list_serial(stream_list,
					   lookup_table,
					   out_fd,
					   out_ctype,
					   write_resource_flags,
					   progress_data);
}
+
+#ifdef ENABLE_MULTITHREADED_COMPRESSION
/* Write the output chunks of one message to the output file, recording each
 * chunk's size in @chunk_tab.
 *
 * For a pipable WIM, each chunk is prefixed on disk by a struct pwm_chunk_hdr
 * giving its compressed size.  All buffers are gathered into a single
 * full_writev() call.  */
static int
write_wim_chunks(struct message *msg, struct filedes *out_fd,
		 struct chunk_table *chunk_tab,
		 int write_resource_flags)
{
	struct iovec *vecs;
	struct pwm_chunk_hdr *chunk_hdrs;
	unsigned nvecs;
	int ret;

	for (unsigned i = 0; i < msg->num_chunks; i++)
		chunk_tab_record_chunk(chunk_tab, msg->out_chunks[i].iov_len);

	if (!(write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE)) {
		nvecs = msg->num_chunks;
		vecs = msg->out_chunks;
	} else {
		/* Special case: If writing a compressed resource to a pipable
		 * WIM, prefix each compressed chunk with a header that gives
		 * its compressed size. */
		nvecs = msg->num_chunks * 2;
		/* Stack allocations are bounded: at most
		 * 2 * MAX_CHUNKS_PER_MSG iovecs and MAX_CHUNKS_PER_MSG
		 * headers.  */
		vecs = alloca(nvecs * sizeof(vecs[0]));
		chunk_hdrs = alloca(msg->num_chunks * sizeof(chunk_hdrs[0]));

		for (unsigned i = 0; i < msg->num_chunks; i++) {
			chunk_hdrs[i].compressed_size = cpu_to_le32(msg->out_chunks[i].iov_len);
			/* Interleave: header, then chunk data.  */
			vecs[i * 2].iov_base = &chunk_hdrs[i];
			vecs[i * 2].iov_len = sizeof(chunk_hdrs[i]);
			vecs[i * 2 + 1].iov_base = msg->out_chunks[i].iov_base;
			vecs[i * 2 + 1].iov_len = msg->out_chunks[i].iov_len;
		}
	}
	ret = full_writev(out_fd, vecs, nvecs);
	if (ret)
		ERROR_WITH_ERRNO("Failed to write WIM chunks");
	return ret;
}
+
/* State of the main thread while writing streams with parallel chunk
 * compression.  */
struct main_writer_thread_ctx {
	/* Input parameters.  */
	struct list_head *stream_list;
	struct wim_lookup_table *lookup_table;
	struct filedes *out_fd;
	off_t res_start_offset;		/* file offset of the resource being written */
	int out_ctype;
	int write_resource_flags;
	struct shared_queue *res_to_compress_queue;	/* to compressor threads */
	struct shared_queue *compressed_res_queue;	/* from compressor threads */
	size_t num_messages;		/* number of entries in @msgs */
	struct write_streams_progress_data *progress_data;

	/* Message bookkeeping.  */
	struct list_head available_msgs;	/* messages free for reuse */
	struct list_head outstanding_streams;	/* streams with chunks in flight */
	struct list_head serial_streams;	/* streams to write without compression */
	size_t num_outstanding_messages;	/* messages held by compressor threads */

	/* State of the stream currently being read.  */
	SHA_CTX next_sha_ctx;
	u64 next_chunk;			/* index of the next chunk to read */
	u64 next_num_chunks;		/* total chunks in the current stream */
	struct wim_lookup_table_entry *next_lte;

	struct message *msgs;		/* preallocated message array */
	struct message *next_msg;	/* message currently being filled */
	struct chunk_table *cur_chunk_tab;	/* table for the stream being written */
};
+
+static int
+init_message(struct message *msg)
+{
+ for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
+ msg->compressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
+ msg->uncompressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
+ if (msg->compressed_chunks[i] == NULL ||
+ msg->uncompressed_chunks[i] == NULL)
+ return WIMLIB_ERR_NOMEM;
+ }
+ return 0;
+}
+
+static void
+destroy_message(struct message *msg)
+{
+ for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
+ FREE(msg->compressed_chunks[i]);
+ FREE(msg->uncompressed_chunks[i]);
+ }
+}
+
+static void
+free_messages(struct message *msgs, size_t num_messages)
+{
+ if (msgs) {
+ for (size_t i = 0; i < num_messages; i++)
+ destroy_message(&msgs[i]);
+ FREE(msgs);
+ }
+}
+
+static struct message *
+allocate_messages(size_t num_messages)
+{
+ struct message *msgs;
+
+ msgs = CALLOC(num_messages, sizeof(struct message));
+ if (!msgs)
+ return NULL;
+ for (size_t i = 0; i < num_messages; i++) {
+ if (init_message(&msgs[i])) {
+ free_messages(msgs, num_messages);
+ return NULL;
+ }
+ }
+ return msgs;
+}
+
+static void
+main_writer_thread_destroy_ctx(struct main_writer_thread_ctx *ctx)
+{
+ while (ctx->num_outstanding_messages--)
+ shared_queue_get(ctx->compressed_res_queue);
+ free_messages(ctx->msgs, ctx->num_messages);
+ FREE(ctx->cur_chunk_tab);
+}
+
/* Initialize the runtime state of the main writer thread's context (the input
 * parameters must already be filled in by the caller).
 * Returns 0 or WIMLIB_ERR_NOMEM.  */
static int
main_writer_thread_init_ctx(struct main_writer_thread_ctx *ctx)
{
	/* Pre-allocate all the buffers that will be needed to do the chunk
	 * compression. */
	ctx->msgs = allocate_messages(ctx->num_messages);
	if (!ctx->msgs)
		return WIMLIB_ERR_NOMEM;

	/* Initially, all the messages are available to use. */
	INIT_LIST_HEAD(&ctx->available_msgs);
	for (size_t i = 0; i < ctx->num_messages; i++)
		list_add_tail(&ctx->msgs[i].list, &ctx->available_msgs);

	/* outstanding_streams is the list of streams that currently have had
	 * chunks sent off for compression.
	 *
	 * The first stream in outstanding_streams is the stream that is
	 * currently being written.
	 *
	 * The last stream in outstanding_streams is the stream that is
	 * currently being read and having chunks fed to the compressor threads.
	 * */
	INIT_LIST_HEAD(&ctx->outstanding_streams);
	ctx->num_outstanding_messages = 0;

	ctx->next_msg = NULL;

	/* Resources that don't need any chunks compressed are added to this
	 * list and written directly by the main thread. */
	INIT_LIST_HEAD(&ctx->serial_streams);

	/* No chunk table is under construction until the first compressed
	 * stream begins.  */
	ctx->cur_chunk_tab = NULL;

	return 0;
}
+
/* Receive one message of compressed chunks back from the compressor threads,
 * then write out, in order, every compressed chunk that has become writable.
 * Messages may complete out of order; a completed message that is not for the
 * next chunks of the stream currently being written remains buffered on its
 * stream's msg_list until all earlier chunks have arrived.
 *
 * Returns 0 on success or a nonzero error code on write failure. */
static int
receive_compressed_chunks(struct main_writer_thread_ctx *ctx)
{
	struct message *msg;
	struct wim_lookup_table_entry *cur_lte;
	int ret;

	wimlib_assert(!list_empty(&ctx->outstanding_streams));
	wimlib_assert(ctx->num_outstanding_messages != 0);

	/* The stream currently being written is the oldest outstanding one. */
	cur_lte = container_of(ctx->outstanding_streams.next,
			       struct wim_lookup_table_entry,
			       being_compressed_list);

	/* Get the next message from the queue and process it.
	 * The message will contain 1 or more data chunks that have been
	 * compressed. */
	msg = shared_queue_get(ctx->compressed_res_queue);
	msg->complete = true;
	--ctx->num_outstanding_messages;

	/* Is this the next chunk in the current resource?  If it's not
	 * (i.e., an earlier chunk in a same or different resource
	 * hasn't been compressed yet), do nothing, and keep this
	 * message around until all earlier chunks are received.
	 *
	 * Otherwise, write all the chunks we can.  Note: the loop condition
	 * re-reads the head of cur_lte->msg_list each iteration, so @msg is
	 * rebound to the oldest pending message for the current stream. */
	while (cur_lte != NULL &&
	       !list_empty(&cur_lte->msg_list)
	       && (msg = container_of(cur_lte->msg_list.next,
				      struct message,
				      list))->complete)
	{
		/* Return the message to the pool of available messages. */
		list_move(&msg->list, &ctx->available_msgs);
		if (msg->begin_chunk == 0) {
			/* First set of chunks. */

			/* Write pipable WIM stream header if needed. */
			if (ctx->write_resource_flags &
			    WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE)
			{
				ret = write_pwm_stream_header(cur_lte, ctx->out_fd,
							      WIM_RESHDR_FLAG_COMPRESSED);
				if (ret)
					return ret;
			}

			/* Save current offset. */
			ctx->res_start_offset = ctx->out_fd->offset;

			/* Begin building the chunk table, and leave space for
			 * it if needed. */
			ret = begin_wim_resource_chunk_tab(cur_lte,
							   ctx->out_fd,
							   &ctx->cur_chunk_tab,
							   ctx->write_resource_flags);
			if (ret)
				return ret;

		}

		/* Write the compressed chunks from the message. */
		ret = write_wim_chunks(msg, ctx->out_fd, ctx->cur_chunk_tab,
				       ctx->write_resource_flags);
		if (ret)
			return ret;

		/* Was this the last chunk of the stream?  If so, finish
		 * it. */
		if (list_empty(&cur_lte->msg_list) &&
		    msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks)
		{
			u64 res_csize;

			/* Go back and fill in the chunk table with the real
			 * offsets. */
			ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab,
							    ctx->out_fd,
							    ctx->res_start_offset,
							    ctx->write_resource_flags);
			if (ret)
				return ret;

			list_del(&cur_lte->being_compressed_list);

			res_csize = ctx->out_fd->offset - ctx->res_start_offset;

			FREE(ctx->cur_chunk_tab);
			ctx->cur_chunk_tab = NULL;

			/* Check for resources compressed to greater than or
			 * equal to their original size and write them
			 * uncompressed instead.  (But never do this if writing
			 * to a pipe.) */
			if (res_csize >= wim_resource_size(cur_lte) &&
			    !(ctx->write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE))
			{
				DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
				      "writing uncompressed instead",
				      wim_resource_size(cur_lte), res_csize);
				/* Rewind over the compressed data just
				 * written, then write the raw stream. */
				ret = seek_and_truncate(ctx->out_fd, ctx->res_start_offset);
				if (ret)
					return ret;
				ret = write_wim_resource(cur_lte,
							 ctx->out_fd,
							 WIMLIB_COMPRESSION_TYPE_NONE,
							 &cur_lte->output_resource_entry,
							 ctx->write_resource_flags);
				if (ret)
					return ret;
			} else {
				/* Fill in the output resource entry for the
				 * compressed stream just completed. */
				cur_lte->output_resource_entry.size =
					res_csize;

				cur_lte->output_resource_entry.original_size =
					cur_lte->resource_entry.original_size;

				cur_lte->output_resource_entry.offset =
					ctx->res_start_offset;

				cur_lte->output_resource_entry.flags =
					cur_lte->resource_entry.flags |
					WIM_RESHDR_FLAG_COMPRESSED;

				DEBUG("Wrote compressed resource "
				      "(%"PRIu64" => %"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)",
				      cur_lte->output_resource_entry.original_size,
				      cur_lte->output_resource_entry.size,
				      cur_lte->output_resource_entry.offset,
				      cur_lte->output_resource_entry.flags);
			}

			do_write_streams_progress(ctx->progress_data,
						  cur_lte, false);

			/* Since we just finished writing a stream, write any
			 * streams that have been added to the serial_streams
			 * list for direct writing by the main thread (e.g.
			 * resources that don't need to be compressed because
			 * the desired compression type is the same as the
			 * previous compression type). */
			if (!list_empty(&ctx->serial_streams)) {
				ret = do_write_stream_list_serial(&ctx->serial_streams,
								  ctx->lookup_table,
								  ctx->out_fd,
								  ctx->out_ctype,
								  ctx->write_resource_flags,
								  ctx->progress_data);
				if (ret)
					return ret;
			}

			/* Advance to the next stream to write. */
			if (list_empty(&ctx->outstanding_streams)) {
				cur_lte = NULL;
			} else {
				cur_lte = container_of(ctx->outstanding_streams.next,
						       struct wim_lookup_table_entry,
						       being_compressed_list);
			}
		}
	}
	return 0;
}
+
/* Called when the main thread has read a new chunk of data.  Buffers the
 * chunk into the message currently being assembled and, once the message is
 * full, sends it off to the compressor threads.  May block receiving
 * compressed results if no message buffer is currently available. */
static int
main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx)
{
	struct main_writer_thread_ctx *ctx = _ctx;
	int ret;
	struct message *next_msg;
	u64 next_chunk_in_msg;

	/* Update SHA1 message digest for the stream currently being read by the
	 * main thread. */
	sha1_update(&ctx->next_sha_ctx, chunk, chunk_size);

	/* We send chunks of data to the compressor threads in batches which we
	 * refer to as "messages".  @next_msg is the message that is currently
	 * being prepared to send off.  If it is NULL, that indicates that we
	 * need to start a new message. */
	next_msg = ctx->next_msg;
	if (!next_msg) {
		/* We need to start a new message.  First check to see if there
		 * is a message available in the list of available messages.  If
		 * so, we can just take one.  If not, all the messages (there is
		 * a fixed number of them, proportional to the number of
		 * threads) have been sent off to the compressor threads, so we
		 * receive messages from the compressor threads containing
		 * compressed chunks of data.
		 *
		 * We may need to receive multiple messages before one is
		 * actually available to use because messages received that are
		 * *not* for the very next set of chunks to compress must be
		 * buffered until it's time to write those chunks. */
		while (list_empty(&ctx->available_msgs)) {
			ret = receive_compressed_chunks(ctx);
			if (ret)
				return ret;
		}

		next_msg = container_of(ctx->available_msgs.next,
					struct message, list);
		list_del(&next_msg->list);
		next_msg->complete = false;
		next_msg->begin_chunk = ctx->next_chunk;
		next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG,
					   ctx->next_num_chunks - ctx->next_chunk);
		ctx->next_msg = next_msg;
	}

	/* Fill in the next chunk to compress */
	next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk;

	next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size;
	memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg],
	       chunk, chunk_size);
	ctx->next_chunk++;
	if (++next_chunk_in_msg == next_msg->num_chunks) {
		/* Message is full: send off this array of chunks to compress.
		 * Also queue the message on the current stream's msg_list so
		 * the writer can pick it up in order later. */
		list_add_tail(&next_msg->list, &ctx->next_lte->msg_list);
		shared_queue_put(ctx->res_to_compress_queue, next_msg);
		++ctx->num_outstanding_messages;
		ctx->next_msg = NULL;
	}
	return 0;
}
+
+static int
+main_writer_thread_finish(void *_ctx)
+{
+ struct main_writer_thread_ctx *ctx = _ctx;
+ int ret;
+ while (ctx->num_outstanding_messages != 0) {
+ ret = receive_compressed_chunks(ctx);
+ if (ret)
+ return ret;
+ }
+ wimlib_assert(list_empty(&ctx->outstanding_streams));
+ return do_write_stream_list_serial(&ctx->serial_streams,
+ ctx->lookup_table,
+ ctx->out_fd,
+ ctx->out_ctype,
+ ctx->write_resource_flags,
+ ctx->progress_data);
+}
+
+static int
+submit_stream_for_compression(struct wim_lookup_table_entry *lte,
+ struct main_writer_thread_ctx *ctx)
+{
+ int ret;
+
+ /* Read the entire stream @lte, feeding its data chunks to the
+ * compressor threads. Also SHA1-sum the stream; this is required in
+ * the case that @lte is unhashed, and a nice additional verification
+ * when @lte is already hashed. */
+ sha1_init(&ctx->next_sha_ctx);
+ ctx->next_chunk = 0;
+ ctx->next_num_chunks = wim_resource_chunks(lte);
+ ctx->next_lte = lte;
+ INIT_LIST_HEAD(<e->msg_list);
+ list_add_tail(<e->being_compressed_list, &ctx->outstanding_streams);
+ ret = read_resource_prefix(lte, wim_resource_size(lte),
+ main_writer_thread_cb, ctx, 0);
+ if (ret)
+ return ret;
+ wimlib_assert(ctx->next_chunk == ctx->next_num_chunks);
+ return finalize_and_check_sha1(&ctx->next_sha_ctx, lte);
+}
+
+static int
+main_thread_process_next_stream(struct wim_lookup_table_entry *lte, void *_ctx)
+{
+ struct main_writer_thread_ctx *ctx = _ctx;
+ int ret;
+
+ if (wim_resource_size(lte) < 1000 ||
+ ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
+ (lte->resource_location == RESOURCE_IN_WIM &&
+ !(ctx->write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS) &&
+ lte->wim->compression_type == ctx->out_ctype))
+ {
+ /* Stream is too small or isn't being compressed. Process it by
+ * the main thread when we have a chance. We can't necessarily
+ * process it right here, as the main thread could be in the
+ * middle of writing a different stream. */
+ list_add_tail(<e->write_streams_list, &ctx->serial_streams);
+ lte->deferred = 1;
+ ret = 0;
+ } else {
+ ret = submit_stream_for_compression(lte, ctx);
+ }
+ lte->no_progress = 1;
+ return ret;
+}
+
+static long
+get_default_num_threads(void)
+{
+#ifdef __WIN32__
+ return win32_get_number_of_processors();
+#else
+ return sysconf(_SC_NPROCESSORS_ONLN);
+#endif
+}
+
+/* Equivalent to write_stream_list_serial(), except this takes a @num_threads
+ * parameter and will perform compression using that many threads. Falls
+ * back to write_stream_list_serial() on certain errors, such as a failure to
+ * create the number of threads requested.
+ *
+ * High level description of the algorithm for writing compressed streams in
+ * parallel: We perform compression on chunks of size WIM_CHUNK_SIZE bytes
+ * rather than on full files. The currently executing thread becomes the main
+ * thread and is entirely in charge of reading the data to compress (which may
+ * be in any location understood by the resource code--- such as in an external
+ * file being captured, or in another WIM file from which an image is being
+ * exported) and actually writing the compressed data to the output file.
+ * Additional threads are "compressor threads" and all execute the
+ * compressor_thread_proc, where they repeatedly retrieve buffers of data from
+ * the main thread, compress them, and hand them back to the main thread.
+ *
+ * Certain streams, such as streams that do not need to be compressed (e.g.
+ * input compression type same as output compression type) or streams of very
+ * small size are placed in a list (main_writer_thread_ctx.serial_streams) and
+ * handled entirely by the main thread at an appropriate time.
+ *
+ * At any given point in time, multiple streams may be having chunks compressed
+ * concurrently. The stream that the main thread is currently *reading* may be
+ * later in the list than the stream that the main thread is currently
+ * *writing*.
+ */
+static int
+write_stream_list_parallel(struct list_head *stream_list,
+ struct wim_lookup_table *lookup_table,
+ struct filedes *out_fd,
+ int out_ctype,
+ int write_resource_flags,
+ struct write_streams_progress_data *progress_data,
+ unsigned num_threads)
+{
+ int ret;
+ struct shared_queue res_to_compress_queue;
+ struct shared_queue compressed_res_queue;
+ pthread_t *compressor_threads = NULL;
+ union wimlib_progress_info *progress = &progress_data->progress;
+
+ if (num_threads == 0) {
+ long nthreads = get_default_num_threads();
+ if (nthreads < 1 || nthreads > UINT_MAX) {
+ WARNING("Could not determine number of processors! Assuming 1");
+ goto out_serial;
+ } else if (nthreads == 1) {
+ goto out_serial_quiet;
+ } else {
+ num_threads = nthreads;
+ }
+ }
+
+ DEBUG("Writing stream list of size %"PRIu64" "
+ "(parallel version, num_threads=%u)",
+ progress->write_streams.total_streams, num_threads);
+
+ progress->write_streams.num_threads = num_threads;
+
+ static const size_t MESSAGES_PER_THREAD = 2;
+ size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD);
+
+ DEBUG("Initializing shared queues (queue_size=%zu)", queue_size);
+
+ ret = shared_queue_init(&res_to_compress_queue, queue_size);
+ if (ret)
+ goto out_serial;
+
+ ret = shared_queue_init(&compressed_res_queue, queue_size);
+ if (ret)
+ goto out_destroy_res_to_compress_queue;
+
+ struct compressor_thread_params params;
+ params.res_to_compress_queue = &res_to_compress_queue;
+ params.compressed_res_queue = &compressed_res_queue;
+ params.compress = get_compress_func(out_ctype);
+
+ compressor_threads = MALLOC(num_threads * sizeof(pthread_t));
+ if (!compressor_threads) {
+ ret = WIMLIB_ERR_NOMEM;
+ goto out_destroy_compressed_res_queue;
+ }
+
+ for (unsigned i = 0; i < num_threads; i++) {
+ DEBUG("pthread_create thread %u of %u", i + 1, num_threads);
+ ret = pthread_create(&compressor_threads[i], NULL,
+ compressor_thread_proc, ¶ms);
+ if (ret != 0) {
+ ret = -1;
+ ERROR_WITH_ERRNO("Failed to create compressor "
+ "thread %u of %u",
+ i + 1, num_threads);
+ num_threads = i;
+ goto out_join;
+ }
+ }
+
+ if (progress_data->progress_func) {
+ progress_data->progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
+ progress);
+ }
+
+ struct main_writer_thread_ctx ctx;
+ ctx.stream_list = stream_list;
+ ctx.lookup_table = lookup_table;
+ ctx.out_fd = out_fd;
+ ctx.out_ctype = out_ctype;
+ ctx.res_to_compress_queue = &res_to_compress_queue;
+ ctx.compressed_res_queue = &compressed_res_queue;
+ ctx.num_messages = queue_size;
+ ctx.write_resource_flags = write_resource_flags;
+ ctx.progress_data = progress_data;
+ ret = main_writer_thread_init_ctx(&ctx);
+ if (ret)
+ goto out_join;
+ ret = do_write_stream_list(stream_list, lookup_table,
+ main_thread_process_next_stream,
+ &ctx, progress_data);
+ if (ret)
+ goto out_destroy_ctx;
+
+ /* The main thread has finished reading all streams that are going to be
+ * compressed in parallel, and it now needs to wait for all remaining
+ * chunks to be compressed so that the remaining streams can actually be
+ * written to the output file. Furthermore, any remaining streams that
+ * had processing deferred to the main thread need to be handled. These
+ * tasks are done by the main_writer_thread_finish() function. */
+ ret = main_writer_thread_finish(&ctx);
+out_destroy_ctx:
+ main_writer_thread_destroy_ctx(&ctx);
+out_join:
+ for (unsigned i = 0; i < num_threads; i++)
+ shared_queue_put(&res_to_compress_queue, NULL);
+
+ for (unsigned i = 0; i < num_threads; i++) {
+ if (pthread_join(compressor_threads[i], NULL)) {
+ WARNING_WITH_ERRNO("Failed to join compressor "
+ "thread %u of %u",
+ i + 1, num_threads);
+ }
+ }
+ FREE(compressor_threads);
+out_destroy_compressed_res_queue:
+ shared_queue_destroy(&compressed_res_queue);
+out_destroy_res_to_compress_queue:
+ shared_queue_destroy(&res_to_compress_queue);
+ if (ret >= 0 && ret != WIMLIB_ERR_NOMEM)
+ return ret;
+out_serial:
+ WARNING("Falling back to single-threaded compression");
+out_serial_quiet:
+ return write_stream_list_serial(stream_list,
+ lookup_table,
+ out_fd,
+ out_ctype,
+ write_resource_flags,
+ progress_data);
+
+}
+#endif
+
+/*
+ * Write a list of streams to a WIM (@out_fd) using the compression type
+ * @out_ctype and up to @num_threads compressor threads.
+ */
/* Write @stream_list to @out_fd with compression type @out_ctype, dispatching
 * to the parallel writer when multithreaded compression is compiled in and
 * worthwhile, and to the serial writer otherwise.
 *
 * @num_threads: requested compressor thread count (0 = auto, 1 = force serial).
 * @progress_func: optional progress callback, may be NULL.
 * Returns 0 on success or a nonzero error code. */
static int
write_stream_list(struct list_head *stream_list,
		  struct wim_lookup_table *lookup_table,
		  struct filedes *out_fd, int out_ctype, int write_flags,
		  unsigned num_threads, wimlib_progress_func_t progress_func)
{
	struct wim_lookup_table_entry *lte;
	size_t num_streams = 0;
	u64 total_bytes = 0;
	u64 total_compression_bytes = 0;	/* bytes that actually need (re)compression */
	struct write_streams_progress_data progress_data;
	int ret;
	int write_resource_flags;
	unsigned total_parts = 0;	/* distinct source WIM parts referenced */
	WIMStruct *prev_wim_part = NULL;

	if (list_empty(stream_list))
		return 0;

	write_resource_flags = write_flags_to_resource_flags(write_flags);

	DEBUG("write_resource_flags=0x%08x", write_resource_flags);

	sort_stream_list_by_sequential_order(stream_list,
					     offsetof(struct wim_lookup_table_entry,
						      write_streams_list));

	/* Calculate the total size of the streams to be written.  Note: this
	 * will be the uncompressed size, as we may not know the compressed size
	 * yet, and also this will assume that every unhashed stream will be
	 * written (which will not necessarily be the case). */
	list_for_each_entry(lte, stream_list, write_streams_list) {
		num_streams++;
		total_bytes += wim_resource_size(lte);
		/* A stream needs compression work if the output is compressed
		 * and either the source compression differs or recompression
		 * was explicitly requested. */
		if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
		    && (wim_resource_compression_type(lte) != out_ctype ||
			(write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS)))
		{
			total_compression_bytes += wim_resource_size(lte);
		}
		/* Count each distinct source WIM part (list is sorted, so
		 * consecutive streams from the same part count once). */
		if (lte->resource_location == RESOURCE_IN_WIM) {
			if (prev_wim_part != lte->wim) {
				prev_wim_part = lte->wim;
				total_parts++;
			}
		}

	}

	memset(&progress_data, 0, sizeof(progress_data));
	progress_data.progress_func = progress_func;

	progress_data.progress.write_streams.total_bytes       = total_bytes;
	progress_data.progress.write_streams.total_streams     = num_streams;
	progress_data.progress.write_streams.completed_bytes   = 0;
	progress_data.progress.write_streams.completed_streams = 0;
	progress_data.progress.write_streams.num_threads       = num_threads;
	progress_data.progress.write_streams.compression_type  = out_ctype;
	progress_data.progress.write_streams.total_parts       = total_parts;
	progress_data.progress.write_streams.completed_parts   = 0;

	progress_data.next_progress = 0;
	progress_data.prev_wim_part = NULL;

	/* Only bother with threads when there is a nontrivial amount of data
	 * to compress and the caller did not force a single thread. */
#ifdef ENABLE_MULTITHREADED_COMPRESSION
	if (total_compression_bytes >= 2000000 && num_threads != 1)
		ret = write_stream_list_parallel(stream_list,
						 lookup_table,
						 out_fd,
						 out_ctype,
						 write_resource_flags,
						 &progress_data,
						 num_threads);
	else
#endif
		ret = write_stream_list_serial(stream_list,
					       lookup_table,
					       out_fd,
					       out_ctype,
					       write_resource_flags,
					       &progress_data);
	if (ret == 0)
		DEBUG("Successfully wrote stream list.");
	else
		DEBUG("Failed to write stream list.");
	return ret;
}
+
/* Hash table of streams bucketed by stream size (presumably used to find
 * streams with equal sizes — confirm against init/insert code below). */
struct stream_size_table {
	struct hlist_head *array;	/* bucket array */
	size_t num_entries;		/* number of entries inserted */
	size_t capacity;		/* number of buckets in @array */
};
+
+static int
+init_stream_size_table(struct stream_size_table *tab, size_t capacity)