+#include <errno.h>
+
+#ifdef WITH_NTFS_3G
+# include <time.h>
+# include <ntfs-3g/attrib.h>
+# include <ntfs-3g/inode.h>
+# include <ntfs-3g/dir.h>
+#endif
+
+#ifdef HAVE_ALLOCA_H
+# include <alloca.h>
+#else
+# include <stdlib.h>
+#endif
+
+#include <limits.h>
+
+#if defined(__WIN32__) && !defined(INVALID_HANDLE_VALUE)
+# define INVALID_HANDLE_VALUE ((HANDLE)(-1))
+#endif
+
+/* Chunk table that's located at the beginning of each compressed resource in
+ * the WIM. (This is not the on-disk format; the on-disk format just has an
+ * array of offsets.) */
+struct chunk_table {
+ off_t file_offset;
+ u64 num_chunks;
+ u64 original_resource_size;
+ u64 bytes_per_chunk_entry;
+ u64 table_disk_size;
+ u64 cur_offset;
+ u64 *cur_offset_p;
+ u64 offsets[0];
+};
+
+/*
+ * Allocates and initializes a chunk table, and reserves space for it in the
+ * output file.
+ */
+static int
+begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte,
+ FILE *out_fp,
+ off_t file_offset,
+ struct chunk_table **chunk_tab_ret)
+{
+ u64 size = wim_resource_size(lte);
+ u64 num_chunks = (size + WIM_CHUNK_SIZE - 1) / WIM_CHUNK_SIZE;
+ size_t alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64);
+ struct chunk_table *chunk_tab = CALLOC(1, alloc_size);
+ int ret;
+
+ if (!chunk_tab) {
+ ERROR("Failed to allocate chunk table for %"PRIu64" byte "
+ "resource", size);
+ ret = WIMLIB_ERR_NOMEM;
+ goto out;
+ }
+ chunk_tab->file_offset = file_offset;
+ chunk_tab->num_chunks = num_chunks;
+ chunk_tab->original_resource_size = size;
+ chunk_tab->bytes_per_chunk_entry = (size >= (1ULL << 32)) ? 8 : 4;
+ chunk_tab->table_disk_size = chunk_tab->bytes_per_chunk_entry *
+ (num_chunks - 1);
+ chunk_tab->cur_offset = 0;
+ chunk_tab->cur_offset_p = chunk_tab->offsets;
+
+ if (fwrite(chunk_tab, 1, chunk_tab->table_disk_size, out_fp) !=
+ chunk_tab->table_disk_size) {
+ ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
+ "file resource");
+ FREE(chunk_tab);
+ ret = WIMLIB_ERR_WRITE;
+ goto out;
+ }
+
+ ret = 0;
+ *chunk_tab_ret = chunk_tab;
+out:
+ return ret;
+}
+
/*
 * compress_func_t - Pointer to a function that compresses a chunk of a WIM
 *		     resource.  This may be either wimlib_xpress_compress()
 *		     (xpress-compress.c) or wimlib_lzx_compress()
 *		     (lzx-compress.c).
 *
 * @chunk:	Uncompressed data of the chunk.
 * @chunk_size:	Size of the uncompressed chunk, in bytes.
 * @out:	Pointer to output buffer of size at least (@chunk_size - 1) bytes.
 *
 * Returns the size of the compressed data written to @out in bytes, or 0 if the
 * data could not be compressed to (@chunk_size - 1) bytes or fewer.
 *
 * As a special requirement, the compression code is optimized for the WIM
 * format and therefore requires (@chunk_size <= 32768).
 *
 * As another special requirement, the compression code will read up to 8 bytes
 * off the end of the @chunk array for performance reasons.  The values of these
 * bytes will not affect the output of the compression, but the calling code
 * must make sure that the buffer holding the uncompressed chunk is actually at
 * least (@chunk_size + 8) bytes, or at least that these extra bytes are in
 * mapped memory that will not cause a memory access violation if accessed.
 */
typedef unsigned (*compress_func_t)(const void *chunk, unsigned chunk_size,
				    void *out);
+
+compress_func_t
+get_compress_func(int out_ctype)
+{
+ if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX)
+ return wimlib_lzx_compress;
+ else
+ return wimlib_xpress_compress;
+}
+
+/*
+ * Writes a chunk of a WIM resource to an output file.
+ *
+ * @chunk: Uncompressed data of the chunk.
+ * @chunk_size: Size of the chunk (<= WIM_CHUNK_SIZE)
+ * @out_fp: FILE * to write the chunk to.
+ * @compress: Compression function to use (NULL if writing uncompressed
+ * data).
+ * @chunk_tab: Pointer to chunk table being created. It is updated with the
+ * offset of the chunk we write.
+ *
+ * Returns 0 on success; nonzero on failure.
+ */
+static int
+write_wim_resource_chunk(const void *chunk, unsigned chunk_size,
+ FILE *out_fp, compress_func_t compress,
+ struct chunk_table *chunk_tab)
+{
+ const u8 *out_chunk;
+ unsigned out_chunk_size;
+ if (compress) {
+ u8 *compressed_chunk = alloca(chunk_size);
+
+ out_chunk_size = compress(chunk, chunk_size, compressed_chunk);
+ if (out_chunk_size) {
+ /* Write compressed */
+ out_chunk = compressed_chunk;
+ } else {
+ /* Write uncompressed */
+ out_chunk = chunk;
+ out_chunk_size = chunk_size;
+ }
+ *chunk_tab->cur_offset_p++ = chunk_tab->cur_offset;
+ chunk_tab->cur_offset += out_chunk_size;
+ } else {
+ /* Write uncompressed */
+ out_chunk = chunk;
+ out_chunk_size = chunk_size;
+ }
+ if (fwrite(out_chunk, 1, out_chunk_size, out_fp) != out_chunk_size) {
+ ERROR_WITH_ERRNO("Failed to write WIM resource chunk");
+ return WIMLIB_ERR_WRITE;
+ }
+ return 0;
+}
+
/*
 * Finishes a WIM chunk table and writes it to the output file at the correct
 * offset.
 *
 * The final size of the full compressed resource is returned in the
 * @compressed_size_p.
 */
static int
finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab,
			      FILE *out_fp, u64 *compressed_size_p)
{
	size_t bytes_written;
	/* Seek back to the space reserved by begin_wim_resource_chunk_tab().
	 * NOTE(review): file_offset is an off_t printed with PRIu64 — works on
	 * the usual 64-bit off_t configurations, but a cast would be cleaner;
	 * confirm build flags. */
	if (fseeko(out_fp, chunk_tab->file_offset, SEEK_SET) != 0) {
		ERROR_WITH_ERRNO("Failed to seek to byte %"PRIu64" of output "
				 "WIM file", chunk_tab->file_offset);
		return WIMLIB_ERR_WRITE;
	}

	if (chunk_tab->bytes_per_chunk_entry == 8) {
		/* Convert the offsets to little-endian in place. */
		array_cpu_to_le64(chunk_tab->offsets, chunk_tab->num_chunks);
	} else {
		/* Pack the u64 offsets down into u32 slots in place.  This is
		 * safe only because the loop runs forward: u32 slot i (bytes
		 * 4i..4i+3) is written after u64 slot i (bytes 8i..8i+7) has
		 * been read, and 4i <= 8i. */
		for (u64 i = 0; i < chunk_tab->num_chunks; i++)
			((u32*)chunk_tab->offsets)[i] =
				cpu_to_le32(chunk_tab->offsets[i]);
	}
	/* Skip the first entry: the first chunk's offset (0) is implicit in
	 * the on-disk format, hence table_disk_size covers num_chunks - 1
	 * entries. */
	bytes_written = fwrite((u8*)chunk_tab->offsets +
					chunk_tab->bytes_per_chunk_entry,
			       1, chunk_tab->table_disk_size, out_fp);
	if (bytes_written != chunk_tab->table_disk_size) {
		ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
				 "file resource");
		return WIMLIB_ERR_WRITE;
	}
	/* Return the stream to the end of the file so the caller can continue
	 * appending data. */
	if (fseeko(out_fp, 0, SEEK_END) != 0) {
		ERROR_WITH_ERRNO("Failed to seek to end of output WIM file");
		return WIMLIB_ERR_WRITE;
	}
	*compressed_size_p = chunk_tab->cur_offset + chunk_tab->table_disk_size;
	return 0;
}
+
+static int
+finalize_and_check_sha1(SHA_CTX *sha_ctx, struct wim_lookup_table_entry *lte)
+{
+ u8 md[SHA1_HASH_SIZE];
+ sha1_final(md, sha_ctx);
+ if (lte->unhashed) {
+ copy_hash(lte->hash, md);
+ } else if (!hashes_equal(md, lte->hash)) {
+ ERROR("WIM resource has incorrect hash!");
+ if (lte_filename_valid(lte)) {
+ ERROR("We were reading it from \"%"TS"\"; maybe "
+ "it changed while we were reading it.",
+ lte->file_on_disk);
+ }
+ return WIMLIB_ERR_INVALID_RESOURCE_HASH;
+ }
+ return 0;
+}
+
+
/* Per-resource state threaded through write_resource_cb(). */
struct write_resource_ctx {
	compress_func_t compress;	/* NULL when writing uncompressed */
	struct chunk_table *chunk_tab;	/* NULL when writing uncompressed/raw */
	FILE *out_fp;			/* Output WIM file */
	SHA_CTX sha_ctx;		/* Running SHA1 (valid if doing_sha) */
	bool doing_sha;			/* false for raw copies, where the
					 * uncompressed data is never seen */
};
+
+static int
+write_resource_cb(const void *chunk, size_t chunk_size, void *_ctx)
+{
+ struct write_resource_ctx *ctx = _ctx;
+
+ if (ctx->doing_sha)
+ sha1_update(&ctx->sha_ctx, chunk, chunk_size);
+
+ if (ctx->compress) {
+ return write_wim_resource_chunk(chunk, chunk_size,
+ ctx->out_fp, ctx->compress,
+ ctx->chunk_tab);
+ } else {
+ if (fwrite(chunk, 1, chunk_size, ctx->out_fp) != chunk_size) {
+ ERROR_WITH_ERRNO("Error writing to output WIM");
+ return WIMLIB_ERR_WRITE;
+ } else {
+ return 0;
+ }
+ }
+}
+
+/*
+ * Write a resource to an output WIM.
+ *
+ * @lte: Lookup table entry for the resource, which could be in another WIM,
+ * in an external file, or in another location.
+ *
+ * @out_fp: FILE * opened to the output WIM.
+ *
+ * @out_ctype: One of the WIMLIB_COMPRESSION_TYPE_* constants to indicate
+ * which compression algorithm to use.
+ *
+ * @out_res_entry: On success, this is filled in with the offset, flags,
+ * compressed size, and uncompressed size of the resource
+ * in the output WIM.
+ *
+ * @flags: WIMLIB_RESOURCE_FLAG_RECOMPRESS to force data to be recompressed
+ * even if it could otherwise be copied directly from the input.
+ *
+ * Additional notes: The SHA1 message digest of the uncompressed data is
+ * calculated (except when doing a raw copy --- see below). If the @unhashed
+ * flag is set on the lookup table entry, this message digest is simply copied
+ * to it; otherwise, the message digest is compared with the existing one, and
+ * the function will fail if they do not match.
+ */
+int
+write_wim_resource(struct wim_lookup_table_entry *lte,
+ FILE *out_fp, int out_ctype,
+ struct resource_entry *out_res_entry,
+ int flags)
+{
+ struct write_resource_ctx write_ctx;
+ u64 read_size;
+ u64 new_size;
+ off_t offset;
+ int ret;
+
+ flags &= ~WIMLIB_RESOURCE_FLAG_RECOMPRESS;
+
+ if (wim_resource_size(lte) == 0) {
+ /* Empty resource; nothing needs to be done, so just return
+ * success. */
+ return 0;
+ }
+
+ /* Get current position in output WIM */
+ offset = ftello(out_fp);
+ if (offset == -1) {
+ ERROR_WITH_ERRNO("Can't get position in output WIM");
+ return WIMLIB_ERR_WRITE;
+ }
+
+ /* If we are not forcing the data to be recompressed, and the input
+ * resource is located in a WIM with the same compression type as that
+ * desired other than no compression, we can simply copy the compressed
+ * data without recompressing it. This also means we must skip
+ * calculating the SHA1, as we never will see the uncompressed data. */
+ if (!(flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) &&
+ lte->resource_location == RESOURCE_IN_WIM &&
+ out_ctype != WIMLIB_COMPRESSION_TYPE_NONE &&
+ wimlib_get_compression_type(lte->wim) == out_ctype)
+ {
+ flags |= WIMLIB_RESOURCE_FLAG_RAW;
+ write_ctx.doing_sha = false;
+ read_size = lte->resource_entry.size;
+ } else {
+ write_ctx.doing_sha = true;
+ sha1_init(&write_ctx.sha_ctx);
+ read_size = lte->resource_entry.original_size;
+ }
+
+ /* Initialize the chunk table and set the compression function if
+ * compressing the resource. */
+ if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
+ (flags & WIMLIB_RESOURCE_FLAG_RAW)) {
+ write_ctx.compress = NULL;
+ write_ctx.chunk_tab = NULL;
+ } else {
+ write_ctx.compress = get_compress_func(out_ctype);
+ ret = begin_wim_resource_chunk_tab(lte, out_fp,
+ offset,
+ &write_ctx.chunk_tab);
+ if (ret)
+ return ret;
+ }
+
+ /* Write the entire resource by reading the entire resource and feeding
+ * the data through the write_resource_cb function. */
+ write_ctx.out_fp = out_fp;
+try_write_again:
+ ret = read_resource_prefix(lte, read_size,
+ write_resource_cb, &write_ctx, flags);
+ if (ret)
+ goto out_free_chunk_tab;
+
+ /* Verify SHA1 message digest of the resource, or set the hash for the
+ * first time. */
+ if (write_ctx.doing_sha) {
+ ret = finalize_and_check_sha1(&write_ctx.sha_ctx, lte);
+ if (ret)
+ goto out_free_chunk_tab;
+ }
+
+ out_res_entry->flags = lte->resource_entry.flags;
+ out_res_entry->original_size = wim_resource_size(lte);
+ out_res_entry->offset = offset;
+ if (flags & WIMLIB_RESOURCE_FLAG_RAW) {
+ /* Doing a raw write: The new compressed size is the same as
+ * the compressed size in the other WIM. */
+ new_size = lte->resource_entry.size;
+ } else if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE) {
+ /* Using WIMLIB_COMPRESSION_TYPE_NONE: The new compressed size
+ * is the original size. */
+ new_size = lte->resource_entry.original_size;
+ out_res_entry->flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
+ } else {
+ /* Using a different compression type: Call
+ * finish_wim_resource_chunk_tab() and it will provide the new
+ * compressed size. */
+ ret = finish_wim_resource_chunk_tab(write_ctx.chunk_tab, out_fp,
+ &new_size);
+ if (ret)
+ goto out_free_chunk_tab;
+ if (new_size >= wim_resource_size(lte)) {
+ /* Oops! We compressed the resource to larger than the original
+ * size. Write the resource uncompressed instead. */
+ if (fseeko(out_fp, offset, SEEK_SET) ||
+ fflush(out_fp) ||
+ ftruncate(fileno(out_fp),
+ offset + wim_resource_size(lte)))
+ {
+ ERROR_WITH_ERRNO("Failed to flush and/or truncate "
+ "output WIM file");
+ ret = WIMLIB_ERR_WRITE;
+ goto out_free_chunk_tab;
+ }
+ DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
+ "writing uncompressed instead",
+ wim_resource_size(lte), new_size);
+ write_ctx.compress = NULL;
+ write_ctx.doing_sha = false;
+ out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
+ goto try_write_again;
+ }
+ out_res_entry->flags |= WIM_RESHDR_FLAG_COMPRESSED;
+ }
+ out_res_entry->size = new_size;
+ ret = 0;
+out_free_chunk_tab:
+ FREE(write_ctx.chunk_tab);
+ return ret;
+}
+
+#ifdef ENABLE_MULTITHREADED_COMPRESSION
+
/* Blocking shared queue (solves the producer-consumer problem) */
struct shared_queue {
	unsigned size;			/* Capacity (number of slots) */
	unsigned front;			/* Index of oldest queued item */
	unsigned back;			/* Index of newest queued item */
	unsigned filled_slots;		/* Number of items currently queued */
	void **array;			/* Circular buffer of @size slots */
	pthread_mutex_t lock;		/* Protects all of the above */
	pthread_cond_t msg_avail_cond;	/* Signaled when an item is queued */
	pthread_cond_t space_avail_cond;/* Signaled when a slot frees up */
};
+
+static int
+shared_queue_init(struct shared_queue *q, unsigned size)
+{
+ wimlib_assert(size != 0);
+ q->array = CALLOC(sizeof(q->array[0]), size);
+ if (!q->array)
+ return WIMLIB_ERR_NOMEM;
+ q->filled_slots = 0;
+ q->front = 0;
+ q->back = size - 1;
+ q->size = size;
+ pthread_mutex_init(&q->lock, NULL);
+ pthread_cond_init(&q->msg_avail_cond, NULL);
+ pthread_cond_init(&q->space_avail_cond, NULL);
+ return 0;
+}
+
+static void
+shared_queue_destroy(struct shared_queue *q)
+{
+ FREE(q->array);
+ pthread_mutex_destroy(&q->lock);
+ pthread_cond_destroy(&q->msg_avail_cond);
+ pthread_cond_destroy(&q->space_avail_cond);
+}
+
+static void
+shared_queue_put(struct shared_queue *q, void *obj)
+{
+ pthread_mutex_lock(&q->lock);
+ while (q->filled_slots == q->size)
+ pthread_cond_wait(&q->space_avail_cond, &q->lock);
+
+ q->back = (q->back + 1) % q->size;
+ q->array[q->back] = obj;
+ q->filled_slots++;
+
+ pthread_cond_broadcast(&q->msg_avail_cond);
+ pthread_mutex_unlock(&q->lock);
+}
+
+static void *
+shared_queue_get(struct shared_queue *q)
+{
+ void *obj;
+
+ pthread_mutex_lock(&q->lock);
+ while (q->filled_slots == 0)
+ pthread_cond_wait(&q->msg_avail_cond, &q->lock);
+
+ obj = q->array[q->front];
+ q->array[q->front] = NULL;
+ q->front = (q->front + 1) % q->size;
+ q->filled_slots--;
+
+ pthread_cond_broadcast(&q->space_avail_cond);
+ pthread_mutex_unlock(&q->lock);
+ return obj;
+}
+
/* Arguments shared by all compressor threads. */
struct compressor_thread_params {
	struct shared_queue *res_to_compress_queue;	/* Work input queue */
	struct shared_queue *compressed_res_queue;	/* Finished-work queue */
	compress_func_t compress;			/* Compression function */
};
+
#define MAX_CHUNKS_PER_MSG 2

/* A unit of work exchanged between the main thread and a compressor thread:
 * up to MAX_CHUNKS_PER_MSG consecutive chunks of one stream. */
struct message {
	struct wim_lookup_table_entry *lte;	/* Stream the chunks belong to */
	u8 *uncompressed_chunks[MAX_CHUNKS_PER_MSG];	/* Input buffers */
	u8 *out_compressed_chunks[MAX_CHUNKS_PER_MSG];	/* Per chunk: points at
			 * compressed_chunks[i] if compression helped, else at
			 * uncompressed_chunks[i] (set by compress_chunks()) */
	u8 *compressed_chunks[MAX_CHUNKS_PER_MSG];	/* Output buffers */
	unsigned uncompressed_chunk_sizes[MAX_CHUNKS_PER_MSG];
	unsigned compressed_chunk_sizes[MAX_CHUNKS_PER_MSG]; /* Size of data to
			 * actually write (may equal the uncompressed size) */
	unsigned num_chunks;	/* Number of chunk slots actually in use */
	struct list_head list;	/* Link in available_msgs or a stream's msg_list */
	bool complete;		/* Set once the compressor thread is done */
	u64 begin_chunk;	/* Index of the first chunk in this message */
};
+
+static void
+compress_chunks(struct message *msg, compress_func_t compress)
+{
+ for (unsigned i = 0; i < msg->num_chunks; i++) {
+ DEBUG2("compress chunk %u of %u", i, msg->num_chunks);
+ unsigned len = compress(msg->uncompressed_chunks[i],
+ msg->uncompressed_chunk_sizes[i],
+ msg->compressed_chunks[i]);
+ if (len) {
+ /* To be written compressed */
+ msg->out_compressed_chunks[i] = msg->compressed_chunks[i];
+ msg->compressed_chunk_sizes[i] = len;
+ } else {
+ /* To be written uncompressed */
+ msg->out_compressed_chunks[i] = msg->uncompressed_chunks[i];
+ msg->compressed_chunk_sizes[i] = msg->uncompressed_chunk_sizes[i];
+
+ }
+ }
+}
+
+/* Compressor thread routine. This is a lot simpler than the main thread
+ * routine: just repeatedly get a group of chunks from the
+ * res_to_compress_queue, compress them, and put them in the
+ * compressed_res_queue. A NULL pointer indicates that the thread should stop.
+ * */
+static void *
+compressor_thread_proc(void *arg)
+{
+ struct compressor_thread_params *params = arg;
+ struct shared_queue *res_to_compress_queue = params->res_to_compress_queue;
+ struct shared_queue *compressed_res_queue = params->compressed_res_queue;
+ compress_func_t compress = params->compress;
+ struct message *msg;
+
+ DEBUG("Compressor thread ready");
+ while ((msg = shared_queue_get(res_to_compress_queue)) != NULL) {
+ compress_chunks(msg, compress);
+ shared_queue_put(compressed_res_queue, msg);
+ }
+ DEBUG("Compressor thread terminating");
+ return NULL;
+}
+#endif /* ENABLE_MULTITHREADED_COMPRESSION */
+
/* Accounts for one newly-written stream of @size_added uncompressed bytes in
 * @progress, and fires the progress callback when the next reporting threshold
 * has been reached. */
static void
do_write_streams_progress(union wimlib_progress_info *progress,
			  wimlib_progress_func_t progress_func,
			  uint64_t size_added)
{
	progress->write_streams.completed_bytes += size_added;
	progress->write_streams.completed_streams++;
	/* _private holds the completed_bytes value at which the next callback
	 * should fire; this throttles callbacks to roughly 1% increments. */
	if (progress_func &&
	    progress->write_streams.completed_bytes >= progress->write_streams._private)
	{
		progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
			      progress);
		if (progress->write_streams._private == progress->write_streams.total_bytes) {
			/* Final callback done; push the threshold out of reach
			 * so no further callbacks fire. */
			progress->write_streams._private = ~0;
		} else {
			/* Next threshold: current progress plus 1% of the
			 * total, capped at the total. */
			progress->write_streams._private =
				min(progress->write_streams.total_bytes,
				    progress->write_streams.completed_bytes +
				    progress->write_streams.total_bytes / 100);
		}
	}
}
+
/* Writes each stream in @stream_list to the output WIM, removing it from the
 * list as it goes.  Unhashed streams are checksummed first so that duplicates
 * of streams already written (or queued) can be discarded.  Returns 0 on
 * success, or a WIMLIB_ERR_* code on the first failure (any remaining streams
 * are left on the list). */
static int
do_write_stream_list(struct list_head *stream_list,
		     struct wim_lookup_table *lookup_table,
		     FILE *out_fp,
		     int out_ctype,
		     int write_resource_flags,
		     wimlib_progress_func_t progress_func,
		     union wimlib_progress_info *progress)
{
	int ret = 0;
	struct wim_lookup_table_entry *lte;

	/* For each stream in @stream_list ... */
	while (!list_empty(stream_list)) {
		lte = container_of(stream_list->next,
				   struct wim_lookup_table_entry,
				   write_streams_list);
		list_del(&lte->write_streams_list);
		if (lte->unhashed && !lte->unique_size) {
			/* Unhashed stream that shares a size with some other
			 * stream in the WIM we are writing.  The stream must be
			 * checksummed to know if we need to write it or not. */
			struct wim_lookup_table_entry *tmp;
			u32 orig_refcnt = lte->out_refcnt;

			ret = hash_unhashed_stream(lte,
						   lookup_table,
						   &tmp);
			if (ret)
				break;
			if (tmp != lte) {
				lte = tmp;
				/* We found a duplicate stream. */
				if (orig_refcnt != tmp->out_refcnt) {
					/* We have already written, or are going
					 * to write, the duplicate stream.  So
					 * just skip to the next stream. */
					DEBUG("Discarding duplicate stream of length %"PRIu64,
					      wim_resource_size(lte));
					goto skip_to_progress;
				}
			}
		}

		/* Here, @lte is either a hashed stream or an unhashed stream
		 * with a unique size.  In either case we know that the stream
		 * has to be written.  In either case the SHA1 message digest
		 * will be calculated over the stream while writing it; however,
		 * in the former case this is done merely to check the data,
		 * while in the latter case this is done because we do not have
		 * the SHA1 message digest yet.  */
		wimlib_assert(lte->out_refcnt != 0);
		ret = write_wim_resource(lte,
					 out_fp,
					 out_ctype,
					 &lte->output_resource_entry,
					 write_resource_flags);
		if (ret)
			break;
		if (lte->unhashed) {
			/* The stream's hash is now known; move it into the
			 * lookup table proper. */
			list_del(&lte->unhashed_list);
			lookup_table_insert(lookup_table, lte);
			lte->unhashed = 0;
		}
	skip_to_progress:
		do_write_streams_progress(progress,
					  progress_func,
					  wim_resource_size(lte));
	}
	return ret;
}
+
+static int
+write_stream_list_serial(struct list_head *stream_list,
+ struct wim_lookup_table *lookup_table,
+ FILE *out_fp,
+ int out_ctype,
+ int write_flags,
+ wimlib_progress_func_t progress_func,
+ union wimlib_progress_info *progress)
+{
+ int write_resource_flags = 0;
+ if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
+ write_resource_flags |= WIMLIB_RESOURCE_FLAG_RECOMPRESS;
+
+ progress->write_streams.num_threads = 1;
+ if (progress_func)
+ progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
+ return do_write_stream_list(stream_list,
+ lookup_table,
+ out_fp,
+ out_ctype,
+ write_resource_flags,
+ progress_func,
+ progress);
+}
+
+#ifdef ENABLE_MULTITHREADED_COMPRESSION
+static int
+write_wim_chunks(struct message *msg, FILE *out_fp,
+ struct chunk_table *chunk_tab)
+{
+ for (unsigned i = 0; i < msg->num_chunks; i++) {
+ unsigned chunk_csize = msg->compressed_chunk_sizes[i];
+
+ DEBUG2("Write wim chunk %u of %u (csize = %u)",
+ i, msg->num_chunks, chunk_csize);
+
+ if (fwrite(msg->out_compressed_chunks[i], 1, chunk_csize, out_fp)
+ != chunk_csize)
+ {
+ ERROR_WITH_ERRNO("Failed to write WIM chunk");
+ return WIMLIB_ERR_WRITE;
+ }
+
+ *chunk_tab->cur_offset_p++ = chunk_tab->cur_offset;
+ chunk_tab->cur_offset += chunk_csize;
+ }
+ return 0;
+}
+
/* State for the main thread during parallel (multithreaded) stream writing. */
struct main_writer_thread_ctx {
	struct list_head *stream_list;		/* Streams to be written */
	struct wim_lookup_table *lookup_table;	/* Lookup table of the WIM */
	FILE *out_fp;				/* Output WIM file */
	int out_ctype;				/* Output compression type */
	struct shared_queue *res_to_compress_queue; /* Messages to compressors */
	struct shared_queue *compressed_res_queue;  /* Messages coming back */
	size_t num_messages;			/* Number of messages in msgs[] */
	int write_flags;			/* WIMLIB_WRITE_FLAG_* */
	wimlib_progress_func_t progress_func;	/* May be NULL */
	union wimlib_progress_info *progress;

	struct list_head available_msgs;	/* Messages not currently in flight */
	struct list_head outstanding_streams;	/* Streams with chunks being
						 * compressed; head = stream being
						 * written, tail = stream being read */
	struct list_head serial_streams;	/* Streams to write directly,
						 * without compression threads */
	u64 next_chunk;				/* Chunk index cursor for the
						 * stream currently being read */
	u64 next_num_chunks;			/* Total chunks in that stream */
	struct message *msgs;			/* Preallocated message array */
	struct message *next_msg;		/* Partially-filled message, if any */
	size_t next_chunk_in_msg;		/* Fill position within next_msg */
	struct wim_lookup_table_entry *cur_lte;	/* Stream currently being written */
	struct chunk_table *cur_chunk_tab;	/* Chunk table for cur_lte */
	struct wim_lookup_table_entry *next_lte;/* Stream currently being read */
	SHA_CTX sha_ctx;			/* SHA1 of the stream being read */
	u8 next_hash[20];			/* NOTE(review): presumably a saved
						 * SHA1 digest; not used within this
						 * chunk of the file — confirm. */
};
+
+static int
+init_message(struct message *msg)
+{
+ for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
+ msg->compressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
+ msg->uncompressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
+ if (msg->compressed_chunks[i] == NULL ||
+ msg->uncompressed_chunks[i] == NULL)
+ return WIMLIB_ERR_NOMEM;
+ }
+ return 0;
+}
+
+static void
+destroy_message(struct message *msg)
+{
+ for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
+ FREE(msg->compressed_chunks[i]);
+ FREE(msg->uncompressed_chunks[i]);
+ }
+}
+
+static void
+free_messages(struct message *msgs, size_t num_messages)
+{
+ if (msgs) {
+ for (size_t i = 0; i < num_messages; i++)
+ destroy_message(&msgs[i]);
+ FREE(msgs);
+ }
+}
+
+static struct message *
+allocate_messages(size_t num_messages)
+{
+ struct message *msgs;
+
+ msgs = CALLOC(num_messages, sizeof(struct message));
+ if (!msgs)
+ return NULL;
+ for (size_t i = 0; i < num_messages; i++) {
+ if (init_message(&msgs[i])) {
+ free_messages(msgs, num_messages);
+ return NULL;
+ }
+ }
+ return msgs;
+}
+
+static void
+main_writer_thread_destroy_ctx(struct main_writer_thread_ctx *ctx)
+{
+ free_messages(ctx->msgs, ctx->num_messages);
+ FREE(ctx->cur_chunk_tab);
+}
+
+
+static int
+main_writer_thread_init_ctx(struct main_writer_thread_ctx *ctx)
+{
+ /* Pre-allocate all the buffers that will be needed to do the chunk
+ * compression. */
+ ctx->msgs = allocate_messages(ctx->num_messages);
+ if (!ctx->msgs)
+ return WIMLIB_ERR_NOMEM;
+
+ /* Initially, all the messages are available to use. */
+ INIT_LIST_HEAD(&ctx->available_msgs);
+ for (size_t i = 0; i < ctx->num_messages; i++)
+ list_add_tail(&ctx->msgs[i].list, &ctx->available_msgs);
+
+ /* outstanding_streams is the list of streams that currently have had
+ * chunks sent off for compression.
+ *
+ * The first stream in outstanding_streams is the stream that is
+ * currently being written (cur_lte).
+ *
+ * The last stream in outstanding_streams is the stream that is
+ * currently being read and chunks fed to the compressor threads. */
+ INIT_LIST_HEAD(&ctx->outstanding_streams);
+
+ /* Resources that don't need any chunks compressed are added to this
+ * list and written directly by the main thread. */
+ INIT_LIST_HEAD(&ctx->serial_streams);
+
+ ctx->cur_lte = NULL;
+ return 0;
+}
+
+static int
+receive_compressed_chunks(struct main_writer_thread_ctx *ctx)
+{
+ struct message *msg;
+ struct wim_lookup_table_entry *cur_lte;
+ int ret;
+
+ wimlib_assert(!list_empty(&ctx->outstanding_streams));
+
+ /* Get the next message from the queue and process it.
+ * The message will contain 1 or more data chunks that have been
+ * compressed. */
+ msg = shared_queue_get(ctx->compressed_res_queue);
+ msg->complete = true;
+ cur_lte = ctx->cur_lte;
+
+ /* Is this the next chunk in the current resource? If it's not
+ * (i.e., an earlier chunk in a same or different resource
+ * hasn't been compressed yet), do nothing, and keep this
+ * message around until all earlier chunks are received.
+ *
+ * Otherwise, write all the chunks we can. */
+ while (cur_lte != NULL &&
+ !list_empty(&cur_lte->msg_list) &&
+ (msg = container_of(cur_lte->msg_list.next,
+ struct message,
+ list))->complete)
+ {
+ if (msg->begin_chunk == 0) {
+
+ /* This is the first set of chunks. Leave space
+ * for the chunk table in the output file. */
+ off_t cur_offset = ftello(ctx->out_fp);
+ if (cur_offset == -1) {
+ ret = WIMLIB_ERR_WRITE;
+ goto out;
+ }
+ ret = begin_wim_resource_chunk_tab(cur_lte,
+ ctx->out_fp,
+ cur_offset,
+ &ctx->cur_chunk_tab);
+ if (ret)
+ goto out;
+ }
+
+ /* Write the compressed chunks from the message. */
+ ret = write_wim_chunks(msg, ctx->out_fp, ctx->cur_chunk_tab);
+ if (ret)
+ goto out;
+
+ list_del(&msg->list);
+
+ /* This message is available to use for different chunks
+ * now. */
+ list_add(&msg->list, &ctx->available_msgs);
+
+ /* Was this the last chunk of the stream? If so, finish
+ * it. */
+ if (list_empty(&cur_lte->msg_list) &&
+ msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks)
+ {
+ DEBUG2("Finish wim chunk tab");
+ u64 res_csize;
+ ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab,
+ ctx->out_fp,
+ &res_csize);
+ if (ret)
+ goto out;
+
+#if 0
+ if (res_csize >= wim_resource_size(cur_lte)) {
+ /* Oops! We compressed the resource to
+ * larger than the original size. Write
+ * the resource uncompressed instead. */
+ ret = write_uncompressed_resource_and_truncate(
+ cur_lte,
+ ctx->out_fp,
+ ctx->cur_chunk_tab->file_offset,
+ &cur_lte->output_resource_entry);
+ if (ret)
+ goto out;
+ } else
+#endif
+ {
+ cur_lte->output_resource_entry.size =
+ res_csize;
+
+ cur_lte->output_resource_entry.original_size =
+ cur_lte->resource_entry.original_size;
+
+ cur_lte->output_resource_entry.offset =
+ ctx->cur_chunk_tab->file_offset;
+
+ cur_lte->output_resource_entry.flags =
+ cur_lte->resource_entry.flags |
+ WIM_RESHDR_FLAG_COMPRESSED;
+ }
+ do_write_streams_progress(ctx->progress, ctx->progress_func,
+ wim_resource_size(cur_lte));
+ FREE(ctx->cur_chunk_tab);
+ ctx->cur_chunk_tab = NULL;
+
+ struct list_head *next = cur_lte->write_streams_list.next;
+ list_del(&cur_lte->write_streams_list);
+
+ if (next == &ctx->outstanding_streams)
+ cur_lte = NULL;
+ else
+ cur_lte = container_of(cur_lte->write_streams_list.next,
+ struct wim_lookup_table_entry,
+ write_streams_list);
+
+ /* Since we just finished writing a stream, write any
+ * streams that have been added to the serial_streams
+ * list for direct writing by the main thread (e.g.
+ * resources that don't need to be compressed because
+ * the desired compression type is the same as the
+ * previous compression type). */
+ ret = do_write_stream_list(&ctx->serial_streams,
+ ctx->lookup_table,
+ ctx->out_fp,
+ ctx->out_ctype,
+ ctx->progress_func,
+ ctx->progress,
+ 0);
+ if (ret)
+ goto out;
+ }
+ }
+out:
+ ctx->cur_lte = cur_lte;
+ return ret;
+}
+
/* read_resource_prefix() callback used during parallel compression: hashes
 * each chunk and batches chunks into messages for the compressor threads. */
static int
main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx)
{
	struct main_writer_thread_ctx *ctx = _ctx;
	int ret;
	struct message *next_msg;

	next_msg = ctx->next_msg;

	/* Fold this chunk into the stream's running SHA1. */
	sha1_update(&ctx->sha_ctx, chunk, chunk_size);

	if (!next_msg) {
		/* Need a fresh message; if none are free, drain completed
		 * work from the compressor threads until one is. */
		if (list_empty(&ctx->available_msgs)) {
			ret = receive_compressed_chunks(ctx);
			if (ret)
				return ret;
		}

		wimlib_assert(!list_empty(&ctx->available_msgs));

		next_msg = container_of(ctx->available_msgs.next,
					struct message,
					list);
		list_del(&next_msg->list);
		next_msg->complete = false;
		/* NOTE(review): ctx->next_chunk is read here but never
		 * advanced anywhere in this function, so every message of a
		 * stream would get the same begin_chunk — confirm it is
		 * incremented elsewhere (not visible in this chunk of the
		 * file). */
		next_msg->begin_chunk = ctx->next_chunk;
		next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG,
					   ctx->next_num_chunks - ctx->next_chunk);
		ctx->next_chunk_in_msg = 0;
		/* NOTE(review): the message is removed from available_msgs but
		 * never linked into a stream's msg_list here, yet
		 * receive_compressed_chunks() walks cur_lte->msg_list to find
		 * completed messages — verify the list_add_tail() happens
		 * elsewhere. */
	}

	wimlib_assert(next_msg != NULL);
	wimlib_assert(ctx->next_chunk_in_msg < next_msg->num_chunks);

	/* Copy the chunk into the message's preallocated buffer. */
	next_msg->uncompressed_chunk_sizes[ctx->next_chunk_in_msg] = chunk_size;
	memcpy(next_msg->uncompressed_chunks[ctx->next_chunk_in_msg],
	       chunk, chunk_size);

	/* Once the message is full, hand it to the compressor threads. */
	if (++ctx->next_chunk_in_msg == next_msg->num_chunks) {
		shared_queue_put(ctx->res_to_compress_queue,
				 next_msg);
		ctx->next_msg = NULL;
	}
	return 0;
}
+
+static int
+submit_stream_for_compression(struct wim_lookup_table_entry *lte,
+ struct main_writer_thread_ctx *ctx)
+{
+ int ret;
+
+ sha1_init(&ctx->sha_ctx);
+ ctx->next_num_chunks = wim_resource_chunks(lte);
+ ret = read_resource_prefix(lte, wim_resource_size(lte),
+ main_writer_thread_cb, ctx, 0);
+ if (ret)
+ return ret;
+ ret = finalize_and_check_sha1(&ctx->sha_ctx, lte);
+ if (ret)
+ return ret;
+}
+
/*
 * This function is executed by the main thread when the resources are being
 * compressed in parallel.  The main thread is in change of all reading of the
 * uncompressed data and writing of the compressed data.  The compressor threads
 * *only* do compression from/to in-memory buffers.
 *
 * Each unit of work given to a compressor thread is up to MAX_CHUNKS_PER_MSG
 * chunks of compressed data to compress, represented in a `struct message'.
 * Each message is passed from the main thread to a worker thread through the
 * res_to_compress_queue, and it is passed back through the
 * compressed_res_queue.
 */
static int
main_writer_thread_proc(struct main_writer_thread_ctx *ctx)
{
	int ret;
	struct list_head *stream_list;
	struct wim_lookup_table_entry *lte;

	ret = main_writer_thread_init_ctx(ctx);
	if (ret)
		goto out_destroy_ctx;

	stream_list = ctx->stream_list;
	while (!list_empty(stream_list)) {
		lte = container_of(stream_list->next,
				   struct wim_lookup_table_entry,
				   write_streams_list);
		list_del(&lte->write_streams_list);
		if (lte->unhashed && !lte->unique_size) {
			/* Unhashed stream sharing a size with another stream:
			 * checksum it to detect duplicates (same logic as in
			 * do_write_stream_list()). */
			struct wim_lookup_table_entry *tmp;
			u32 orig_refcnt = lte->out_refcnt;

			ret = hash_unhashed_stream(lte, ctx->lookup_table, &tmp);
			if (ret)
				goto out_destroy_ctx;
			if (tmp != lte) {
				lte = tmp;
				if (orig_refcnt != tmp->out_refcnt) {
					DEBUG("Discarding duplicate stream of length %"PRIu64,
					      wim_resource_size(lte));
					goto skip_to_progress;
				}
			}
		}

		/* Small streams, uncompressed output, and raw-copyable
		 * streams are not worth parallel compression; queue them for
		 * direct writing by the main thread instead. */
		if (wim_resource_size(lte) < 1000 ||
		    ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
		    (lte->resource_location == RESOURCE_IN_WIM &&
		     wimlib_get_compression_type(lte->wim) == ctx->out_ctype))
		{
			/* NOTE(review): streams added to serial_streams are
			 * only flushed inside receive_compressed_chunks(),
			 * i.e. when a compressed stream finishes.  Serial
			 * streams queued after the last compressed stream
			 * completes (or when there are no compressed streams
			 * at all) appear never to be written — confirm whether
			 * a final do_write_stream_list(&ctx->serial_streams,
			 * ...) call is missing after the loops below. */
			list_add(&lte->write_streams_list,
				 &ctx->serial_streams);
		} else {
			ret = submit_stream_for_compression(lte, ctx);
			if (ret)
				goto out_destroy_ctx;
			if (lte->unhashed) {
				/* Hash now known; move the stream into the
				 * lookup table proper. */
				list_del(&lte->unhashed_list);
				lookup_table_insert(ctx->lookup_table, lte);
				lte->unhashed = 0;
			}
		}
	skip_to_progress:
		do_write_streams_progress(ctx->progress,
					  ctx->progress_func,
					  wim_resource_size(lte));
	}

	/* Drain all remaining in-flight compressed chunks. */
	while (!list_empty(&ctx->outstanding_streams)) {
		ret = receive_compressed_chunks(ctx);
		if (ret)
			goto out_destroy_ctx;
	}
	ret = 0;
out_destroy_ctx:
	main_writer_thread_destroy_ctx(ctx);
	return ret;
}
+
/* Returns the number of processors currently online, or a value < 1 if it
 * could not be determined (the caller falls back to a single thread).
 * (Also fixed the K&R-style empty parameter list to a proper (void)
 * prototype.) */
static long
get_default_num_threads(void)
{
#ifdef __WIN32__
	return win32_get_number_of_processors();
#else
	return sysconf(_SC_NPROCESSORS_ONLN);
#endif
}
+
+static int
+write_stream_list_parallel(struct list_head *stream_list,
+ struct wim_lookup_table *lookup_table,
+ FILE *out_fp,
+ int out_ctype,
+ int write_flags,
+ unsigned num_threads,
+ wimlib_progress_func_t progress_func,
+ union wimlib_progress_info *progress)
+{
+ int ret;
+ struct shared_queue res_to_compress_queue;
+ struct shared_queue compressed_res_queue;
+ pthread_t *compressor_threads = NULL;
+
+ if (num_threads == 0) {
+ long nthreads = get_default_num_threads();
+ if (nthreads < 1 || nthreads > UINT_MAX) {
+ WARNING("Could not determine number of processors! Assuming 1");
+ goto out_serial;
+ } else {
+ num_threads = nthreads;
+ }
+ }
+
+ progress->write_streams.num_threads = num_threads;
+
+ static const double MESSAGES_PER_THREAD = 2.0;
+ size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD);
+
+ DEBUG("Initializing shared queues (queue_size=%zu)", queue_size);
+
+ ret = shared_queue_init(&res_to_compress_queue, queue_size);
+ if (ret != 0)
+ goto out_serial;
+
+ ret = shared_queue_init(&compressed_res_queue, queue_size);
+ if (ret != 0)
+ goto out_destroy_res_to_compress_queue;
+
+ struct compressor_thread_params params;
+ params.res_to_compress_queue = &res_to_compress_queue;
+ params.compressed_res_queue = &compressed_res_queue;
+ params.compress = get_compress_func(out_ctype);
+
+ compressor_threads = MALLOC(num_threads * sizeof(pthread_t));
+ if (!compressor_threads) {
+ ret = WIMLIB_ERR_NOMEM;
+ goto out_destroy_compressed_res_queue;
+ }
+
+ for (unsigned i = 0; i < num_threads; i++) {
+ DEBUG("pthread_create thread %u", i);
+ ret = pthread_create(&compressor_threads[i], NULL,
+ compressor_thread_proc, ¶ms);
+ if (ret != 0) {
+ ret = -1;
+ ERROR_WITH_ERRNO("Failed to create compressor "
+ "thread %u", i);
+ num_threads = i;
+ goto out_join;
+ }
+ }
+
+ if (progress_func)
+ progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
+
+ struct main_writer_thread_ctx ctx;
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.stream_list = stream_list;
+ ctx.lookup_table = lookup_table;
+ ctx.out_fp = out_fp;
+ ctx.out_ctype = out_ctype;
+ ctx.res_to_compress_queue = &res_to_compress_queue;
+ ctx.compressed_res_queue = &compressed_res_queue;
+ ctx.num_messages = queue_size;
+ ctx.write_flags = write_flags;
+ ctx.progress_func = progress_func;
+ ctx.progress = progress;
+ ret = main_writer_thread_proc(&ctx);
+out_join:
+ for (unsigned i = 0; i < num_threads; i++)
+ shared_queue_put(&res_to_compress_queue, NULL);
+
+ for (unsigned i = 0; i < num_threads; i++) {
+ if (pthread_join(compressor_threads[i], NULL)) {
+ WARNING_WITH_ERRNO("Failed to join compressor "
+ "thread %u", i);
+ }
+ }
+ FREE(compressor_threads);
+out_destroy_compressed_res_queue:
+ shared_queue_destroy(&compressed_res_queue);
+out_destroy_res_to_compress_queue:
+ shared_queue_destroy(&res_to_compress_queue);
+ if (ret >= 0 && ret != WIMLIB_ERR_NOMEM)
+ return ret;
+out_serial:
+ WARNING("Falling back to single-threaded compression");
+ return write_stream_list_serial(stream_list,
+ lookup_table,
+ out_fp,
+ out_ctype,
+ write_flags,
+ progress_func,
+ progress);
+
+}
+#endif
+
+/*
+ * Write a list of streams to a WIM (@out_fp) using the compression type
+ * @out_ctype and up to @num_threads compressor threads.
+ */
+static int
+write_stream_list(struct list_head *stream_list,
+ struct wim_lookup_table *lookup_table,
+ FILE *out_fp, int out_ctype, int write_flags,
+ unsigned num_threads, wimlib_progress_func_t progress_func)
+{
+ struct wim_lookup_table_entry *lte;
+ size_t num_streams = 0;
+ u64 total_bytes = 0;
+ u64 total_compression_bytes = 0;
+ union wimlib_progress_info progress;
+ int ret;
+
+ if (list_empty(stream_list))
+ return 0;
+
+ /* Calculate the total size of the streams to be written. Note: this
+ * will be the uncompressed size, as we may not know the compressed size
+ * yet, and also this will assume that every unhashed stream will be
+ * written (which will not necessarily be the case). */
+ list_for_each_entry(lte, stream_list, write_streams_list) {
+ num_streams++;
+ total_bytes += wim_resource_size(lte);
+ if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
+ && (wim_resource_compression_type(lte) != out_ctype ||
+ (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)))
+ {
+ total_compression_bytes += wim_resource_size(lte);
+ }
+ }
+ progress.write_streams.total_bytes = total_bytes;
+ progress.write_streams.total_streams = num_streams;
+ progress.write_streams.completed_bytes = 0;
+ progress.write_streams.completed_streams = 0;
+ progress.write_streams.num_threads = num_threads;
+ progress.write_streams.compression_type = out_ctype;
+ progress.write_streams._private = 0;
+
+#ifdef ENABLE_MULTITHREADED_COMPRESSION
+ if (total_compression_bytes >= 1000000 && num_threads != 1)
+ ret = write_stream_list_parallel(stream_list,
+ lookup_table,
+ out_fp,
+ out_ctype,
+ write_flags,
+ num_threads,
+ progress_func,
+ &progress);
+ else
+#endif
+ ret = write_stream_list_serial(stream_list,
+ lookup_table,
+ out_fp,
+ out_ctype,
+ write_flags,
+ progress_func,
+ &progress);
+ return ret;
+}