+ msg->out_compressed_chunks[i] = msg->uncompressed_chunks[i];
+ msg->compressed_chunk_sizes[i] = msg->uncompressed_chunk_sizes[i];
+ }
+ }
+}
+
+/*
+ * Entry point for a compressor worker thread.
+ *
+ * Repeatedly takes a `struct message' full of uncompressed chunks off the
+ * res_to_compress_queue, compresses the chunks in place, and hands the
+ * message back on the compressed_res_queue.  A NULL message dequeued from
+ * the input queue is the shutdown signal.
+ *
+ * @arg is a `struct compressor_thread_params *'; always returns NULL.
+ */
+static void *compressor_thread_proc(void *arg)
+{
+	struct compressor_thread_params *p = arg;
+	struct shared_queue *in_q = p->res_to_compress_queue;
+	struct shared_queue *out_q = p->compressed_res_queue;
+	compress_func_t compress = p->compress;
+
+	DEBUG("Compressor thread ready");
+	for (;;) {
+		struct message *m = shared_queue_get(in_q);
+		if (m == NULL)
+			break;
+		compress_chunks(m, compress);
+		shared_queue_put(out_q, m);
+	}
+	DEBUG("Compressor thread terminating");
+	return NULL;
+}
+#endif
+
+/*
+ * Write each stream (resource) on @my_resources to the output WIM file.
+ *
+ * @my_resources:          list of `struct lookup_table_entry's linked
+ *                         through their staging_list members; each entry
+ *                         is unlinked from the list once written.
+ * @out_fp:                output WIM file stream.
+ * @out_ctype:             compression type for the output resources.
+ * @progress_func:         optional progress callback (may be NULL).
+ * @progress:              progress info updated per written stream.
+ * @write_resource_flags:  WIMLIB_RESOURCE_FLAG_* flags passed through to
+ *                         write_wim_resource().
+ *
+ * Returns 0 on success, or a nonzero error code propagated from
+ * write_wim_resource() on failure (entries not yet written remain on the
+ * list).
+ */
+static int do_write_stream_list(struct list_head *my_resources,
+				FILE *out_fp,
+				int out_ctype,
+				wimlib_progress_func_t progress_func,
+				union wimlib_progress_info *progress,
+				int write_resource_flags)
+{
+	int ret;
+	struct lookup_table_entry *lte, *tmp;
+
+	list_for_each_entry_safe(lte, tmp, my_resources, staging_list) {
+		/* Fixed: `&lte->output_resource_entry' and
+		 * `&lte->staging_list' below had been corrupted to
+		 * `<e->...' (an HTML "&lt;" escaping artifact), which is
+		 * not valid C. */
+		ret = write_wim_resource(lte,
+					 out_fp,
+					 out_ctype,
+					 &lte->output_resource_entry,
+					 write_resource_flags);
+		if (ret != 0)
+			return ret;
+		list_del(&lte->staging_list);
+		progress->write_streams.completed_bytes +=
+			wim_resource_size(lte);
+		progress->write_streams.completed_streams++;
+		if (progress_func) {
+			progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
+				      progress);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Write a list of streams to the output WIM file using the main thread
+ * only (no parallel compression).  Translates the write flags into
+ * resource flags, reports the initial WRITE_STREAMS progress, then
+ * delegates the actual writing to do_write_stream_list().
+ *
+ * Returns 0 on success or a nonzero error code on failure.
+ */
+static int write_stream_list_serial(struct list_head *stream_list,
+				    FILE *out_fp,
+				    int out_ctype,
+				    int write_flags,
+				    wimlib_progress_func_t progress_func,
+				    union wimlib_progress_info *progress)
+{
+	int write_resource_flags =
+		(write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS) ?
+			WIMLIB_RESOURCE_FLAG_RECOMPRESS : 0;
+
+	progress->write_streams.num_threads = 1;
+	if (progress_func)
+		progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
+	return do_write_stream_list(stream_list, out_fp, out_ctype,
+				    progress_func, progress,
+				    write_resource_flags);
+}
+
+#ifdef ENABLE_MULTITHREADED_COMPRESSION
+/*
+ * Write the compressed chunks contained in @msg to the output file,
+ * recording each chunk's starting offset in @chunk_tab as it goes.
+ *
+ * Returns 0 on success, or WIMLIB_ERR_WRITE if a chunk could not be
+ * fully written.
+ */
+static int write_wim_chunks(struct message *msg, FILE *out_fp,
+			    struct chunk_table *chunk_tab)
+{
+	unsigned i = 0;
+
+	while (i < msg->num_chunks) {
+		unsigned csize = msg->compressed_chunk_sizes[i];
+
+		DEBUG2("Write wim chunk %u of %u (csize = %u)",
+		       i, msg->num_chunks, csize);
+
+		if (fwrite(msg->out_compressed_chunks[i], 1, csize, out_fp)
+		    != csize)
+		{
+			ERROR_WITH_ERRNO("Failed to write WIM chunk");
+			return WIMLIB_ERR_WRITE;
+		}
+
+		/* Record where this chunk started, then advance the
+		 * running offset past it. */
+		*chunk_tab->cur_offset_p++ = chunk_tab->cur_offset;
+		chunk_tab->cur_offset += csize;
+		i++;
+	}
+	return 0;
+}
+
+/*
+ * This function is executed by the main thread when the resources are being
+ * compressed in parallel. The main thread is in charge of all reading of the
+ * uncompressed data and writing of the compressed data. The compressor threads
+ * *only* do compression from/to in-memory buffers.
+ *
+ * Each unit of work given to a compressor thread is up to MAX_CHUNKS_PER_MSG
+ * chunks of compressed data to compress, represented in a `struct message'.
+ * Each message is passed from the main thread to a worker thread through the
+ * res_to_compress_queue, and it is passed back through the
+ * compressed_res_queue.
+ */
+static int main_writer_thread_proc(struct list_head *stream_list,
+ FILE *out_fp,
+ int out_ctype,
+ struct shared_queue *res_to_compress_queue,
+ struct shared_queue *compressed_res_queue,
+ size_t queue_size,
+ int write_flags,
+ wimlib_progress_func_t progress_func,
+ union wimlib_progress_info *progress)
+{
+ int ret;
+
+ struct message msgs[queue_size];
+ ZERO_ARRAY(msgs);
+
+ // Initially, all the messages are available to use.
+ LIST_HEAD(available_msgs);
+ for (size_t i = 0; i < ARRAY_LEN(msgs); i++)
+ list_add(&msgs[i].list, &available_msgs);
+
+ // outstanding_resources is the list of resources that currently have
+ // had chunks sent off for compression.
+ //
+ // The first stream in outstanding_resources is the stream that is
+ // currently being written (cur_lte).
+ //
+ // The last stream in outstanding_resources is the stream that is
+ // currently being read and chunks fed to the compressor threads
+ // (next_lte).
+ //
+ // Depending on the number of threads and the sizes of the resource,
+ // the outstanding streams list may contain streams between cur_lte and
+ // next_lte that have all their chunks compressed or being compressed,
+ // but haven't been written yet.
+ //
+ LIST_HEAD(outstanding_resources);
+ struct list_head *next_resource = stream_list->next;
+ struct lookup_table_entry *next_lte = NULL;
+ u64 next_chunk = 0;
+ u64 next_num_chunks = 0;
+
+ // As in write_wim_resource(), each resource we read is checksummed.
+ SHA_CTX next_sha_ctx;
+ u8 next_hash[SHA1_HASH_SIZE];
+
+ // Resources that don't need any chunks compressed are added to this
+ // list and written directly by the main thread.
+ LIST_HEAD(my_resources);
+
+ struct lookup_table_entry *cur_lte = NULL;
+ struct chunk_table *cur_chunk_tab = NULL;
+ struct message *msg;
+
+#ifdef WITH_NTFS_3G
+ ntfs_inode *ni = NULL;
+#endif
+
+ DEBUG("Initializing buffers for uncompressed "
+ "and compressed data (%zu bytes needed)",
+ queue_size * MAX_CHUNKS_PER_MSG * WIM_CHUNK_SIZE * 2);
+
+ // Pre-allocate all the buffers that will be needed to do the chunk
+ // compression.
+ for (size_t i = 0; i < ARRAY_LEN(msgs); i++) {
+ for (size_t j = 0; j < MAX_CHUNKS_PER_MSG; j++) {
+ msgs[i].compressed_chunks[j] = MALLOC(WIM_CHUNK_SIZE);
+
+ // The extra 8 bytes is because longest_match() in lz.c
+ // may read a little bit off the end of the uncompressed
+ // data. It doesn't need to be initialized--- we really
+ // just need to avoid accessing an unmapped page.
+ msgs[i].uncompressed_chunks[j] = MALLOC(WIM_CHUNK_SIZE + 8);
+ if (msgs[i].compressed_chunks[j] == NULL ||
+ msgs[i].uncompressed_chunks[j] == NULL)
+ {
+ ERROR("Could not allocate enough memory for "
+ "multi-threaded compression");
+ ret = WIMLIB_ERR_NOMEM;
+ goto out;
+ }
+ }
+ }
+
+ // This loop is executed until all resources have been written, except
+ // possibly a few that have been added to the @my_resources list for
+ // writing later.
+ while (1) {
+ // Send chunks to the compressor threads until either (a) there
+ // are no more messages available since they were all sent off,
+ // or (b) there are no more resources that need to be
+ // compressed.
+ while (!list_empty(&available_msgs)) {
+ if (next_chunk == next_num_chunks) {
+ // If next_chunk == next_num_chunks, there are
+ // no more chunks to write in the current
+ // stream. So, check the SHA1 message digest of
+ // the stream that was just finished (unless
+ // next_lte == NULL, which is the case the very
+ // first time this loop is entered, and also
+ // near the very end of the compression when
+ // there are no more streams.) Then, advance to
+ // the next stream (if there is one).
+ if (next_lte != NULL) {
+ #ifdef WITH_NTFS_3G
+ end_wim_resource_read(next_lte, ni);
+ ni = NULL;
+ #else
+ end_wim_resource_read(next_lte);
+ #endif
+ DEBUG2("Finalize SHA1 md (next_num_chunks=%zu)",
+ next_num_chunks);
+ sha1_final(next_hash, &next_sha_ctx);
+ if (!hashes_equal(next_lte->hash, next_hash)) {
+ ERROR("WIM resource has incorrect hash!");
+ if (next_lte->resource_location ==
+ RESOURCE_IN_FILE_ON_DISK)
+ {
+ ERROR("We were reading it from `%s'; "
+ "maybe it changed while we were "
+ "reading it.",
+ next_lte->file_on_disk);
+ }
+ ret = WIMLIB_ERR_INVALID_RESOURCE_HASH;
+ goto out;
+ }
+ }
+
+ // Advance to the next resource.
+ //
+ // If the next resource needs no compression, just write
+ // it with this thread (not now though--- we could be in
+ // the middle of writing another resource.) Keep doing
+ // this until we either get to the end of the resources
+ // list, or we get to a resource that needs compression.
+ while (1) {
+ if (next_resource == stream_list) {
+ next_lte = NULL;
+ break;
+ }
+ next_lte = container_of(next_resource,
+ struct lookup_table_entry,
+ staging_list);
+ next_resource = next_resource->next;
+ if ((!(write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
+ && wim_resource_compression_type(next_lte) == out_ctype)
+ || wim_resource_size(next_lte) == 0)
+ {
+ list_add_tail(&next_lte->staging_list,
+ &my_resources);
+ } else {
+ list_add_tail(&next_lte->staging_list,
+ &outstanding_resources);
+ next_chunk = 0;
+ next_num_chunks = wim_resource_chunks(next_lte);
+ sha1_init(&next_sha_ctx);
+ INIT_LIST_HEAD(&next_lte->msg_list);
+ #ifdef WITH_NTFS_3G
+ ret = prepare_resource_for_read(next_lte, &ni);
+ #else
+ ret = prepare_resource_for_read(next_lte);
+ #endif
+
+ if (ret != 0)
+ goto out;
+ if (cur_lte == NULL)
+ cur_lte = next_lte;
+ break;
+ }
+ }
+ }
+
+ if (next_lte == NULL)
+ break;
+
+ // Get a message from the available messages
+ // list
+ msg = container_of(available_msgs.next,
+ struct message,
+ list);
+
+ // ... and delete it from the available messages
+ // list
+ list_del(&msg->list);
+
+ // Initialize the message with the chunks to
+ // compress.
+ msg->num_chunks = min(next_num_chunks - next_chunk,
+ MAX_CHUNKS_PER_MSG);
+ msg->lte = next_lte;
+ msg->complete = false;
+ msg->begin_chunk = next_chunk;
+
+ unsigned size = WIM_CHUNK_SIZE;
+ for (unsigned i = 0; i < msg->num_chunks; i++) {
+
+ // Read chunk @next_chunk of the stream into the
+ // message so that a compressor thread can
+ // compress it.
+
+ if (next_chunk == next_num_chunks - 1 &&
+ wim_resource_size(next_lte) % WIM_CHUNK_SIZE != 0)
+ {
+ size = wim_resource_size(next_lte) % WIM_CHUNK_SIZE;
+ }
+
+
+ DEBUG2("Read resource (size=%u, offset=%zu)",
+ size, next_chunk * WIM_CHUNK_SIZE);
+
+ msg->uncompressed_chunk_sizes[i] = size;
+
+ ret = read_wim_resource(next_lte,
+ msg->uncompressed_chunks[i],
+ size,
+ next_chunk * WIM_CHUNK_SIZE,
+ 0);
+ if (ret != 0)
+ goto out;
+ sha1_update(&next_sha_ctx,
+ msg->uncompressed_chunks[i], size);
+ next_chunk++;
+ }
+
+ // Send the compression request
+ list_add_tail(&msg->list, &next_lte->msg_list);
+ shared_queue_put(res_to_compress_queue, msg);
+ DEBUG2("Compression request sent");
+ }
+
+ // If there are no outstanding resources, there are no more
+ // resources that need to be written.
+ if (list_empty(&outstanding_resources)) {
+ ret = 0;
+ goto out;
+ }
+
+ // Get the next message from the queue and process it.
+ // The message will contain 1 or more data chunks that have been
+ // compressed.
+ msg = shared_queue_get(compressed_res_queue);
+ msg->complete = true;
+
+ // Is this the next chunk in the current resource? If it's not
+ // (i.e., an earlier chunk in a same or different resource
+ // hasn't been compressed yet), do nothing, and keep this
+ // message around until all earlier chunks are received.
+ //
+ // Otherwise, write all the chunks we can.
+ while (cur_lte != NULL && !list_empty(&cur_lte->msg_list)
+ && (msg = container_of(cur_lte->msg_list.next,
+ struct message,
+ list))->complete)
+ {
+ DEBUG2("Complete msg (begin_chunk=%"PRIu64")", msg->begin_chunk);
+ if (msg->begin_chunk == 0) {
+ DEBUG2("Begin chunk tab");
+
+ // This is the first set of chunks. Leave space
+ // for the chunk table in the output file.
+ off_t cur_offset = ftello(out_fp);
+ if (cur_offset == -1) {
+ ret = WIMLIB_ERR_WRITE;
+ goto out;
+ }
+ ret = begin_wim_resource_chunk_tab(cur_lte,
+ out_fp,
+ cur_offset,
+ &cur_chunk_tab);
+ if (ret != 0)
+ goto out;
+ }
+
+ // Write the compressed chunks from the message.
+ ret = write_wim_chunks(msg, out_fp, cur_chunk_tab);