+}
+
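+/* Receive the next message of compressed chunks from the compressor threads,
+ * then write out as many in-order, completed messages as are now available
+ * for the stream at the head of the outstanding_streams list, finishing each
+ * stream whose final chunk has been written. */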
+static int
+receive_compressed_chunks(struct main_writer_thread_ctx *ctx)
+{
+ struct message *msg;
+ struct wim_lookup_table_entry *cur_lte;
+ int ret;
+
+ wimlib_assert(!list_empty(&ctx->outstanding_streams));
+ wimlib_assert(ctx->num_outstanding_messages != 0);
+
+ cur_lte = container_of(ctx->outstanding_streams.next,
+ struct wim_lookup_table_entry,
+ being_compressed_list);
+
+ /* Get the next message from the queue and process it.
+ * The message will contain 1 or more data chunks that have been
+ * compressed. */
+ msg = shared_queue_get(ctx->compressed_res_queue);
+ msg->complete = true;
+ --ctx->num_outstanding_messages;
+
+ /* Is this the next chunk in the current resource? If it's not
+ * (i.e., an earlier chunk of the same or a different resource
+ * hasn't been compressed yet), do nothing, and keep this
+ * message around until all earlier chunks are received.
+ *
+ * Otherwise, write all the chunks we can. */
+ while (cur_lte != NULL &&
+ !list_empty(&cur_lte->msg_list) &&
+ (msg = container_of(cur_lte->msg_list.next,
+ struct message,
+ list))->complete)
+ {
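+ /* This message holds the next chunks needed for the current
+ * stream. Recycle it onto the available list (its buffers are
+ * not reused until the main thread prepares a new message),
+ * then write its compressed chunks below. */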
+ list_move(&msg->list, &ctx->available_msgs);
+ if (msg->begin_chunk == 0) {
+ /* This is the first set of chunks. Leave space
+ * for the chunk table in the output file. */
+ off_t cur_offset = ftello(ctx->out_fp);
+ if (cur_offset == -1)
+ return WIMLIB_ERR_WRITE;
+ ret = begin_wim_resource_chunk_tab(cur_lte,
+ ctx->out_fp,
+ cur_offset,
+ &ctx->cur_chunk_tab);
+ if (ret)
+ return ret;
+ }
+
+ /* Write the compressed chunks from the message. */
+ ret = write_wim_chunks(msg, ctx->out_fp, ctx->cur_chunk_tab);
+ if (ret)
+ return ret;
+
+ /* Was this the last chunk of the stream? If so, finish
+ * it. */
+ if (list_empty(&cur_lte->msg_list) &&
+ msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks)
+ {
+ u64 res_csize;
+ off_t offset;
+
+ ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab,
+ ctx->out_fp,
+ &res_csize);
+ if (ret)
+ return ret;
+
+ list_del(&cur_lte->being_compressed_list);
+
+ /* Grab the offset of this stream in the output file
+ * from the chunk table before we free it. */
+ offset = ctx->cur_chunk_tab->file_offset;
+
+ FREE(ctx->cur_chunk_tab);
+ ctx->cur_chunk_tab = NULL;
+
+ if (res_csize >= wim_resource_size(cur_lte)) {
+ /* Oops! We compressed the resource to
+ * larger than the original size. Write
+ * the resource uncompressed instead. */
+ DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
+ "writing uncompressed instead",
+ wim_resource_size(cur_lte), res_csize);
+ ret = fflush_and_ftruncate(ctx->out_fp, offset);
+ if (ret)
+ return ret;
+ ret = write_wim_resource(cur_lte,
+ ctx->out_fp,
+ WIMLIB_COMPRESSION_TYPE_NONE,
+ &cur_lte->output_resource_entry,
+ ctx->write_resource_flags);
+ if (ret)
+ return ret;
+ } else {
+ cur_lte->output_resource_entry.size =
+ res_csize;
+
+ cur_lte->output_resource_entry.original_size =
+ cur_lte->resource_entry.original_size;
+
+ cur_lte->output_resource_entry.offset =
+ offset;
+
+ cur_lte->output_resource_entry.flags =
+ cur_lte->resource_entry.flags |
+ WIM_RESHDR_FLAG_COMPRESSED;
+ }
+
+ do_write_streams_progress(ctx->progress,
+ ctx->progress_func,
+ wim_resource_size(cur_lte));
+
+ /* Since we just finished writing a stream, write any
+ * streams that have been added to the serial_streams
+ * list for direct writing by the main thread (e.g.
+ * resources that don't need to be compressed because
+ * the desired compression type is the same as the
+ * previous compression type). */
+ if (!list_empty(&ctx->serial_streams)) {
+ ret = do_write_stream_list_serial(&ctx->serial_streams,
+ ctx->lookup_table,
+ ctx->out_fp,
+ ctx->out_ctype,
+ ctx->write_resource_flags,
+ ctx->progress_func,
+ ctx->progress);
+ if (ret)
+ return ret;
+ }
+
+ /* Advance to the next stream to write. */
+ if (list_empty(&ctx->outstanding_streams)) {
+ cur_lte = NULL;
+ } else {
+ cur_lte = container_of(ctx->outstanding_streams.next,
+ struct wim_lookup_table_entry,
+ being_compressed_list);
+ }
+ }
+ }
+ return 0;
+}
+
+/* Called when the main thread has read a new chunk of data. */
+static int
+main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx)
+{
+ struct main_writer_thread_ctx *ctx = _ctx;
+ int ret;
+ struct message *next_msg;
+ u64 next_chunk_in_msg;
+
+ /* Update SHA1 message digest for the stream currently being read by the
+ * main thread. */
+ sha1_update(&ctx->next_sha_ctx, chunk, chunk_size);
+
+ /* We send chunks of data to the compressor threads in batches, which we
+ * refer to as "messages". @next_msg is the message that is currently
+ * being prepared to send off. If it is NULL, that indicates that we
+ * need to start a new message. */
+ next_msg = ctx->next_msg;
+ if (!next_msg) {
+ /* We need to start a new message. First check to see if there
+ * is a message available in the list of available messages. If
+ * so, we can just take one. If not, all the messages (there is
+ * a fixed number of them, proportional to the number of
+ * threads) have been sent off to the compressor threads, so we
+ * receive messages from the compressor threads containing
+ * compressed chunks of data.
+ *
+ * We may need to receive multiple messages before one is
+ * actually available to use because messages received that are
+ * *not* for the very next set of chunks to compress must be
+ * buffered until it's time to write those chunks. */
+ while (list_empty(&ctx->available_msgs)) {
+ ret = receive_compressed_chunks(ctx);
+ if (ret)
+ return ret;
+ }
+
+ next_msg = container_of(ctx->available_msgs.next,
+ struct message, list);
+ list_del(&next_msg->list);
+ next_msg->complete = false;
+ next_msg->begin_chunk = ctx->next_chunk;
+ next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG,
+ ctx->next_num_chunks - ctx->next_chunk);
+ ctx->next_msg = next_msg;
+ }
+
+ /* Fill in the next chunk to compress */
+ next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk;
+
+ next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size;
+ memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg],
+ chunk, chunk_size);
+ ctx->next_chunk++;
+ if (++next_chunk_in_msg == next_msg->num_chunks) {
+ /* Send off an array of chunks to compress */
+ list_add_tail(&next_msg->list, &ctx->next_lte->msg_list);
+ shared_queue_put(ctx->res_to_compress_queue, next_msg);
+ ++ctx->num_outstanding_messages;
+ ctx->next_msg = NULL;
+ }
+ return 0;
+}
+
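+/* Called when the main thread has finished reading all stream data: receive
+ * and write any remaining compressed chunks, then write the streams still
+ * queued on the serial_streams list for direct writing by the main thread. */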
+static int
+main_writer_thread_finish(void *_ctx)
+{
+ struct main_writer_thread_ctx *ctx = _ctx;
+ int ret;
+ while (ctx->num_outstanding_messages != 0) {
+ ret = receive_compressed_chunks(ctx);
+ if (ret)
+ return ret;
+ }
+ wimlib_assert(list_empty(&ctx->outstanding_streams));
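+ /* All compressed streams have been written; write any streams that
+ * remain queued for serial writing by the main thread. */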
+ return do_write_stream_list_serial(&ctx->serial_streams,
+ ctx->lookup_table,
+ ctx->out_fp,
+ ctx->out_ctype,
+ ctx->write_resource_flags,
+ ctx->progress_func,
+ ctx->progress);
+}
+
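+/* Submit a stream for compression: read its data chunk by chunk, passing each
+ * chunk to main_writer_thread_cb(), which batches the chunks into messages for
+ * the compressor threads. */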
+static int
+submit_stream_for_compression(struct wim_lookup_table_entry *lte,
+ struct main_writer_thread_ctx *ctx)
+{
+ int ret;
+
+ /* Read the entire stream @lte, feeding its data chunks to the
+ * compressor threads. Also SHA1-sum the stream; this is required in
+ * the case that @lte is unhashed, and a nice additional verification
+ * when @lte is already hashed. */
+ sha1_init(&ctx->next_sha_ctx);
+ ctx->next_chunk = 0;
+ ctx->next_num_chunks = wim_resource_chunks(lte);
+ ctx->next_lte = lte;
+ INIT_LIST_HEAD(&lte->msg_list);
+ list_add_tail(&lte->being_compressed_list, &ctx->outstanding_streams);
+ ret = read_resource_prefix(lte, wim_resource_size(lte),
+ main_writer_thread_cb, ctx, 0);
+ if (ret == 0) {
+ wimlib_assert(ctx->next_chunk == ctx->next_num_chunks);
+ ret = finalize_and_check_sha1(&ctx->next_sha_ctx, lte);
+ }