+ ctx.out_fd = out_fd;
+ ctx.lookup_table = lookup_table;
+ ctx.out_ctype = out_ctype;
+ ctx.out_chunk_size = out_chunk_size;
+ ctx.write_resource_flags = write_resource_flags;
+ ctx.filter_ctx = filter_ctx;
+
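+ /* Allocate a buffer to hold the data of one output chunk.  A small
+ * buffer is placed on the stack; a larger one is heap-allocated and
+ * freed at out_destroy_context. */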
+ if (out_chunk_size <= STACK_MAX) {
+ ctx.chunk_buf = alloca(out_chunk_size);
+ } else {
+ ctx.chunk_buf = MALLOC(out_chunk_size);
+ if (ctx.chunk_buf == NULL) {
+ ret = WIMLIB_ERR_NOMEM;
+ goto out_destroy_context;
+ }
+ }
+ ctx.chunk_buf_filled = 0;
+
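+ /* Gather statistics about the stream list (e.g. the total number of
+ * bytes to be written), which feed the progress data reported below. */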
+ compute_stream_list_stats(stream_list, &ctx);
+
+ ctx.progress_data.progress_func = progress_func;
+
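+ /* Move resources whose compressed data can be copied directly, without
+ * recompression, onto raw_copy_resources; they are written separately
+ * at out_write_raw_copy_resources. */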
+ ctx.num_bytes_to_compress = find_raw_copy_resources(stream_list,
+ write_resource_flags,
+ out_ctype,
+ out_chunk_size,
+ &raw_copy_resources);
+
+ DEBUG("Writing stream list "
+ "(offset = %"PRIu64", write_resource_flags=0x%08x, "
+ "out_ctype=%d, out_chunk_size=%u, num_threads=%u, "
+ "total_bytes=%"PRIu64", num_bytes_to_compress=%"PRIu64")",
+ out_fd->offset, write_resource_flags,
+ out_ctype, out_chunk_size, num_threads,
+ ctx.progress_data.progress.write_streams.total_bytes,
+ ctx.num_bytes_to_compress);
+
+ if (ctx.num_bytes_to_compress == 0) {
+ DEBUG("No compression needed; skipping to raw copy!");
+ goto out_write_raw_copy_resources;
+ }
+
+ /* Unless uncompressed output was required, allocate a chunk_compressor
+ * to do compression.  There are serial and parallel implementations of
+ * the chunk_compressor interface.  We default to the parallel one using
+ * the specified number of threads, unless the upper bound on the number
+ * of bytes needing to be compressed is less than 2000000 (a heuristic
+ * value). */
+ if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
+
+ if (ctx.num_bytes_to_compress >= 2000000) {
+ ret = new_parallel_chunk_compressor(out_ctype,
+ out_chunk_size,
+ num_threads, 0,
+ &ctx.compressor);
+ if (ret) {
+ DEBUG("Couldn't create parallel chunk compressor "
+ "(status %d)", ret);
+ }
+ }
+
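+ /* Fall back to a serial compressor if a parallel one was not created,
+ * whether because the data was too small to justify one or because
+ * creating the parallel compressor failed. */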
+ if (ctx.compressor == NULL) {
+ if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX) {
+ ret = wimlib_lzx_alloc_context(out_chunk_size,
+ NULL,
+ comp_ctx);
+ if (ret)
+ goto out_destroy_context;
+ }
+ ret = new_serial_chunk_compressor(out_ctype, out_chunk_size,
+ *comp_ctx, &ctx.compressor);
+ if (ret)
+ goto out_destroy_context;
+ }
+ }
+
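+ /* A sketch of the chunk_compressor interface assumed here; only the
+ * members actually used in this function are known from this context,
+ * and the full definition lives elsewhere:
+ *
+ *	struct chunk_compressor {
+ *		unsigned num_threads;
+ *		void (*destroy)(struct chunk_compressor *);
+ *		...
+ *	};
+ */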
+ if (ctx.compressor)
+ ctx.progress_data.progress.write_streams.num_threads = ctx.compressor->num_threads;
+ else
+ ctx.progress_data.progress.write_streams.num_threads = 1;
+
+ DEBUG("Actually using %u threads",
+ ctx.progress_data.progress.write_streams.num_threads);
+
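+ /* List of streams that have been started but not yet finalized; when
+ * packing streams into a single resource, this list is walked again
+ * afterwards to fix up each stream's resource header. */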
+ INIT_LIST_HEAD(&ctx.pending_streams);
+
+ if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PACK_STREAMS) {
+ ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress);
+ if (ret)
+ goto out_destroy_context;
+ }
+
+ /* Read the list of streams needing to be compressed, using the
+ * specified callbacks to process the data as it is read. */
+
+ struct read_stream_list_callbacks cbs = {
+ .begin_stream = write_stream_begin_read,
+ .begin_stream_ctx = &ctx,
+ .consume_chunk = write_stream_process_chunk,
+ .consume_chunk_ctx = &ctx,
+ .end_stream = write_stream_end_read,
+ .end_stream_ctx = &ctx,
+ };
+
+ ret = read_stream_list(stream_list,
+ offsetof(struct wim_lookup_table_entry, write_streams_list),
+ &cbs,
+ STREAM_LIST_ALREADY_SORTED |
+ VERIFY_STREAM_HASHES |
+ COMPUTE_MISSING_STREAM_HASHES);
+
+ if (ret)
+ goto out_destroy_context;
+
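+ /* Flush any buffered chunk data that has not yet been compressed and
+ * written. */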
+ ret = finish_remaining_chunks(&ctx);
+ if (ret)
+ goto out_destroy_context;
+
+ if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PACK_STREAMS) {
+ struct wim_reshdr reshdr;
+ struct wim_lookup_table_entry *lte;
+ u64 offset_in_res;
+
+ ret = end_write_resource(&ctx, &reshdr);
+ if (ret)
+ goto out_destroy_context;
+
+ DEBUG("Ending packed resource: %lu %lu %lu.",
+ reshdr.offset_in_wim,
+ reshdr.size_in_wim,
+ reshdr.uncompressed_size);
+
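+ /* Fix up each stream's resource header to point into the packed
+ * resource: offset_in_wim becomes the stream's offset within the
+ * uncompressed resource data, size_in_wim holds the stream's
+ * uncompressed size, and uncompressed_size is set to 0, while
+ * out_res_offset_in_wim and out_res_size_in_wim record where the
+ * packed resource itself was written. */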
+ offset_in_res = 0;
+ list_for_each_entry(lte, &ctx.pending_streams, write_streams_list) {
+ lte->out_reshdr.size_in_wim = lte->size;
+ lte->out_reshdr.flags = filter_resource_flags(lte->flags);
+ lte->out_reshdr.flags |= WIM_RESHDR_FLAG_PACKED_STREAMS;
+ lte->out_reshdr.uncompressed_size = 0;
+ lte->out_reshdr.offset_in_wim = offset_in_res;
+ lte->out_res_offset_in_wim = reshdr.offset_in_wim;
+ lte->out_res_size_in_wim = reshdr.size_in_wim;
+ /*lte->out_res_uncompressed_size = reshdr.uncompressed_size;*/
+ offset_in_res += lte->size;
+ }
+ wimlib_assert(offset_in_res == reshdr.uncompressed_size);
+ }
+
+out_write_raw_copy_resources:
+ /* Copy any compressed resources for which the raw data can be reused
+ * without decompression. */
+ ret = write_raw_copy_resources(&raw_copy_resources, ctx.out_fd);
+
+out_destroy_context:
+ if (out_chunk_size > STACK_MAX)
+ FREE(ctx.chunk_buf);
+ FREE(ctx.chunk_csizes);
+ if (ctx.compressor)
+ ctx.compressor->destroy(ctx.compressor);
+ DEBUG("Done (ret=%d)", ret);
+ return ret;
+}
+
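+/* Write the data of a single stream as a resource in the output WIM.
+ * This is a convenience wrapper that puts the stream in a one-element
+ * list and calls write_stream_list(). */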
+static int
+write_wim_resource(struct wim_lookup_table_entry *lte,
+ struct filedes *out_fd,
+ int out_ctype,
+ u32 out_chunk_size,
+ int write_resource_flags,
+ struct wimlib_lzx_context **comp_ctx)
+{
+ LIST_HEAD(stream_list);
+ list_add(&lte->write_streams_list, &stream_list);
+ lte->will_be_in_output_wim = 1;
+ return write_stream_list(&stream_list,
+ out_fd,
+ write_resource_flags & ~WIMLIB_WRITE_RESOURCE_FLAG_PACK_STREAMS,
+ out_ctype,
+ out_chunk_size,
+ 1,
+ NULL,
+ NULL,
+ comp_ctx,
+ NULL);
+}