+ struct write_streams_ctx ctx;
+ struct list_head raw_copy_resources;
+
+ remove_zero_length_streams(stream_list);
+
+ if (list_empty(stream_list)) {
+ DEBUG("No streams to write.");
+ return 0;
+ }
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ /* Pre-sorting the streams is required for compute_stream_list_stats().
+ * Afterwards, read_stream_list() need not sort them again. */
+ ret = sort_stream_list_by_sequential_order(stream_list,
+ offsetof(struct wim_lookup_table_entry,
+ write_streams_list));
+ if (ret)
+ return ret;
+
+ ctx.out_fd = out_fd;
+ ctx.lookup_table = lookup_table;
+ ctx.out_ctype = out_ctype;
+ ctx.out_chunk_size = out_chunk_size;
+ ctx.write_resource_flags = write_resource_flags;
+ ctx.filter_ctx = filter_ctx;
+
+ if (out_chunk_size <= STACK_MAX) {
+ ctx.chunk_buf = alloca(out_chunk_size);
+ } else {
+ ctx.chunk_buf = MALLOC(out_chunk_size);
+ if (ctx.chunk_buf == NULL) {
+ ret = WIMLIB_ERR_NOMEM;
+ goto out_destroy_context;
+ }
+ }
+ ctx.chunk_buf_filled = 0;
+
+ compute_stream_list_stats(stream_list, &ctx);
+
+ ctx.progress_data.progress_func = progress_func;
+
+ ctx.num_bytes_to_compress = find_raw_copy_resources(stream_list,
+ write_resource_flags,
+ out_ctype,
+ out_chunk_size,
+ &raw_copy_resources);
+
+ DEBUG("Writing stream list "
+ "(offset = %"PRIu64", write_resource_flags=0x%08x, "
+ "out_ctype=%d, out_chunk_size=%u, num_threads=%u, "
+ "total_bytes=%"PRIu64", num_bytes_to_compress=%"PRIu64")",
+ out_fd->offset, write_resource_flags,
+ out_ctype, out_chunk_size, num_threads,
+ ctx.progress_data.progress.write_streams.total_bytes,
+ ctx.num_bytes_to_compress);
+
+ if (ctx.num_bytes_to_compress == 0) {
+ DEBUG("No compression needed; skipping to raw copy!");
+ goto out_write_raw_copy_resources;
+ }
+
+ /* Unless uncompressed output was required, allocate a chunk_compressor
+ * to do compression. There are serial and parallel implementations of
+ * the chunk_compressor interface. We default to parallel using the
+ * specified number of threads, unless the upper bound on the number of
+ * bytes needing to be compressed is less than 2000000 (heuristic value). */
+ if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
+
+ if (ctx.num_bytes_to_compress >= 2000000) {
+ ret = new_parallel_chunk_compressor(out_ctype,
+ out_chunk_size,
+ num_threads, 0,
+ &ctx.compressor);
+ if (ret) {
+ DEBUG("Couldn't create parallel chunk compressor "
+ "(status %d)", ret);
+ }
+ }
+
+ if (ctx.compressor == NULL) {
+ if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX) {
+ ret = wimlib_lzx_alloc_context(out_chunk_size,
+ NULL,
+ comp_ctx);
+ if (ret)
+ goto out_destroy_context;
+ }
+ ret = new_serial_chunk_compressor(out_ctype, out_chunk_size,
+ *comp_ctx, &ctx.compressor);
+ if (ret)
+ goto out_destroy_context;
+ }
+ }
+
+ if (ctx.compressor)
+ ctx.progress_data.progress.write_streams.num_threads = ctx.compressor->num_threads;
+ else
+ ctx.progress_data.progress.write_streams.num_threads = 1;
+
+ DEBUG("Actually using %u threads",
+ ctx.progress_data.progress.write_streams.num_threads);