+ /* Write the chunk data. */
+ ret = full_write(ctx->out_fd, cchunk, csize);
+ if (ret)
+ goto error;
+
+ ctx->cur_write_res_offset += usize;
+
+ do_write_streams_progress(&ctx->progress_data,
+ usize, false, lte);
+
+ if (ctx->cur_write_res_offset == ctx->cur_write_res_size &&
+ !(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS))
+ {
+ wimlib_assert(ctx->cur_write_res_offset == lte->size);
+
+ /* Finished writing a stream in non-packed mode. */
+
+ ret = end_write_resource(ctx, &lte->out_reshdr);
+ if (ret)
+ return ret;
+
+ lte->out_reshdr.flags = filter_resource_flags(lte->flags);
+ if (ctx->compressor != NULL)
+ lte->out_reshdr.flags |= WIM_RESHDR_FLAG_COMPRESSED;
+
+ if (ctx->compressor != NULL &&
+ lte->out_reshdr.size_in_wim >= lte->out_reshdr.uncompressed_size &&
+ !(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) &&
+ !(lte->flags & WIM_RESHDR_FLAG_PACKED_STREAMS))
+ {
+ /* Stream did not compress to less than its original
+ * size. If we're not writing a pipable WIM (which
+ * could mean the output file descriptor is
+ * non-seekable), and the stream isn't located in a
+ * resource pack (which would make reading it again
+ * costly), truncate the file to the start of the stream
+ * and write it uncompressed instead. */
+ DEBUG("Stream of size %"PRIu64" did not compress to "
+ "less than original size; writing uncompressed.",
+ lte->size);
+ ret = write_stream_uncompressed(lte, ctx->out_fd);
+ if (ret)
+ return ret;
+ }
+
+ wimlib_assert(lte->out_reshdr.uncompressed_size == lte->size);
+
+ list_del(&lte->write_streams_list);
+ ctx->cur_write_res_offset = 0;
+ }
+
+ return 0;
+
+error:
+ ERROR_WITH_ERRNO("Write error");
+ return ret;
+}
+
+static int
+submit_chunk_for_compression(struct write_streams_ctx *ctx,
+ const void *chunk, size_t size)
+{
+ /* While we are unable to submit the chunk for compression (due to too
+ * many chunks already outstanding), retrieve and write the next
+ * compressed chunk. */
+ while (!ctx->compressor->submit_chunk(ctx->compressor, chunk, size)) {
+ const void *cchunk;
+ unsigned csize;
+ unsigned usize;
+ bool bret;
+ int ret;
+
+ bret = ctx->compressor->get_chunk(ctx->compressor,
+ &cchunk, &csize, &usize);
+
+ wimlib_assert(bret);
+
+ ret = write_chunk(ctx, cchunk, csize, usize);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+ /* Process the next chunk of data to be written to a WIM resource. */
+ static int
+ write_stream_process_chunk(const void *chunk, size_t size, void *_ctx)
+ {
+ struct write_streams_ctx *ctx = _ctx;
+ int ret;
+ const u8 *chunkptr, *chunkend;
+
+ wimlib_assert(size != 0);
+
+ if (ctx->compressor == NULL) {
+ /* Write chunk uncompressed. */
+ ret = write_chunk(ctx, chunk, size, size);
+ if (ret)
+ return ret;
+ ctx->cur_read_res_offset += size;
+ return 0;
+ }
+
+ /* Submit the chunk for compression, but take into account that the
+ * @size the chunk was provided in may not correspond to the
+ * @out_chunk_size being used for compression. */
+ chunkptr = chunk;
+ chunkend = chunkptr + size;
+ do {
+ const u8 *resized_chunk;
+ size_t needed_chunk_size;
+
+ if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) {
+ /* Packed mode: output chunk boundaries are independent of
+ * stream boundaries, so always aim for a full chunk. */
+ needed_chunk_size = ctx->out_chunk_size;
+ } else {
+ u64 res_bytes_remaining;
+
+ /* Non-packed mode: the final chunk of the resource may be
+ * shorter than @out_chunk_size. */
+ res_bytes_remaining = ctx->cur_read_res_size -
+ ctx->cur_read_res_offset;
+ needed_chunk_size = min(ctx->out_chunk_size,
+ ctx->chunk_buf_filled +
+ res_bytes_remaining);
+ }
+
+ if (ctx->chunk_buf_filled == 0 &&
+ chunkend - chunkptr >= needed_chunk_size)
+ {
+ /* No intermediate buffering needed. */
+ resized_chunk = chunkptr;
+ chunkptr += needed_chunk_size;
+ ctx->cur_read_res_offset += needed_chunk_size;
+ } else {
+ /* Intermediate buffering needed. */
+ size_t bytes_consumed;
+
+ bytes_consumed = min(chunkend - chunkptr,
+ needed_chunk_size - ctx->chunk_buf_filled);
+
+ memcpy(&ctx->chunk_buf[ctx->chunk_buf_filled],
+ chunkptr, bytes_consumed);
+
+ chunkptr += bytes_consumed;
+ ctx->cur_read_res_offset += bytes_consumed;
+ ctx->chunk_buf_filled += bytes_consumed;
+ if (ctx->chunk_buf_filled == needed_chunk_size) {
+ resized_chunk = ctx->chunk_buf;
+ ctx->chunk_buf_filled = 0;
+ } else {
+ /* Output chunk not yet full; wait for more input data
+ * before submitting it. */
+ break;
+ }
+
+ }
+
+ ret = submit_chunk_for_compression(ctx, resized_chunk,
+ needed_chunk_size);
+ if (ret)
+ return ret;
+
+ } while (chunkptr != chunkend);
+ return 0;
+ }
+
+/* Finish processing a stream for writing. It may not have been completely
+ * written yet, as the chunk_compressor implementation may still have chunks
+ * buffered or being compressed. */
+static int
+write_stream_end_read(struct wim_lookup_table_entry *lte, int status, void *_ctx)
+{
+ struct write_streams_ctx *ctx = _ctx;
+ if (status == 0)
+ wimlib_assert(ctx->cur_read_res_offset == ctx->cur_read_res_size);
+ if (ctx->stream_was_duplicate) {
+ free_lookup_table_entry(lte);
+ } else if (lte->unhashed && ctx->lookup_table != NULL) {
+ list_del(<e->unhashed_list);
+ lookup_table_insert(ctx->lookup_table, lte);
+ lte->unhashed = 0;
+ }
+ return status;
+}
+
+/* Compute statistics about a list of streams that will be written.
+ *
+ * Assumes the streams are sorted such that all streams located in each distinct
+ * WIM (specified by WIMStruct) are together. */
+static void
+compute_stream_list_stats(struct list_head *stream_list,
+ struct write_streams_ctx *ctx)
+{
+ struct wim_lookup_table_entry *lte;
+ u64 total_bytes = 0;
+ u64 num_streams = 0;
+ u64 total_parts = 0;
+ WIMStruct *prev_wim_part = NULL;
+
+ list_for_each_entry(lte, stream_list, write_streams_list) {
+ num_streams++;
+ total_bytes += lte->size;
+ if (lte->resource_location == RESOURCE_IN_WIM) {
+ if (prev_wim_part != lte->rspec->wim) {
+ prev_wim_part = lte->rspec->wim;
+ total_parts++;
+ }
+ }
+ }
+ ctx->progress_data.progress.write_streams.total_bytes = total_bytes;
+ ctx->progress_data.progress.write_streams.total_streams = num_streams;
+ ctx->progress_data.progress.write_streams.completed_bytes = 0;
+ ctx->progress_data.progress.write_streams.completed_streams = 0;
+ ctx->progress_data.progress.write_streams.compression_type = ctx->out_ctype;
+ ctx->progress_data.progress.write_streams.total_parts = total_parts;
+ ctx->progress_data.progress.write_streams.completed_parts = 0;
+ ctx->progress_data.next_progress = 0;
+ ctx->progress_data.prev_wim_part = NULL;
+}
+
+/* Find streams in @stream_list that can be copied to the output WIM in raw form
+ * rather than compressed. Delete these streams from @stream_list, and move one
+ * per resource to @raw_copy_resources. Return the total uncompressed size of
+ * the streams that need to be compressed. */
+static u64
+find_raw_copy_resources(struct list_head *stream_list,
+ int write_resource_flags,
+ int out_ctype,
+ u32 out_chunk_size,
+ struct list_head *raw_copy_resources)
+{
+ struct wim_lookup_table_entry *lte, *tmp;
+ u64 num_bytes_to_compress = 0;
+
+ INIT_LIST_HEAD(raw_copy_resources);
+
+ /* Initialize temporary raw_copy_ok flag. */
+ list_for_each_entry(lte, stream_list, write_streams_list)
+ if (lte->resource_location == RESOURCE_IN_WIM)
+ lte->rspec->raw_copy_ok = 0;
+
+ list_for_each_entry_safe(lte, tmp, stream_list, write_streams_list) {
+ if (lte->resource_location == RESOURCE_IN_WIM &&
+ lte->rspec->raw_copy_ok)
+ {
+ list_del(<e->write_streams_list);
+ } else if (can_raw_copy(lte, write_resource_flags,
+ out_ctype, out_chunk_size))
+ {
+ lte->rspec->raw_copy_ok = 1;
+ list_move_tail(<e->write_streams_list,
+ raw_copy_resources);
+ } else {
+ num_bytes_to_compress += lte->size;
+ }
+ }
+
+ return num_bytes_to_compress;
+}
+
+ /* Copy a raw compressed resource located in another WIM file to the WIM file
+ * being written. */
+ static int
+ write_raw_copy_resource(struct wim_resource_spec *in_rspec,
+ struct filedes *out_fd)
+ {
+ u64 cur_read_offset;
+ u64 end_read_offset;
+ u8 buf[BUFFER_SIZE];
+ size_t bytes_to_read;
+ int ret;
+ struct filedes *in_fd;
+ struct wim_lookup_table_entry *lte;
+ u64 out_offset_in_wim;
+
+ DEBUG("Copying raw compressed data (size_in_wim=%"PRIu64", "
+ "uncompressed_size=%"PRIu64")",
+ in_rspec->size_in_wim, in_rspec->uncompressed_size);
+
+ /* Copy the raw data. */
+ cur_read_offset = in_rspec->offset_in_wim;
+ end_read_offset = cur_read_offset + in_rspec->size_in_wim;
+
+ out_offset_in_wim = out_fd->offset;
+
+ /* In a pipable source WIM, the resource data is preceded by a stream
+ * header, which must be included in the copy; shift the read offset
+ * back to cover it and the recorded output offset forward past it. */
+ if (in_rspec->is_pipable) {
+ if (cur_read_offset < sizeof(struct pwm_stream_hdr))
+ return WIMLIB_ERR_INVALID_PIPABLE_WIM;
+ cur_read_offset -= sizeof(struct pwm_stream_hdr);
+ out_offset_in_wim += sizeof(struct pwm_stream_hdr);
+ }
+ in_fd = &in_rspec->wim->in_fd;
+ wimlib_assert(cur_read_offset != end_read_offset);
+ /* Copy the resource in BUFFER_SIZE pieces until exhausted. */
+ do {
+
+ bytes_to_read = min(sizeof(buf), end_read_offset - cur_read_offset);
+
+ ret = full_pread(in_fd, buf, bytes_to_read, cur_read_offset);
+ if (ret)
+ return ret;
+
+ ret = full_write(out_fd, buf, bytes_to_read);
+ if (ret)
+ return ret;
+
+ cur_read_offset += bytes_to_read;
+
+ } while (cur_read_offset != end_read_offset);
+
+ /* Point the output resource headers of all streams sharing this
+ * resource at the copy's new location in the output WIM. */
+ list_for_each_entry(lte, &in_rspec->stream_list, rspec_node) {
+ if (lte->will_be_in_output_wim) {
+ stream_set_out_reshdr_for_reuse(lte);
+ if (in_rspec->flags & WIM_RESHDR_FLAG_PACKED_STREAMS)
+ lte->out_res_offset_in_wim = out_offset_in_wim;
+ else
+ lte->out_reshdr.offset_in_wim = out_offset_in_wim;
+
+ }
+ }
+ return 0;
+ }
+
+/* Copy a list of raw compressed resources located other WIM file(s) to the WIM
+ * file being written. */
+static int
+write_raw_copy_resources(struct list_head *raw_copy_resources,
+ struct filedes *out_fd,
+ struct write_streams_progress_data *progress_data)
+{
+ struct wim_lookup_table_entry *lte;
+ int ret;
+
+ list_for_each_entry(lte, raw_copy_resources, write_streams_list) {
+ ret = write_raw_copy_resource(lte->rspec, out_fd);
+ if (ret)
+ return ret;
+ do_write_streams_progress(progress_data, lte->size, false, lte);
+ }
+ return 0;
+}
+
+/* Wait for and write all chunks pending in the compressor. */
+static int
+finish_remaining_chunks(struct write_streams_ctx *ctx)
+{
+ const void *cdata;
+ unsigned csize;
+ unsigned usize;
+ int ret;
+
+ if (ctx->compressor == NULL)
+ return 0;
+
+ if (ctx->chunk_buf_filled != 0) {
+ ret = submit_chunk_for_compression(ctx, ctx->chunk_buf,
+ ctx->chunk_buf_filled);
+ if (ret)
+ return ret;
+ }
+
+ while (ctx->compressor->get_chunk(ctx->compressor, &cdata, &csize, &usize)) {
+ ret = write_chunk(ctx, cdata, csize, usize);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static void
+remove_zero_length_streams(struct list_head *stream_list)
+{
+ struct wim_lookup_table_entry *lte, *tmp;
+
+ list_for_each_entry_safe(lte, tmp, stream_list, write_streams_list) {
+ wimlib_assert(lte->will_be_in_output_wim);
+ if (lte->size == 0) {
+ list_del(<e->write_streams_list);
+ lte->out_reshdr.offset_in_wim = 0;
+ lte->out_reshdr.size_in_wim = 0;
+ lte->out_reshdr.uncompressed_size = 0;
+ lte->out_reshdr.flags = filter_resource_flags(lte->flags);
+ }
+ }
+}
+
+/*
+ * Write a list of streams to the output WIM file.
+ *
+ * @stream_list
+ * The list of streams to write, specifies a list of `struct
+ * wim_lookup_table_entry's linked by the 'write_streams_list' member.
+ *
+ * @out_fd
+ * The file descriptor, opened for writing, to which to write the streams.
+ *
+ * @write_resource_flags
+ * Flags to modify how the streams are written:
+ *
+ * WRITE_RESOURCE_FLAG_RECOMPRESS:
+ * Force compression of all resources, even if they could otherwise
+ * be re-used by copying the raw data, due to being located in a WIM
+ * file with compatible compression parameters.
+ *
+ * WRITE_RESOURCE_FLAG_PIPABLE:
+ * Write the resources in the wimlib-specific pipable format, and
+ * furthermore do so in such a way that no seeking backwards in
+ * @out_fd will be performed (so it may be a pipe, contrary to the
+ * default behavior).
+ *
+ * WRITE_RESOURCE_FLAG_PACK_STREAMS:
+ * Pack all the streams into a single resource rather than writing
+ * them in separate resources. This format is only valid if the
+ * WIM version number is WIM_VERSION_PACKED_STREAMS. This flag
+ * currently may not be combined with WRITE_RESOURCE_FLAG_PIPABLE.
+ *
+ * @out_ctype
+ * Compression format to use to write the output streams, specified as one
+ * of the WIMLIB_COMPRESSION_TYPE_* constants, excepting
+ * WIMLIB_COMPRESSION_TYPE_INVALID but including
+ * WIMLIB_COMPRESSION_TYPE_NONE.
+ *
+ * @out_chunk_size
+ * Chunk size to use to write the streams. It must be a valid chunk size
+ * for the specified compression format @out_ctype, unless @out_ctype is
+ * WIMLIB_COMPRESSION_TYPE_NONE, in which case this parameter is ignored.
+ *
+ * @num_threads
+ * Number of threads to use to compress data. If 0, a default number of
+ * threads will be chosen. The number of threads still may be decreased
+ * from the specified value if insufficient memory is detected.
+ *
+ * @lookup_table
+ * If on-the-fly deduplication of unhashed streams is desired, this
+ * parameter must be pointer to the lookup table for the WIMStruct on whose
+ * behalf the streams are being written. Otherwise, this parameter can be
+ * NULL.
+ *
+ * @filter_ctx
+ * If on-the-fly deduplication of unhashed streams is desired, this
+ * parameter can be a pointer to a context for stream filtering used to
+ * detect whether the duplicate stream has been hard-filtered or not. If
+ * no streams are hard-filtered or no streams are unhashed, this parameter
+ * can be NULL.
+ *
+ * @progress_func
+ * If non-NULL, a progress function that will be called periodically with
+ * WIMLIB_PROGRESS_MSG_WRITE_STREAMS messages. Note that on-the-fly
+ * deduplication of unhashed streams may result in the total bytes provided
+ * in the progress data to decrease from one message to the next.
+ *
+ * This function will write the streams in @stream_list to resources in
+ * consecutive positions in the output WIM file, or to a single packed resource
+ * if WRITE_RESOURCE_FLAG_PACK_STREAMS was specified in @write_resource_flags.
+ * In both cases, the @out_reshdr of the `struct wim_lookup_table_entry' for
+ * each stream written will be updated to specify its location, size, and flags
+ * in the output WIM. In the packed resource case,
+ * WIM_RESHDR_FLAG_PACKED_STREAMS shall be set in the @flags field of the
+ * @out_reshdr, and @out_res_offset_in_wim and @out_res_size_in_wim will also
+ * be set to the offset and size, respectively, in the output WIM of the full
+ * packed resource containing the corresponding stream.
+ *
+ * Each of the streams to write may be in any location supported by the
+ * resource-handling code (specifically, read_stream_list()), such as the
+ * contents of an external file that has been logically added to the output WIM,
+ * or a stream in another WIM file that has been imported, or even a stream in the
+ * "same" WIM file of which a modified copy is being written. In the case that
+ * a stream is already in a WIM file and uses compatible compression parameters,
+ * by default this function will re-use the raw data instead of decompressing
+ * it, then recompressing it; however, with WRITE_RESOURCE_FLAG_RECOMPRESS
+ * specified in @write_resource_flags, this is not done.
+ *
+ * As a further requirement, this function requires that the
+ * @will_be_in_output_wim member be set on all streams in @stream_list as well
+ * as any other streams not in @stream_list that will be in the output WIM file,
+ * but not on any other streams in the output WIM's lookup table or sharing a
+ * packed resource with a stream in @stream_list. Still furthermore, if
+ * on-the-fly deduplication of streams is possible, then all streams in
+ * @stream_list must also be linked by @lookup_table_list along with any other
+ * streams that have @will_be_in_output_wim set.
+ *
+ * This function handles on-the-fly deduplication of streams for which SHA1
+ * message digests have not yet been calculated and it is therefore not yet known
+ * whether such streams are already in @stream_list or in the WIM's lookup table
+ * at all. If @lookup_table is non-NULL, then each stream in @stream_list that
+ * has @unhashed set but not @unique_size set is checksummed immediately before
+ * it would otherwise be read for writing in order to determine if it is
+ * identical to another stream already being written or one that would be
+ * filtered out of the output WIM using stream_filtered() with the context
+ * @filter_ctx. Each such duplicate stream will be removed from @stream_list, its
+ * reference count transferred to the pre-existing duplicate stream, its memory
+ * freed, and will not be written. Alternatively, if a stream in @stream_list
+ * is a duplicate with any stream in @lookup_table that has not been marked for
+ * writing or would not be hard-filtered, it is freed and the pre-existing
+ * duplicate is written instead, taking ownership of the reference count and
+ * slot in the @lookup_table_list.
+ *
+ * Returns 0 if all streams were written successfully (or did not need to be
+ * written); otherwise a non-zero error code.
+ */
+ static int
+ write_stream_list(struct list_head *stream_list,
+ struct filedes *out_fd,
+ int write_resource_flags,
+ int out_ctype,
+ u32 out_chunk_size,
+ unsigned num_threads,
+ struct wim_lookup_table *lookup_table,
+ struct filter_context *filter_ctx,
+ wimlib_progress_func_t progress_func)
+ {
+ int ret;
+ struct write_streams_ctx ctx;
+ struct list_head raw_copy_resources;
+
+ /* PACK_STREAMS and PIPABLE may not be combined (see doc comment above). */
+ wimlib_assert((write_resource_flags &
+ (WRITE_RESOURCE_FLAG_PACK_STREAMS |
+ WRITE_RESOURCE_FLAG_PIPABLE)) !=
+ (WRITE_RESOURCE_FLAG_PACK_STREAMS |
+ WRITE_RESOURCE_FLAG_PIPABLE));
+
+ remove_zero_length_streams(stream_list);
+
+ if (list_empty(stream_list)) {
+ DEBUG("No streams to write.");
+ return 0;
+ }
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ /* Pre-sorting the streams is required for compute_stream_list_stats().
+ * Afterwards, read_stream_list() need not sort them again. */
+ ret = sort_stream_list_by_sequential_order(stream_list,
+ offsetof(struct wim_lookup_table_entry,
+ write_streams_list));
+ if (ret)
+ return ret;
+
+ ctx.out_fd = out_fd;
+ ctx.lookup_table = lookup_table;
+ ctx.out_ctype = out_ctype;
+ ctx.out_chunk_size = out_chunk_size;
+ ctx.write_resource_flags = write_resource_flags;
+ ctx.filter_ctx = filter_ctx;
+
+ if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
+ wimlib_assert(out_chunk_size != 0);
+ /* Buffer used to re-chunk input data to @out_chunk_size; use the
+ * stack for small chunk sizes and the heap otherwise. */
+ if (out_chunk_size <= STACK_MAX) {
+ ctx.chunk_buf = alloca(out_chunk_size);
+ } else {
+ ctx.chunk_buf = MALLOC(out_chunk_size);
+ if (ctx.chunk_buf == NULL) {
+ ret = WIMLIB_ERR_NOMEM;
+ goto out_destroy_context;
+ }
+ }
+ }
+ ctx.chunk_buf_filled = 0;
+
+ compute_stream_list_stats(stream_list, &ctx);
+
+ ctx.progress_data.progress_func = progress_func;
+
+ ctx.num_bytes_to_compress = find_raw_copy_resources(stream_list,
+ write_resource_flags,
+ out_ctype,
+ out_chunk_size,
+ &raw_copy_resources);
+
+ DEBUG("Writing stream list "
+ "(offset = %"PRIu64", write_resource_flags=0x%08x, "
+ "out_ctype=%d, out_chunk_size=%u, num_threads=%u, "
+ "total_bytes=%"PRIu64", num_bytes_to_compress=%"PRIu64")",
+ out_fd->offset, write_resource_flags,
+ out_ctype, out_chunk_size, num_threads,
+ ctx.progress_data.progress.write_streams.total_bytes,
+ ctx.num_bytes_to_compress);
+
+ if (ctx.num_bytes_to_compress == 0) {
+ DEBUG("No compression needed; skipping to raw copy!");
+ goto out_write_raw_copy_resources;
+ }
+
+ /* Unless uncompressed output was required, allocate a chunk_compressor
+ * to do compression. There are serial and parallel implementations of
+ * the chunk_compressor interface. We default to parallel using the
+ * specified number of threads, unless the upper bound on the number of
+ * bytes needing to be compressed is less than 2000000 (heuristic value). */
+ if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
+
+ #ifdef ENABLE_MULTITHREADED_COMPRESSION
+ if (ctx.num_bytes_to_compress >= 2000000) {
+ ret = new_parallel_chunk_compressor(out_ctype,
+ out_chunk_size,
+ num_threads, 0,
+ &ctx.compressor);
+ if (ret) {
+ DEBUG("Couldn't create parallel chunk compressor "
+ "(status %d)", ret);
+ }
+ }
+ #endif
+
+ /* Fall back to the serial compressor if the parallel one was
+ * unavailable or failed to initialize. */
+ if (ctx.compressor == NULL) {
+ ret = new_serial_chunk_compressor(out_ctype, out_chunk_size,
+ &ctx.compressor);
+ if (ret)
+ goto out_destroy_context;
+ }
+ }
+
+ if (ctx.compressor)
+ ctx.progress_data.progress.write_streams.num_threads = ctx.compressor->num_threads;
+ else
+ ctx.progress_data.progress.write_streams.num_threads = 1;
+
+ DEBUG("Actually using %u threads",
+ ctx.progress_data.progress.write_streams.num_threads);
+
+ INIT_LIST_HEAD(&ctx.pending_streams);
+
+ if (ctx.progress_data.progress_func) {
+ (*ctx.progress_data.progress_func)(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
+ &ctx.progress_data.progress);
+ }
+
+ if (write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) {
+ /* All streams share a single packed resource; begin it now. */
+ ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress);
+ if (ret)
+ goto out_destroy_context;
+ }
+
+ /* Read the list of streams needing to be compressed, using the
+ * specified callbacks to execute processing of the data. */
+
+ struct read_stream_list_callbacks cbs = {
+ .begin_stream = write_stream_begin_read,
+ .begin_stream_ctx = &ctx,
+ .consume_chunk = write_stream_process_chunk,
+ .consume_chunk_ctx = &ctx,
+ .end_stream = write_stream_end_read,
+ .end_stream_ctx = &ctx,
+ };
+
+ ret = read_stream_list(stream_list,
+ offsetof(struct wim_lookup_table_entry, write_streams_list),
+ &cbs,
+ STREAM_LIST_ALREADY_SORTED |
+ VERIFY_STREAM_HASHES |
+ COMPUTE_MISSING_STREAM_HASHES);
+
+ if (ret)
+ goto out_destroy_context;
+
+ ret = finish_remaining_chunks(&ctx);
+ if (ret)
+ goto out_destroy_context;
+
+ if (write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) {
+ struct wim_reshdr reshdr;
+ struct wim_lookup_table_entry *lte;
+ u64 offset_in_res;
+
+ ret = end_write_resource(&ctx, &reshdr);
+ if (ret)
+ goto out_destroy_context;
+
+ DEBUG("Ending packed resource: %lu %lu %lu.",
+ reshdr.offset_in_wim,
+ reshdr.size_in_wim,
+ reshdr.uncompressed_size);
+
+ /* Fill in each stream's output resource header with its
+ * position inside the packed resource. */
+ offset_in_res = 0;
+ list_for_each_entry(lte, &ctx.pending_streams, write_streams_list) {
+ lte->out_reshdr.size_in_wim = lte->size;
+ lte->out_reshdr.flags = filter_resource_flags(lte->flags);
+ lte->out_reshdr.flags |= WIM_RESHDR_FLAG_PACKED_STREAMS;
+ lte->out_reshdr.uncompressed_size = 0;
+ lte->out_reshdr.offset_in_wim = offset_in_res;
+ lte->out_res_offset_in_wim = reshdr.offset_in_wim;
+ lte->out_res_size_in_wim = reshdr.size_in_wim;
+ /*lte->out_res_uncompressed_size = reshdr.uncompressed_size;*/
+ offset_in_res += lte->size;
+ }
+ wimlib_assert(offset_in_res == reshdr.uncompressed_size);
+ }
+
+out_write_raw_copy_resources:
+ /* Copy any compressed resources for which the raw data can be reused
+ * without decompression. */
+ ret = write_raw_copy_resources(&raw_copy_resources, ctx.out_fd,
+ &ctx.progress_data);
+
+out_destroy_context:
+ if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE && out_chunk_size > STACK_MAX)
+ FREE(ctx.chunk_buf);
+ FREE(ctx.chunk_csizes);
+ if (ctx.compressor)
+ ctx.compressor->destroy(ctx.compressor);
+ DEBUG("Done (ret=%d)", ret);
+ return ret;
+ }
+
+static int
+wim_write_stream_list(WIMStruct *wim,
+ struct list_head *stream_list,
+ int write_flags,
+ unsigned num_threads,
+ struct filter_context *filter_ctx,
+ wimlib_progress_func_t progress_func)
+{
+ int out_ctype;
+ u32 out_chunk_size;
+ int write_resource_flags;
+
+ write_resource_flags = write_flags_to_resource_flags(write_flags);
+
+ if (write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) {
+ out_chunk_size = wim->out_pack_chunk_size;
+ out_ctype = wim->out_pack_compression_type;
+ } else {
+ out_chunk_size = wim->out_chunk_size;
+ out_ctype = wim->out_compression_type;
+ }
+
+ return write_stream_list(stream_list,
+ &wim->out_fd,
+ write_resource_flags,
+ out_ctype,
+ out_chunk_size,
+ num_threads,
+ wim->lookup_table,
+ filter_ctx,
+ progress_func);
+}
+
+static int
+write_wim_resource(struct wim_lookup_table_entry *lte,
+ struct filedes *out_fd,
+ int out_ctype,
+ u32 out_chunk_size,
+ int write_resource_flags)
+{
+ LIST_HEAD(stream_list);
+ list_add(<e->write_streams_list, &stream_list);
+ lte->will_be_in_output_wim = 1;
+ return write_stream_list(&stream_list,
+ out_fd,
+ write_resource_flags & ~WRITE_RESOURCE_FLAG_PACK_STREAMS,
+ out_ctype,
+ out_chunk_size,
+ 1,
+ NULL,
+ NULL,
+ NULL);
+}
+
+int
+write_wim_resource_from_buffer(const void *buf, size_t buf_size,
+ int reshdr_flags, struct filedes *out_fd,
+ int out_ctype,
+ u32 out_chunk_size,
+ struct wim_reshdr *out_reshdr,
+ u8 *hash,
+ int write_resource_flags)
+{
+ int ret;
+ struct wim_lookup_table_entry *lte;
+
+ /* Set up a temporary lookup table entry to provide to
+ * write_wim_resource(). */
+
+ lte = new_lookup_table_entry();
+ if (lte == NULL)
+ return WIMLIB_ERR_NOMEM;
+
+ lte->resource_location = RESOURCE_IN_ATTACHED_BUFFER;
+ lte->attached_buffer = (void*)buf;
+ lte->size = buf_size;
+ lte->flags = reshdr_flags;
+
+ if (write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
+ sha1_buffer(buf, buf_size, lte->hash);
+ lte->unhashed = 0;
+ } else {
+ lte->unhashed = 1;
+ }
+
+ ret = write_wim_resource(lte, out_fd, out_ctype, out_chunk_size,
+ write_resource_flags);
+ if (ret)
+ goto out_free_lte;
+
+ copy_reshdr(out_reshdr, <e->out_reshdr);
+
+ if (hash)
+ copy_hash(hash, lte->hash);
+ ret = 0;
+out_free_lte:
+ lte->resource_location = RESOURCE_NONEXISTENT;
+ free_lookup_table_entry(lte);
+ return ret;
+}
+
+ /* Hash table keyed on stream size, used to determine which streams have a
+ * unique size among all streams considered. */
+ struct stream_size_table {
+ struct hlist_head *array; /* Buckets; entries linked by hash_list_2 */
+ size_t num_entries; /* Number of streams inserted so far */
+ size_t capacity; /* Number of buckets in @array */
+ };
+
+static int
+init_stream_size_table(struct stream_size_table *tab, size_t capacity)
+{
+ tab->array = CALLOC(capacity, sizeof(tab->array[0]));
+ if (tab->array == NULL)
+ return WIMLIB_ERR_NOMEM;
+ tab->num_entries = 0;
+ tab->capacity = capacity;
+ return 0;
+}
+
+ /* Free the bucket array of a stream size table. The inserted entries are
+ * not owned by the table and are left untouched. */
+ static void
+ destroy_stream_size_table(struct stream_size_table *tab)
+ {
+ FREE(tab->array);
+ }
+
+static int
+stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab)
+{
+ struct stream_size_table *tab = _tab;
+ size_t pos;
+ struct wim_lookup_table_entry *same_size_lte;
+ struct hlist_node *tmp;
+
+ pos = hash_u64(lte->size) % tab->capacity;
+ lte->unique_size = 1;
+ hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) {
+ if (same_size_lte->size == lte->size) {
+ lte->unique_size = 0;
+ same_size_lte->unique_size = 0;
+ break;
+ }
+ }
+
+ hlist_add_head(<e->hash_list_2, &tab->array[pos]);
+ tab->num_entries++;
+ return 0;
+}
+
+ /* Context used while gathering the set of streams to write for a WIMStruct. */
+ struct find_streams_ctx {
+ WIMStruct *wim; /* WIM on whose behalf streams are gathered */
+ int write_flags; /* WIMLIB_WRITE_FLAG_* flags */
+ struct list_head stream_list; /* Streams found so far */
+ struct stream_size_table stream_size_tab; /* For unique-size detection */
+ };
+
+static void
+reference_stream_for_write(struct wim_lookup_table_entry *lte,
+ struct list_head *stream_list, u32 nref)
+{
+ if (!lte->will_be_in_output_wim) {
+ lte->out_refcnt = 0;
+ list_add_tail(<e->write_streams_list, stream_list);
+ lte->will_be_in_output_wim = 1;
+ }
+ lte->out_refcnt += nref;
+}
+
+static int
+fully_reference_stream_for_write(struct wim_lookup_table_entry *lte,
+ void *_stream_list)
+{
+ struct list_head *stream_list = _stream_list;
+ lte->will_be_in_output_wim = 0;
+ reference_stream_for_write(lte, stream_list, lte->refcnt);
+ return 0;
+}
+
+ /* Add the streams referenced by @inode to @stream_list, referencing each one
+ * once per link to the inode. Returns WIMLIB_ERR_RESOURCE_NOT_FOUND if a
+ * stream has a nonzero hash but no entry in the lookup table. */
+ static int
+ inode_find_streams_to_reference(const struct wim_inode *inode,
+ const struct wim_lookup_table *table,
+ struct list_head *stream_list)
+ {
+ struct wim_lookup_table_entry *lte;
+ unsigned i;
+
+ wimlib_assert(inode->i_nlink > 0);
+
+ /* Iterate over the unnamed stream (i == 0) and the i_num_ads alternate
+ * data streams. */
+ for (i = 0; i <= inode->i_num_ads; i++) {
+ lte = inode_stream_lte(inode, i, table);
+ if (lte)
+ reference_stream_for_write(lte, stream_list,
+ inode->i_nlink);
+ else if (!is_zero_hash(inode_stream_hash(inode, i)))
+ /* Nonzero hash with no lookup table entry: the
+ * stream's data is missing. */
+ return WIMLIB_ERR_RESOURCE_NOT_FOUND;
+ }
+ return 0;
+ }
+
+ /* for_lookup_table_entry() callback: clear the will_be_in_output_wim flag on
+ * a stream. Always returns 0. */
+ static int
+ do_stream_set_not_in_output_wim(struct wim_lookup_table_entry *lte, void *_ignore)
+ {
+ lte->will_be_in_output_wim = 0;
+ return 0;
+ }
+
+static int
+image_find_streams_to_reference(WIMStruct *wim)
+{
+ struct wim_image_metadata *imd;
+ struct wim_inode *inode;
+ struct wim_lookup_table_entry *lte;
+ struct list_head *stream_list;
+ int ret;
+
+ imd = wim_get_current_image_metadata(wim);
+
+ image_for_each_unhashed_stream(lte, imd)
+ lte->will_be_in_output_wim = 0;
+
+ stream_list = wim->private;
+ image_for_each_inode(inode, imd) {
+ ret = inode_find_streams_to_reference(inode,
+ wim->lookup_table,
+ stream_list);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+ /* Build the initial (unfiltered) list of streams referenced by the image(s)
+ * being written, linked by write_streams_list. Sets will_be_in_output_wim
+ * and out_refcnt on each referenced stream. */
+ static int
+ prepare_unfiltered_list_of_streams_in_output_wim(WIMStruct *wim,
+ int image,
+ int streams_ok,
+ struct list_head *stream_list_ret)
+ {
+ int ret;
+
+ INIT_LIST_HEAD(stream_list_ret);
+
+ if (streams_ok && (image == WIMLIB_ALL_IMAGES ||
+ (image == 1 && wim->hdr.image_count == 1)))
+ {
+ /* Fast case: Assume that all streams are being written and
+ * that the reference counts are correct. */
+ struct wim_lookup_table_entry *lte;
+ struct wim_image_metadata *imd;
+ unsigned i;
+
+ for_lookup_table_entry(wim->lookup_table,
+ fully_reference_stream_for_write,
+ stream_list_ret);
+
+ /* Also pick up unhashed streams, which are kept on per-image
+ * lists rather than in the lookup table. */
+ for (i = 0; i < wim->hdr.image_count; i++) {
+ imd = wim->image_metadata[i];
+ image_for_each_unhashed_stream(lte, imd)
+ fully_reference_stream_for_write(lte, stream_list_ret);
+ }
+ } else {
+ /* Slow case: Walk through the images being written and
+ * determine the streams referenced. */
+ for_lookup_table_entry(wim->lookup_table,
+ do_stream_set_not_in_output_wim, NULL);
+ /* Pass the result list to the per-image callback through
+ * wim->private. */
+ wim->private = stream_list_ret;
+ ret = for_image(wim, image, image_find_streams_to_reference);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* Context for the insert_other_if_hard_filtered() callback. */
+ struct insert_other_if_hard_filtered_ctx {
+ struct stream_size_table *tab; /* Size table to insert into */
+ struct filter_context *filter_ctx; /* Used to query filter status */
+ };
+
+static int
+insert_other_if_hard_filtered(struct wim_lookup_table_entry *lte, void *_ctx)
+{
+ struct insert_other_if_hard_filtered_ctx *ctx = _ctx;
+
+ if (!lte->will_be_in_output_wim &&
+ stream_hard_filtered(lte, ctx->filter_ctx))
+ stream_size_table_insert(lte, ctx->tab);
+ return 0;
+}
+
+static int
+determine_stream_size_uniquity(struct list_head *stream_list,
+ struct wim_lookup_table *lt,
+ struct filter_context *filter_ctx)
+{
+ int ret;
+ struct stream_size_table tab;
+ struct wim_lookup_table_entry *lte;
+
+ ret = init_stream_size_table(&tab, lt->capacity);
+ if (ret)
+ return ret;
+
+ if (may_hard_filter_streams(filter_ctx)) {
+ struct insert_other_if_hard_filtered_ctx ctx = {
+ .tab = &tab,
+ .filter_ctx = filter_ctx,
+ };
+ for_lookup_table_entry(lt, insert_other_if_hard_filtered, &ctx);
+ }
+
+ list_for_each_entry(lte, stream_list, write_streams_list)
+ stream_size_table_insert(lte, &tab);
+
+ destroy_stream_size_table(&tab);
+ return 0;
+}
+
+static void
+filter_stream_list_for_write(struct list_head *stream_list,
+ struct filter_context *filter_ctx)
+{
+ struct wim_lookup_table_entry *lte, *tmp;
+
+ list_for_each_entry_safe(lte, tmp,
+ stream_list, write_streams_list)
+ {
+ int status = stream_filtered(lte, filter_ctx);
+
+ if (status == 0) {
+ /* Not filtered. */
+ continue;
+ } else {
+ if (status > 0) {
+ /* Soft filtered. */
+ } else {
+ /* Hard filtered. */
+ lte->will_be_in_output_wim = 0;
+ list_del(<e->lookup_table_list);
+ }
+ list_del(<e->write_streams_list);
+ }
+ }
+}
+
+/*
+ * prepare_stream_list_for_write() -
+ *
+ * Prepare the list of streams to write for writing a WIM containing the
+ * specified image(s) with the specified write flags.
+ *
+ * @wim
+ * The WIMStruct on whose behalf the write is occurring.
+ *
+ * @image
+ * Image(s) from the WIM to write; may be WIMLIB_ALL_IMAGES.
+ *
+ * @write_flags
+ * WIMLIB_WRITE_FLAG_* flags for the write operation:
+ *
+ * STREAMS_OK: For writes of all images, assume that all streams in the
+ * lookup table of @wim and the per-image lists of unhashed streams should
+ * be taken as-is, and image metadata should not be searched for
+ * references. This does not exclude filtering with OVERWRITE and
+ * SKIP_EXTERNAL_WIMS, below.
+ *
+ * OVERWRITE: Streams already present in @wim shall not be returned in
+ * @stream_list_ret.
+ *
+ * SKIP_EXTERNAL_WIMS: Streams already present in a WIM file, but not
+ * @wim, shall be returned in neither @stream_list_ret nor
+ * @lookup_table_list_ret.
+ *
+ * @stream_list_ret
+ * List of streams, linked by write_streams_list, that need to be written
+ * will be returned here.
+ *
+ * Note that this function assumes that unhashed streams will be written;
+ * it does not take into account that they may become duplicates when
+ * actually hashed.
+ *
+ * @lookup_table_list_ret
+ * List of streams, linked by lookup_table_list, that need to be included
+ * in the WIM's lookup table will be returned here. This will be a
+ * superset of the streams in @stream_list_ret.
+ *
+ * This list will be a proper superset of @stream_list_ret if and only if
+ * WIMLIB_WRITE_FLAG_OVERWRITE was specified in @write_flags and some of
+ * the streams that would otherwise need to be written were already located
+ * in the WIM file.
+ *
+ * All streams in this list will have @out_refcnt set to the number of
+ * references to the stream in the output WIM. If
+ * WIMLIB_WRITE_FLAG_STREAMS_OK was specified in @write_flags, @out_refcnt
+ * may be as low as 0.
+ *
+ * @filter_ctx_ret
+ * A context for queries of stream filter status with stream_filtered() is
+ * returned in this location.
+ *
+ * In addition, @will_be_in_output_wim will be set to 1 in all stream entries
+ * inserted into @lookup_table_list_ret and to 0 in all stream entries in the
+ * lookup table of @wim not inserted into @lookup_table_list_ret.
+ *
+ * Still furthermore, @unique_size will be set to 1 on all stream entries in
+ * @stream_list_ret that have unique size among all stream entries in
+ * @stream_list_ret and among all stream entries in the lookup table of @wim
+ * that are ineligible for being written due to filtering.
+ *
+ * Returns 0 on success; nonzero on read error, memory allocation error, or
+ * otherwise.
+ */
+static int
+prepare_stream_list_for_write(WIMStruct *wim, int image,
+ int write_flags,
+ struct list_head *stream_list_ret,
+ struct list_head *lookup_table_list_ret,
+ struct filter_context *filter_ctx_ret)
+{
+ int ret;
+ struct wim_lookup_table_entry *lte;
+
+ filter_ctx_ret->write_flags = write_flags;
+ filter_ctx_ret->wim = wim;
+
+ ret = prepare_unfiltered_list_of_streams_in_output_wim(
+ wim,
+ image,
+ write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK,
+ stream_list_ret);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(lookup_table_list_ret);
+ list_for_each_entry(lte, stream_list_ret, write_streams_list)
+ list_add_tail(<e->lookup_table_list, lookup_table_list_ret);
+
+ ret = determine_stream_size_uniquity(stream_list_ret, wim->lookup_table,
+ filter_ctx_ret);
+ if (ret)
+ return ret;
+
+ if (may_filter_streams(filter_ctx_ret))
+ filter_stream_list_for_write(stream_list_ret, filter_ctx_ret);
+
+ return 0;
+}
+
+static int
+write_wim_streams(WIMStruct *wim, int image, int write_flags,
+ unsigned num_threads,
+ wimlib_progress_func_t progress_func,
+ struct list_head *stream_list_override,
+ struct list_head *lookup_table_list_ret)
+{
+ int ret;
+ struct list_head _stream_list;
+ struct list_head *stream_list;
+ struct wim_lookup_table_entry *lte;
+ struct filter_context _filter_ctx;
+ struct filter_context *filter_ctx;
+
+ if (stream_list_override == NULL) {
+ /* Normal case: prepare stream list from image(s) being written.
+ */
+ stream_list = &_stream_list;
+ filter_ctx = &_filter_ctx;
+ ret = prepare_stream_list_for_write(wim, image, write_flags,
+ stream_list,
+ lookup_table_list_ret,
+ filter_ctx);
+ if (ret)
+ return ret;
+ } else {
+ /* Currently only as a result of wimlib_split() being called:
+ * use stream list already explicitly provided. Use existing
+ * reference counts. */
+ stream_list = stream_list_override;
+ filter_ctx = NULL;
+ INIT_LIST_HEAD(lookup_table_list_ret);
+ list_for_each_entry(lte, stream_list, write_streams_list) {
+ lte->out_refcnt = lte->refcnt;
+ lte->will_be_in_output_wim = 1;
+ lte->unique_size = 0;
+ list_add_tail(<e->lookup_table_list, lookup_table_list_ret);
+ }
+ }
+
+ return wim_write_stream_list(wim,
+ stream_list,
+ write_flags,
+ num_threads,
+ filter_ctx,
+ progress_func);
+}
+
+static int
+write_wim_metadata_resources(WIMStruct *wim, int image, int write_flags,
+ wimlib_progress_func_t progress_func)
+{
+ int ret;
+ int start_image;
+ int end_image;
+ int write_resource_flags;
+
+ if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA) {
+ DEBUG("Not writing any metadata resources.");
+ return 0;
+ }
+
+ write_resource_flags = write_flags_to_resource_flags(write_flags);
+
+ write_resource_flags &= ~WRITE_RESOURCE_FLAG_PACK_STREAMS;
+
+ DEBUG("Writing metadata resources (offset=%"PRIu64")",
+ wim->out_fd.offset);
+
+ if (progress_func)
+ progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN, NULL);
+
+ if (image == WIMLIB_ALL_IMAGES) {
+ start_image = 1;
+ end_image = wim->hdr.image_count;
+ } else {
+ start_image = image;
+ end_image = image;
+ }
+
+ for (int i = start_image; i <= end_image; i++) {
+ struct wim_image_metadata *imd;
+
+ imd = wim->image_metadata[i - 1];
+ /* Build a new metadata resource only if image was modified from
+ * the original (or was newly added). Otherwise just copy the
+ * existing one. */
+ if (imd->modified) {
+ DEBUG("Image %u was modified; building and writing new "
+ "metadata resource", i);
+ ret = write_metadata_resource(wim, i,
+ write_resource_flags);
+ } else if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
+ DEBUG("Image %u was not modified; re-using existing "
+ "metadata resource.", i);
+ stream_set_out_reshdr_for_reuse(imd->metadata_lte);
+ ret = 0;
+ } else {
+ DEBUG("Image %u was not modified; copying existing "
+ "metadata resource.", i);
+ ret = write_wim_resource(imd->metadata_lte,
+ &wim->out_fd,
+ wim->out_compression_type,
+ wim->out_chunk_size,
+ write_resource_flags);
+ }
+ if (ret)
+ return ret;
+ }
+ if (progress_func)
+ progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_END, NULL);
+ return 0;
+}
+
+static int
+open_wim_writable(WIMStruct *wim, const tchar *path, int open_flags)
+{
+ int raw_fd;
+ DEBUG("Opening \"%"TS"\" for writing.", path);
+
+ raw_fd = topen(path, open_flags | O_BINARY, 0644);
+ if (raw_fd < 0) {
+ ERROR_WITH_ERRNO("Failed to open \"%"TS"\" for writing", path);
+ return WIMLIB_ERR_OPEN;
+ }
+ filedes_init(&wim->out_fd, raw_fd);
+ return 0;
+}
+
+static int
+close_wim_writable(WIMStruct *wim, int write_flags)
+{
+ int ret = 0;
+
+ if (!(write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)) {
+ DEBUG("Closing WIM file.");
+ if (filedes_valid(&wim->out_fd))
+ if (filedes_close(&wim->out_fd))
+ ret = WIMLIB_ERR_WRITE;
+ }
+ filedes_invalidate(&wim->out_fd);
+ return ret;
+}
+
+static int
+cmp_streams_by_out_rspec(const void *p1, const void *p2)
+{
+ const struct wim_lookup_table_entry *lte1, *lte2;
+
+ lte1 = *(const struct wim_lookup_table_entry**)p1;
+ lte2 = *(const struct wim_lookup_table_entry**)p2;
+
+ if (lte1->out_reshdr.flags & WIM_RESHDR_FLAG_PACKED_STREAMS) {
+ if (lte2->out_reshdr.flags & WIM_RESHDR_FLAG_PACKED_STREAMS) {
+ if (lte1->out_res_offset_in_wim != lte2->out_res_offset_in_wim)
+ return cmp_u64(lte1->out_res_offset_in_wim,
+ lte2->out_res_offset_in_wim);
+ } else {
+ return 1;
+ }
+ } else {
+ if (lte2->out_reshdr.flags & WIM_RESHDR_FLAG_PACKED_STREAMS)
+ return -1;