+/* Write the next chunk of (typically compressed) data to the output WIM,
+ * handling the writing of the chunk table. */
+static int
+write_chunk(struct write_streams_ctx *ctx, const void *cchunk,
+ size_t csize, size_t usize)
+{
+ int ret;
+
+ struct wim_lookup_table_entry *lte;
+
+ lte = list_entry(ctx->pending_streams.next,
+ struct wim_lookup_table_entry, write_streams_list);
+
+ if (ctx->cur_write_res_offset == 0 &&
+ !(ctx->write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PACK_STREAMS))
+ {
+ /* Starting to write a new stream in non-packed mode. */
+
+ if (ctx->write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
+ int additional_reshdr_flags = 0;
+ if (ctx->compressor != NULL)
+ additional_reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED;
+
+ DEBUG("Writing pipable WIM stream header "
+ "(offset=%"PRIu64")", ctx->out_fd->offset);
+
+ ret = write_pwm_stream_header(lte, ctx->out_fd,
+ additional_reshdr_flags);
+ if (ret)
+ return ret;
+ }
+
+ ret = begin_write_resource(ctx, lte->size);
+ if (ret)
+ return ret;
+ }
+
+ if (ctx->compressor != NULL) {
+ /* Record the compressed chunk size. */
+ wimlib_assert(ctx->chunk_index < ctx->num_alloc_chunks);
+ ctx->chunk_csizes[ctx->chunk_index++] = csize;
+
+ /* If writing a pipable WIM, precede the chunk data with a chunk
+ * header that gives the compressed chunk size. */
+ if (ctx->write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
+ struct pwm_chunk_hdr chunk_hdr = {
+ .compressed_size = cpu_to_le32(csize),
+ };
+ ret = full_write(ctx->out_fd, &chunk_hdr,
+ sizeof(chunk_hdr));
+ if (ret)
+ goto error;
+ }
+ }
+
+ /* Write the chunk data. */
+ ret = full_write(ctx->out_fd, cchunk, csize);
+ if (ret)
+ goto error;
+
+ ctx->cur_write_res_offset += usize;
+
+ do_write_streams_progress(&ctx->progress_data,
+ usize, false, lte);
+
+ if (ctx->cur_write_res_offset == ctx->cur_write_res_size &&
+ !(ctx->write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PACK_STREAMS))
+ {
+ struct wim_lookup_table_entry *lte;
+
+ lte = list_entry(ctx->pending_streams.next,
+ struct wim_lookup_table_entry, write_streams_list);
+ wimlib_assert(ctx->cur_write_res_offset == lte->size);
+
+ /* Finished writing a stream in non-packed mode. */
+
+ ret = end_write_resource(ctx, &lte->out_reshdr);
+ if (ret)
+ return ret;
+
+ wimlib_assert(lte->out_reshdr.uncompressed_size == lte->size);
+
+ lte->out_reshdr.flags = filter_resource_flags(lte->flags);
+ if (ctx->compressor != NULL)
+ lte->out_reshdr.flags |= WIM_RESHDR_FLAG_COMPRESSED;
+
+ list_del(&lte->write_streams_list);
+ ctx->cur_write_res_offset = 0;
+ }
+
+ return 0;
+
+error:
+ ERROR_WITH_ERRNO("Write error");
+ return ret;
+}
+
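+/* Submit a chunk of uncompressed data to be compressed.  If the compressor
+ * cannot accept it yet, drain its output queue by retrieving and writing
+ * already-compressed chunks until it can. */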
+static int
+submit_chunk_for_compression(struct write_streams_ctx *ctx,
+ const void *chunk, size_t size)
+{
+ /* While we are unable to submit the chunk for compression (due to too
+ * many chunks already outstanding), retrieve and write the next
+ * compressed chunk. */
+ while (!ctx->compressor->submit_chunk(ctx->compressor, chunk, size)) {
+ const void *cchunk;
+ unsigned csize;
+ unsigned usize;
+ bool bret;
+ int ret;
+
+ bret = ctx->compressor->get_chunk(ctx->compressor,
+ &cchunk, &csize, &usize);
+
+ wimlib_assert(bret);
+
+ ret = write_chunk(ctx, cchunk, csize, usize);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/* Process the next chunk of data to be written to a WIM resource. */
+static int
+write_stream_process_chunk(const void *chunk, size_t size, void *_ctx)
+{
+ struct write_streams_ctx *ctx = _ctx;
+ int ret;
+ const u8 *chunkptr, *chunkend;
+
+ wimlib_assert(size != 0);
+
+ if (ctx->compressor == NULL) {
+ /* Write chunk uncompressed. */
+ ret = write_chunk(ctx, chunk, size, size);
+ if (ret)
+ return ret;
+ ctx->cur_read_res_offset += size;
+ return 0;
+ }
+
+ /* Submit the chunk for compression, taking into account that the
+ * @size in which the chunk was provided may not match the
+ * @out_chunk_size being used for compression. */
+ chunkptr = chunk;
+ chunkend = chunkptr + size;
+ do {
+ const u8 *resized_chunk;
+ size_t needed_chunk_size;
+
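+ /* Determine the uncompressed size of the next chunk to compress.
+ * In packed mode this is always the full output chunk size;
+ * otherwise the final chunk of the resource may be shorter. */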
+ if (ctx->write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PACK_STREAMS) {
+ needed_chunk_size = ctx->out_chunk_size;
+ } else {
+ u64 res_bytes_remaining;
+
+ res_bytes_remaining = ctx->cur_read_res_size -
+ ctx->cur_read_res_offset;
+ needed_chunk_size = min(ctx->out_chunk_size,
+ ctx->chunk_buf_filled +
+ res_bytes_remaining);
+ }
+
+ if (ctx->chunk_buf_filled == 0 &&
+ chunkend - chunkptr >= needed_chunk_size)
+ {
+ /* No intermediate buffering needed. */
+ resized_chunk = chunkptr;
+ chunkptr += needed_chunk_size;
+ ctx->cur_read_res_offset += needed_chunk_size;
+ } else {
+ /* Intermediate buffering needed. */
+ size_t bytes_consumed;
+
+ bytes_consumed = min(chunkend - chunkptr,
+ needed_chunk_size - ctx->chunk_buf_filled);
+
+ memcpy(&ctx->chunk_buf[ctx->chunk_buf_filled],
+ chunkptr, bytes_consumed);
+
+ chunkptr += bytes_consumed;
+ ctx->cur_read_res_offset += bytes_consumed;
+ ctx->chunk_buf_filled += bytes_consumed;
+ if (ctx->chunk_buf_filled == needed_chunk_size) {
+ resized_chunk = ctx->chunk_buf;
+ ctx->chunk_buf_filled = 0;
+ } else {
+ break;
+ }
+
+ }
+
+ ret = submit_chunk_for_compression(ctx, resized_chunk,
+ needed_chunk_size);
+ if (ret)
+ return ret;
+
+ } while (chunkptr != chunkend);
+ return 0;
+}
+
+/* Finish processing a stream for writing. It may not have been completely
+ * written yet, as the chunk_compressor implementation may still have chunks
+ * buffered or being compressed. */
+static int
+write_stream_end_read(struct wim_lookup_table_entry *lte, int status, void *_ctx)
+{
+ struct write_streams_ctx *ctx = _ctx;
+ if (status == 0)
+ wimlib_assert(ctx->cur_read_res_offset == ctx->cur_read_res_size);
+ if (ctx->stream_was_duplicate) {
+ free_lookup_table_entry(lte);
+ } else if (lte->unhashed && ctx->lookup_table != NULL) {
+ list_del(&lte->unhashed_list);
+ lookup_table_insert(ctx->lookup_table, lte);
+ lte->unhashed = 0;
+ }
+ return status;
+}
+
+/* Compute statistics about a list of streams that will be written.
+ *
+ * Assumes the streams are sorted such that all streams located in each distinct
+ * WIM (specified by WIMStruct) are together. */
+static void
+compute_stream_list_stats(struct list_head *stream_list,
+ struct write_streams_ctx *ctx)
+{
+ struct wim_lookup_table_entry *lte;
+ u64 total_bytes = 0;
+ u64 num_streams = 0;
+ u64 total_parts = 0;
+ WIMStruct *prev_wim_part = NULL;
+
+ list_for_each_entry(lte, stream_list, write_streams_list) {
+ num_streams++;
+ total_bytes += lte->size;
+ if (lte->resource_location == RESOURCE_IN_WIM) {
+ if (prev_wim_part != lte->rspec->wim) {
+ prev_wim_part = lte->rspec->wim;
+ total_parts++;
+ }
+ }
+ }
+ ctx->progress_data.progress.write_streams.total_bytes = total_bytes;
+ ctx->progress_data.progress.write_streams.total_streams = num_streams;
+ ctx->progress_data.progress.write_streams.completed_bytes = 0;
+ ctx->progress_data.progress.write_streams.completed_streams = 0;
+ ctx->progress_data.progress.write_streams.compression_type = ctx->out_ctype;
+ ctx->progress_data.progress.write_streams.total_parts = total_parts;
+ ctx->progress_data.progress.write_streams.completed_parts = 0;
+ ctx->progress_data.next_progress = 0;
+ ctx->progress_data.prev_wim_part = NULL;
+}
+
+/* Find streams in @stream_list that can be copied to the output WIM in raw form
+ * rather than compressed. Delete these streams from @stream_list, and move one
+ * per resource to @raw_copy_resources. Return the total uncompressed size of
+ * the streams that need to be compressed. */
+static u64
+find_raw_copy_resources(struct list_head *stream_list,
+ int write_resource_flags,
+ int out_ctype,
+ u32 out_chunk_size,
+ struct list_head *raw_copy_resources)
+{
+ struct wim_lookup_table_entry *lte, *tmp;
+ u64 num_bytes_to_compress = 0;
+
+ INIT_LIST_HEAD(raw_copy_resources);
+
+ /* Initialize temporary raw_copy_ok flag. */
+ list_for_each_entry(lte, stream_list, write_streams_list)
+ if (lte->resource_location == RESOURCE_IN_WIM)
+ lte->rspec->raw_copy_ok = 0;
+
+ list_for_each_entry_safe(lte, tmp, stream_list, write_streams_list) {
+ if (lte->resource_location == RESOURCE_IN_WIM &&
+ lte->rspec->raw_copy_ok)
+ {
+ list_del(&lte->write_streams_list);
+ } else if (can_raw_copy(lte, write_resource_flags,
+ out_ctype, out_chunk_size))
+ {
+ lte->rspec->raw_copy_ok = 1;
+ list_move_tail(&lte->write_streams_list,
+ raw_copy_resources);
+ } else {
+ num_bytes_to_compress += lte->size;
+ }
+ }
+
+ return num_bytes_to_compress;
+}
+
+/* Copy a raw compressed resource located in another WIM file to the WIM file
+ * being written. */
+static int
+write_raw_copy_resource(struct wim_resource_spec *in_rspec,
+ struct filedes *out_fd)
+{
+ u64 cur_read_offset;
+ u64 end_read_offset;
+ u8 buf[BUFFER_SIZE];
+ size_t bytes_to_read;
+ int ret;
+ struct filedes *in_fd;
+ struct wim_lookup_table_entry *lte;
+ u64 out_offset_in_wim;
+
+ DEBUG("Copying raw compressed data (size_in_wim=%"PRIu64", "
+ "uncompressed_size=%"PRIu64")",
+ in_rspec->size_in_wim, in_rspec->uncompressed_size);
+
+ /* Copy the raw data. */
+ cur_read_offset = in_rspec->offset_in_wim;
+ end_read_offset = cur_read_offset + in_rspec->size_in_wim;
+
+ out_offset_in_wim = out_fd->offset;
+
+ if (in_rspec->is_pipable) {
+ if (cur_read_offset < sizeof(struct pwm_stream_hdr))
+ return WIMLIB_ERR_INVALID_PIPABLE_WIM;
+ cur_read_offset -= sizeof(struct pwm_stream_hdr);
+ out_offset_in_wim += sizeof(struct pwm_stream_hdr);
+ }
+ in_fd = &in_rspec->wim->in_fd;
+ while (cur_read_offset != end_read_offset) {
+
+ bytes_to_read = min(sizeof(buf), end_read_offset - cur_read_offset);
+
+ ret = full_pread(in_fd, buf, bytes_to_read, cur_read_offset);
+ if (ret)
+ return ret;
+
+ ret = full_write(out_fd, buf, bytes_to_read);
+ if (ret)
+ return ret;
+
+ cur_read_offset += bytes_to_read;
+ }
+
+ list_for_each_entry(lte, &in_rspec->stream_list, rspec_node) {
+ if (lte->will_be_in_output_wim) {
+ stream_set_out_reshdr_for_reuse(lte);
+ if (in_rspec->flags & WIM_RESHDR_FLAG_PACKED_STREAMS)
+ lte->out_res_offset_in_wim = out_offset_in_wim;
+ else
+ lte->out_reshdr.offset_in_wim = out_offset_in_wim;
+
+ }
+ }
+ return 0;
+}
+
+/* Copy a list of raw compressed resources located in other WIM file(s) to the
+ * WIM file being written. */
+static int
+write_raw_copy_resources(struct list_head *raw_copy_resources,
+ struct filedes *out_fd)
+{
+ struct wim_lookup_table_entry *lte;
+ int ret;
+
+ list_for_each_entry(lte, raw_copy_resources, write_streams_list) {
+ ret = write_raw_copy_resource(lte->rspec, out_fd);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/* Wait for and write all chunks pending in the compressor. */
+static int
+finish_remaining_chunks(struct write_streams_ctx *ctx)
+{
+ const void *cdata;
+ unsigned csize;
+ unsigned usize;
+ int ret;
+
+ if (ctx->compressor == NULL)
+ return 0;
+
+ if (ctx->chunk_buf_filled != 0) {
+ ret = submit_chunk_for_compression(ctx, ctx->chunk_buf,
+ ctx->chunk_buf_filled);
+ if (ret)
+ return ret;
+ }
+
+ while (ctx->compressor->get_chunk(ctx->compressor, &cdata, &csize, &usize)) {
+ ret = write_chunk(ctx, cdata, csize, usize);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
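+/* Remove zero-length streams from the list of streams to write.  No resource
+ * data is written for them; their output resource headers simply get zero
+ * offset and size. */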
+static void
+remove_zero_length_streams(struct list_head *stream_list)
+{
+ struct wim_lookup_table_entry *lte, *tmp;
+
+ list_for_each_entry_safe(lte, tmp, stream_list, write_streams_list) {
+ wimlib_assert(lte->will_be_in_output_wim);
+ if (lte->size == 0) {
+ list_del(&lte->write_streams_list);
+ lte->out_reshdr.offset_in_wim = 0;
+ lte->out_reshdr.size_in_wim = 0;
+ lte->out_reshdr.uncompressed_size = 0;
+ lte->out_reshdr.flags = filter_resource_flags(lte->flags);
+ }
+ }
+}
+
+static int
+write_stream_list(struct list_head *stream_list,
+ struct filedes *out_fd,
+ int write_resource_flags,
+ int out_ctype,
+ u32 out_chunk_size,
+ unsigned num_threads,
+ struct wim_lookup_table *lookup_table,
+ struct filter_context *filter_ctx,
+ struct wimlib_lzx_context **comp_ctx,
+ wimlib_progress_func_t progress_func)
+{
+ int ret;
+ struct write_streams_ctx ctx;
+ struct list_head raw_copy_resources;
+
+ remove_zero_length_streams(stream_list);
+
+ if (list_empty(stream_list)) {
+ DEBUG("No streams to write.");
+ return 0;
+ }
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ /* Pre-sorting the streams is required for compute_stream_list_stats().
+ * Afterwards, read_stream_list() need not sort them again. */
+ ret = sort_stream_list_by_sequential_order(stream_list,
+ offsetof(struct wim_lookup_table_entry,
+ write_streams_list));
+ if (ret)
+ return ret;
+
+ ctx.out_fd = out_fd;
+ ctx.lookup_table = lookup_table;
+ ctx.out_ctype = out_ctype;
+ ctx.out_chunk_size = out_chunk_size;
+ ctx.write_resource_flags = write_resource_flags;
+ ctx.filter_ctx = filter_ctx;
+
+ if (out_chunk_size <= STACK_MAX) {
+ ctx.chunk_buf = alloca(out_chunk_size);
+ } else {
+ ctx.chunk_buf = MALLOC(out_chunk_size);
+ if (ctx.chunk_buf == NULL) {
+ ret = WIMLIB_ERR_NOMEM;
+ goto out_destroy_context;
+ }
+ }
+ ctx.chunk_buf_filled = 0;
+
+ compute_stream_list_stats(stream_list, &ctx);
+
+ ctx.progress_data.progress_func = progress_func;
+
+ ctx.num_bytes_to_compress = find_raw_copy_resources(stream_list,
+ write_resource_flags,
+ out_ctype,
+ out_chunk_size,
+ &raw_copy_resources);
+
+ DEBUG("Writing stream list "
+ "(offset = %"PRIu64", write_resource_flags=0x%08x, "
+ "out_ctype=%d, out_chunk_size=%u, num_threads=%u, "
+ "total_bytes=%"PRIu64", num_bytes_to_compress=%"PRIu64")",
+ out_fd->offset, write_resource_flags,
+ out_ctype, out_chunk_size, num_threads,
+ ctx.progress_data.progress.write_streams.total_bytes,
+ ctx.num_bytes_to_compress);
+
+ if (ctx.num_bytes_to_compress == 0) {
+ DEBUG("No compression needed; skipping to raw copy!");
+ goto out_write_raw_copy_resources;
+ }
+
+ /* Unless uncompressed output was required, allocate a chunk_compressor
+ * to do compression.  There are serial and parallel implementations of
+ * the chunk_compressor interface.  We default to parallel using the
+ * specified number of threads, unless the upper bound on the number of
+ * bytes needing to be compressed is less than 2000000 (heuristic value). */
+ if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
+
+ if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZMS &&
+ ctx.lookup_table != NULL) {
+ WARNING("LZMS compression not implemented; data will "
+ "actually be written uncompressed.");
+ }
+
+ if (ctx.num_bytes_to_compress >= 2000000) {
+ ret = new_parallel_chunk_compressor(out_ctype,
+ out_chunk_size,
+ num_threads, 0,
+ &ctx.compressor);
+ if (ret) {
+ DEBUG("Couldn't create parallel chunk compressor "
+ "(status %d)", ret);
+ }
+ }
+
+ if (ctx.compressor == NULL) {
+ if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX) {
+ ret = wimlib_lzx_alloc_context(out_chunk_size,
+ NULL,
+ comp_ctx);
+ if (ret)
+ goto out_destroy_context;
+ }
+ ret = new_serial_chunk_compressor(out_ctype, out_chunk_size,
+ *comp_ctx, &ctx.compressor);
+ if (ret)
+ goto out_destroy_context;
+ }
+ }
+
+ if (ctx.compressor)
+ ctx.progress_data.progress.write_streams.num_threads = ctx.compressor->num_threads;
+ else
+ ctx.progress_data.progress.write_streams.num_threads = 1;
+
+ DEBUG("Actually using %u threads",
+ ctx.progress_data.progress.write_streams.num_threads);
+
+ INIT_LIST_HEAD(&ctx.pending_streams);
+
+ if (ctx.progress_data.progress_func) {
+ (*ctx.progress_data.progress_func)(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
+ &ctx.progress_data.progress);
+ }
+
+ if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PACK_STREAMS) {
+ ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress);
+ if (ret)
+ goto out_destroy_context;
+ }
+
+ /* Read the list of streams needing to be compressed, using the
+ * specified callbacks to execute processing of the data. */
+
+ struct read_stream_list_callbacks cbs = {
+ .begin_stream = write_stream_begin_read,
+ .begin_stream_ctx = &ctx,
+ .consume_chunk = write_stream_process_chunk,
+ .consume_chunk_ctx = &ctx,
+ .end_stream = write_stream_end_read,
+ .end_stream_ctx = &ctx,
+ };
+
+ ret = read_stream_list(stream_list,
+ offsetof(struct wim_lookup_table_entry, write_streams_list),
+ &cbs,
+ STREAM_LIST_ALREADY_SORTED |
+ VERIFY_STREAM_HASHES |
+ COMPUTE_MISSING_STREAM_HASHES);
+
+ if (ret)
+ goto out_destroy_context;
+
+ ret = finish_remaining_chunks(&ctx);
+ if (ret)
+ goto out_destroy_context;
+
+ if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PACK_STREAMS) {
+ struct wim_reshdr reshdr;
+ struct wim_lookup_table_entry *lte;
+ u64 offset_in_res;
+
+ ret = end_write_resource(&ctx, &reshdr);
+ if (ret)
+ goto out_destroy_context;
+
+ DEBUG("Ending packed resource: %"PRIu64" %"PRIu64" %"PRIu64".",
+ reshdr.offset_in_wim,
+ reshdr.size_in_wim,
+ reshdr.uncompressed_size);
+
+ offset_in_res = 0;
+ list_for_each_entry(lte, &ctx.pending_streams, write_streams_list) {
+ lte->out_reshdr.size_in_wim = lte->size;
+ lte->out_reshdr.flags = filter_resource_flags(lte->flags);
+ lte->out_reshdr.flags |= WIM_RESHDR_FLAG_PACKED_STREAMS;
+ lte->out_reshdr.uncompressed_size = 0;
+ lte->out_reshdr.offset_in_wim = offset_in_res;
+ lte->out_res_offset_in_wim = reshdr.offset_in_wim;
+ lte->out_res_size_in_wim = reshdr.size_in_wim;
+ /*lte->out_res_uncompressed_size = reshdr.uncompressed_size;*/
+ offset_in_res += lte->size;
+ }
+ wimlib_assert(offset_in_res == reshdr.uncompressed_size);
+ }
+
+out_write_raw_copy_resources:
+ /* Copy any compressed resources for which the raw data can be reused
+ * without decompression. */
+ ret = write_raw_copy_resources(&raw_copy_resources, ctx.out_fd);
+
+out_destroy_context:
+ if (out_chunk_size > STACK_MAX)
+ FREE(ctx.chunk_buf);
+ FREE(ctx.chunk_csizes);
+ if (ctx.compressor)
+ ctx.compressor->destroy(ctx.compressor);
+ DEBUG("Done (ret=%d)", ret);
+ return ret;
+}
+
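+/* Write a single stream (lookup table entry) to the output WIM as a new,
+ * non-packed resource. */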
+static int
+write_wim_resource(struct wim_lookup_table_entry *lte,
+ struct filedes *out_fd,
+ int out_ctype,
+ u32 out_chunk_size,
+ int write_resource_flags,
+ struct wimlib_lzx_context **comp_ctx)
+{
+ LIST_HEAD(stream_list);
+ list_add(&lte->write_streams_list, &stream_list);
+ lte->will_be_in_output_wim = 1;
+ return write_stream_list(&stream_list,
+ out_fd,
+ write_resource_flags & ~WIMLIB_WRITE_RESOURCE_FLAG_PACK_STREAMS,
+ out_ctype,
+ out_chunk_size,
+ 1,
+ NULL,
+ NULL,
+ comp_ctx,
+ NULL);
+}
+
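+/* Write the data in @buf to the output WIM as a new resource, returning its
+ * resource header in @out_reshdr and, if @hash is non-NULL, its SHA-1 message
+ * digest in @hash. */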
+int
+write_wim_resource_from_buffer(const void *buf, size_t buf_size,
+ int reshdr_flags, struct filedes *out_fd,
+ int out_ctype,
+ u32 out_chunk_size,
+ struct wim_reshdr *out_reshdr,
+ u8 *hash,
+ int write_resource_flags,
+ struct wimlib_lzx_context **comp_ctx)
+{
+ int ret;
+ struct wim_lookup_table_entry *lte;
+
+ /* Set up a temporary lookup table entry to provide to
+ * write_wim_resource(). */
+
+ lte = new_lookup_table_entry();
+ if (lte == NULL)
+ return WIMLIB_ERR_NOMEM;
+
+ lte->resource_location = RESOURCE_IN_ATTACHED_BUFFER;
+ lte->attached_buffer = (void*)buf;
+ lte->size = buf_size;
+ lte->flags = reshdr_flags;
+
+ if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
+ sha1_buffer(buf, buf_size, lte->hash);
+ lte->unhashed = 0;
+ } else {
+ lte->unhashed = 1;
+ }
+
+ ret = write_wim_resource(lte, out_fd, out_ctype, out_chunk_size,
+ write_resource_flags, comp_ctx);
+ if (ret)
+ goto out_free_lte;
+
+ copy_reshdr(out_reshdr, &lte->out_reshdr);
+
+ if (hash)
+ copy_hash(hash, lte->hash);
+ ret = 0;
+out_free_lte:
+ lte->resource_location = RESOURCE_NONEXISTENT;
+ free_lookup_table_entry(lte);
+ return ret;
+}
+
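+/* Hash table that indexes lookup table entries by uncompressed size, used to
+ * flag streams whose size is shared with no other stream being considered (a
+ * stream with a unique size cannot be a duplicate of any of the others). */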
+struct stream_size_table {
+ struct hlist_head *array;
+ size_t num_entries;
+ size_t capacity;
+};
+
+static int
+init_stream_size_table(struct stream_size_table *tab, size_t capacity)
+{
+ tab->array = CALLOC(capacity, sizeof(tab->array[0]));
+ if (tab->array == NULL)
+ return WIMLIB_ERR_NOMEM;
+ tab->num_entries = 0;
+ tab->capacity = capacity;
+ return 0;
+}
+
+static void
+destroy_stream_size_table(struct stream_size_table *tab)
+{
+ FREE(tab->array);
+}
+
+static int
+stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab)
+{
+ struct stream_size_table *tab = _tab;
+ size_t pos;
+ struct wim_lookup_table_entry *same_size_lte;
+ struct hlist_node *tmp;
+
+ pos = hash_u64(lte->size) % tab->capacity;
+ lte->unique_size = 1;
+ hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) {
+ if (same_size_lte->size == lte->size) {
+ lte->unique_size = 0;
+ same_size_lte->unique_size = 0;
+ break;
+ }
+ }
+
+ hlist_add_head(&lte->hash_list_2, &tab->array[pos]);
+ tab->num_entries++;
+ return 0;
+}
+
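+/* Context for gathering the list of streams to be written to the output WIM. */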
+struct find_streams_ctx {
+ WIMStruct *wim;
+ int write_flags;
+ struct list_head stream_list;
+ struct stream_size_table stream_size_tab;
+};
+
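+/* Mark @lte as needing to be written to the output WIM, adding it to
+ * @stream_list the first time it is referenced, and account for @nref
+ * additional references to it. */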
+static void
+reference_stream_for_write(struct wim_lookup_table_entry *lte,
+ struct list_head *stream_list, u32 nref)
+{
+ if (!lte->will_be_in_output_wim) {
+ lte->out_refcnt = 0;
+ list_add_tail(&lte->write_streams_list, stream_list);
+ lte->will_be_in_output_wim = 1;
+ }
+ lte->out_refcnt += nref;
+}
+
+static int
+fully_reference_stream_for_write(struct wim_lookup_table_entry *lte,
+ void *_stream_list)
+{
+ struct list_head *stream_list = _stream_list;
+ lte->will_be_in_output_wim = 0;
+ reference_stream_for_write(lte, stream_list, lte->refcnt);
+ return 0;
+}
+
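+/* Reference, for writing, each stream used by @inode (its unnamed data stream
+ * and any alternate data streams), once per link to the inode.  Fails if a
+ * needed stream cannot be found in @table. */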
+static int
+inode_find_streams_to_reference(const struct wim_inode *inode,
+ const struct wim_lookup_table *table,
+ struct list_head *stream_list)
+{
+ struct wim_lookup_table_entry *lte;
+ unsigned i;
+
+ wimlib_assert(inode->i_nlink > 0);
+
+ for (i = 0; i <= inode->i_num_ads; i++) {
+ lte = inode_stream_lte(inode, i, table);
+ if (lte)
+ reference_stream_for_write(lte, stream_list,
+ inode->i_nlink);
+ else if (!is_zero_hash(inode_stream_hash(inode, i)))
+ return WIMLIB_ERR_RESOURCE_NOT_FOUND;
+ }
+ return 0;
+}
+
+static int
+do_stream_set_not_in_output_wim(struct wim_lookup_table_entry *lte, void *_ignore)
+{
+ lte->will_be_in_output_wim = 0;
+ return 0;
+}
+
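+/* for_image() callback: reference, for writing, all streams used by the
+ * current image. */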
+static int
+image_find_streams_to_reference(WIMStruct *wim)
+{
+ struct wim_image_metadata *imd;
+ struct wim_inode *inode;
+ struct wim_lookup_table_entry *lte;
+ struct list_head *stream_list;
+ int ret;
+
+ imd = wim_get_current_image_metadata(wim);
+
+ image_for_each_unhashed_stream(lte, imd)
+ lte->will_be_in_output_wim = 0;
+
+ stream_list = wim->private;
+ image_for_each_inode(inode, imd) {
+ ret = inode_find_streams_to_reference(inode,
+ wim->lookup_table,
+ stream_list);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
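+/* Build the unfiltered list of streams to include in the output WIM.  In the
+ * fast case (all images being written and stream reference counts assumed
+ * valid), every stream in the lookup table and every unhashed stream is
+ * referenced directly; otherwise the metadata of each image being written is
+ * walked to find the referenced streams. */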
+static int
+prepare_unfiltered_list_of_streams_in_output_wim(WIMStruct *wim,
+ int image,
+ int streams_ok,
+ struct list_head *stream_list_ret)
+{
+ int ret;
+
+ INIT_LIST_HEAD(stream_list_ret);
+
+ if (streams_ok && (image == WIMLIB_ALL_IMAGES ||
+ (image == 1 && wim->hdr.image_count == 1)))
+ {
+ /* Fast case: Assume that all streams are being written and
+ * that the reference counts are correct. */
+ struct wim_lookup_table_entry *lte;
+ struct wim_image_metadata *imd;
+ unsigned i;
+
+ for_lookup_table_entry(wim->lookup_table,
+ fully_reference_stream_for_write,
+ stream_list_ret);
+
+ for (i = 0; i < wim->hdr.image_count; i++) {
+ imd = wim->image_metadata[i];
+ image_for_each_unhashed_stream(lte, imd)
+ fully_reference_stream_for_write(lte, stream_list_ret);
+ }
+ } else {
+ /* Slow case: Walk through the images being written and
+ * determine the streams referenced. */
+ for_lookup_table_entry(wim->lookup_table,
+ do_stream_set_not_in_output_wim, NULL);
+ wim->private = stream_list_ret;
+ ret = for_image(wim, image, image_find_streams_to_reference);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+struct insert_other_if_hard_filtered_ctx {
+ struct stream_size_table *tab;
+ struct filter_context *filter_ctx;
+};
+
+static int
+insert_other_if_hard_filtered(struct wim_lookup_table_entry *lte, void *_ctx)
+{
+ struct insert_other_if_hard_filtered_ctx *ctx = _ctx;
+
+ if (!lte->will_be_in_output_wim &&
+ stream_hard_filtered(lte, ctx->filter_ctx))
+ stream_size_table_insert(lte, ctx->tab);
+ return 0;
+}
+
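+/* Set the unique_size flag on each stream in @stream_list whose uncompressed
+ * size is shared with no other stream under consideration, including
+ * hard-filtered streams that are not in the list. */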
+static int
+determine_stream_size_uniquity(struct list_head *stream_list,
+ struct wim_lookup_table *lt,
+ struct filter_context *filter_ctx)
+{
+ int ret;
+ struct stream_size_table tab;
+ struct wim_lookup_table_entry *lte;
+
+ ret = init_stream_size_table(&tab, lt->capacity);
+ if (ret)
+ return ret;
+
+ if (may_hard_filter_streams(filter_ctx)) {
+ struct insert_other_if_hard_filtered_ctx ctx = {
+ .tab = &tab,
+ .filter_ctx = filter_ctx,
+ };
+ for_lookup_table_entry(lt, insert_other_if_hard_filtered, &ctx);
+ }
+
+ list_for_each_entry(lte, stream_list, write_streams_list)
+ stream_size_table_insert(lte, &tab);
+
+ destroy_stream_size_table(&tab);
+ return 0;
+}
+
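+/* Remove from @stream_list any streams excluded by @filter_ctx.  Streams that
+ * are hard-filtered are also removed from the list of streams to include in
+ * the output WIM's lookup table. */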
+static void
+filter_stream_list_for_write(struct list_head *stream_list,
+ struct filter_context *filter_ctx)
+{
+ struct wim_lookup_table_entry *lte, *tmp;
+
+ list_for_each_entry_safe(lte, tmp,
+ stream_list, write_streams_list)
+ {
+ int status = stream_filtered(lte, filter_ctx);
+
+ if (status == 0) {
+ /* Not filtered. */
+ continue;
+ } else {
+ if (status > 0) {
+ /* Soft filtered. */
+ } else {
+ /* Hard filtered. */
+ lte->will_be_in_output_wim = 0;
+ list_del(&lte->lookup_table_list);
+ }
+ list_del(&lte->write_streams_list);
+ }
+ }
+}
+
+/*
+ * prepare_stream_list_for_write() -
+ *
+ * Prepare the list of streams to write for writing a WIM containing the
+ * specified image(s) with the specified write flags.
+ *
+ * @wim
+ * The WIMStruct on whose behalf the write is occurring.
+ *
+ * @image
+ * Image(s) from the WIM to write; may be WIMLIB_ALL_IMAGES.
+ *
+ * @write_flags
+ * WIMLIB_WRITE_FLAG_* flags for the write operation:
+ *
+ * STREAMS_OK: For writes of all images, assume that all streams in the
+ * lookup table of @wim and the per-image lists of unhashed streams should
+ * be taken as-is, and image metadata should not be searched for
+ * references. This does not exclude filtering with OVERWRITE and
+ * SKIP_EXTERNAL_WIMS, below.
+ *
+ * OVERWRITE: Streams already present in @wim shall not be returned in
+ * @stream_list_ret.
+ *
+ * SKIP_EXTERNAL_WIMS: Streams already present in a WIM file, but not
+ * @wim, shall be returned in neither @stream_list_ret nor
+ * @lookup_table_list_ret.
+ *
+ * @stream_list_ret
+ * List of streams, linked by write_streams_list, that need to be written
+ * will be returned here.
+ *
+ * Note that this function assumes that unhashed streams will be written;
+ * it does not take into account that they may become duplicates when
+ * actually hashed.
+ *
+ * @lookup_table_list_ret
+ * List of streams, linked by lookup_table_list, that need to be included
+ * in the WIM's lookup table will be returned here. This will be a
+ * superset of the streams in @stream_list_ret.
+ *
+ * This list will be a proper superset of @stream_list_ret if and only if
+ * WIMLIB_WRITE_FLAG_OVERWRITE was specified in @write_flags and some of
+ * the streams that would otherwise need to be written were already located
+ * in the WIM file.
+ *
+ * All streams in this list will have @out_refcnt set to the number of
+ * references to the stream in the output WIM. If
+ * WIMLIB_WRITE_FLAG_STREAMS_OK was specified in @write_flags, @out_refcnt
+ * may be as low as 0.
+ *
+ * @filter_ctx_ret
+ * A context for queries of stream filter status with stream_filtered() is
+ * returned in this location.
+ *
+ * In addition, @will_be_in_output_wim will be set to 1 in all stream entries
+ * inserted into @lookup_table_list_ret and to 0 in all stream entries in the
+ * lookup table of @wim not inserted into @lookup_table_list_ret.
+ *
+ * Furthermore, @unique_size will be set to 1 on all stream entries in
+ * @stream_list_ret that have unique size among all stream entries in
+ * @stream_list_ret and among all stream entries in the lookup table of @wim
+ * that are ineligible for being written due to filtering.
+ *
+ * Returns 0 on success; nonzero on read error, memory allocation error, or
+ * otherwise.