+ ctx.progress_data.progress.write_streams.num_threads = 1;
+
+ DEBUG("Actually using %u threads",
+ ctx.progress_data.progress.write_streams.num_threads);
+
+ INIT_LIST_HEAD(&ctx.pending_streams);
+
+ if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PACK_STREAMS) {
+ ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress);
+ if (ret)
+ goto out_destroy_context;
+ }
+
+ /* Read the list of streams needing to be compressed, using the
+ * specified callbacks to process the data as it is read. */
+
+ struct read_stream_list_callbacks cbs = {
+ .begin_stream = write_stream_begin_read,
+ .begin_stream_ctx = &ctx,
+ .consume_chunk = write_stream_process_chunk,
+ .consume_chunk_ctx = &ctx,
+ .end_stream = write_stream_end_read,
+ .end_stream_ctx = &ctx,
+ };
+
+ ret = read_stream_list(stream_list,
+ offsetof(struct wim_lookup_table_entry, write_streams_list),
+ &cbs,
+ STREAM_LIST_ALREADY_SORTED |
+ VERIFY_STREAM_HASHES |
+ COMPUTE_MISSING_STREAM_HASHES);
+
+ if (ret)
+ goto out_destroy_context;
+
+ ret = finish_remaining_chunks(&ctx);
+ if (ret)
+ goto out_destroy_context;
+
+ if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PACK_STREAMS) {
+ struct wim_reshdr reshdr;
+ struct wim_lookup_table_entry *lte;
+ u64 offset_in_res;
+
+ ret = end_write_resource(&ctx, &reshdr);
+ if (ret)
+ goto out_destroy_context;
+
+ DEBUG("Ending packed resource: %lu %lu %lu.",
+ reshdr.offset_in_wim,
+ reshdr.size_in_wim,
+ reshdr.uncompressed_size);
+
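+ /* Fix up the output resource header of each stream written into
+ * this packed resource: the stream's offset and size are relative
+ * to the uncompressed data of the packed resource, while the
+ * location of the packed resource itself in the output WIM is
+ * recorded separately. */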
+ offset_in_res = 0;
+ list_for_each_entry(lte, &ctx.pending_streams, write_streams_list) {
+ lte->out_reshdr.size_in_wim = lte->size;
+ lte->out_reshdr.flags = filter_resource_flags(lte->flags);
+ lte->out_reshdr.flags |= WIM_RESHDR_FLAG_PACKED_STREAMS;
+ lte->out_reshdr.uncompressed_size = 0;
+ lte->out_reshdr.offset_in_wim = offset_in_res;
+ lte->out_res_offset_in_wim = reshdr.offset_in_wim;
+ lte->out_res_size_in_wim = reshdr.size_in_wim;
+ /*lte->out_res_uncompressed_size = reshdr.uncompressed_size;*/
+ offset_in_res += lte->size;
+ }
+ wimlib_assert(offset_in_res == reshdr.uncompressed_size);
+ }
+
+out_write_raw_copy_resources:
+ /* Copy any compressed resources for which the raw data can be reused
+ * without decompression. */
+ ret = write_raw_copy_resources(&raw_copy_resources, ctx.out_fd);
+
+out_destroy_context:
+ if (out_chunk_size > STACK_MAX)
+ FREE(ctx.chunk_buf);
+ FREE(ctx.chunk_csizes);
+ if (ctx.compressor)
+ ctx.compressor->destroy(ctx.compressor);
+ DEBUG("Done (ret=%d)", ret);
+ return ret;
+}
+
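+/* Write a single stream as a resource to the output WIM file, by placing it
+ * on a one-entry stream list and passing that list to write_stream_list()
+ * with stream packing disabled.  */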
+static int
+write_wim_resource(struct wim_lookup_table_entry *lte,
+ struct filedes *out_fd,
+ int out_ctype,
+ u32 out_chunk_size,
+ int write_resource_flags,
+ struct wimlib_lzx_context **comp_ctx)
+{
+ LIST_HEAD(stream_list);
+ list_add(&lte->write_streams_list, &stream_list);
+ lte->will_be_in_output_wim = 1;
+ return write_stream_list(&stream_list,
+ out_fd,
+ write_resource_flags & ~WIMLIB_WRITE_RESOURCE_FLAG_PACK_STREAMS,
+ out_ctype,
+ out_chunk_size,
+ 1,
+ NULL,
+ NULL,
+ comp_ctx,
+ NULL);
+}
+
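+/* Write the contents of the specified in-memory buffer as a resource to the
+ * output WIM file, using a temporary lookup table entry to describe it.  On
+ * success, the header of the written resource is returned in @out_reshdr,
+ * and the SHA1 message digest of the data is returned in @hash if @hash is
+ * non-NULL.  */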
+int
+write_wim_resource_from_buffer(const void *buf, size_t buf_size,
+ int reshdr_flags, struct filedes *out_fd,
+ int out_ctype,
+ u32 out_chunk_size,
+ struct wim_reshdr *out_reshdr,
+ u8 *hash,
+ int write_resource_flags,
+ struct wimlib_lzx_context **comp_ctx)
+{
+ int ret;
+ struct wim_lookup_table_entry *lte;
+
+ /* Set up a temporary lookup table entry to provide to
+ * write_wim_resource(). */
+
+ lte = new_lookup_table_entry();
+ if (lte == NULL)
+ return WIMLIB_ERR_NOMEM;
+
+ lte->resource_location = RESOURCE_IN_ATTACHED_BUFFER;
+ lte->attached_buffer = (void*)buf;
+ lte->size = buf_size;
+ lte->flags = reshdr_flags;
+
+ if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
+ sha1_buffer(buf, buf_size, lte->hash);
+ lte->unhashed = 0;
+ } else {
+ lte->unhashed = 1;
+ }
+
+ ret = write_wim_resource(lte, out_fd, out_ctype, out_chunk_size,
+ write_resource_flags, comp_ctx);
+ if (ret)
+ goto out_free_lte;
+
+ copy_reshdr(out_reshdr, &lte->out_reshdr);
+
+ if (hash)
+ copy_hash(hash, lte->hash);
+ ret = 0;
+out_free_lte:
+ lte->resource_location = RESOURCE_NONEXISTENT;
+ free_lookup_table_entry(lte);
+ return ret;
+}
+
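+/* Hash table that maps stream sizes to the streams having those sizes, used
+ * to determine whether each stream's uncompressed size is unique among the
+ * streams being considered.  */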
+struct stream_size_table {
+ struct hlist_head *array;
+ size_t num_entries;
+ size_t capacity;
+};
+
+static int
+init_stream_size_table(struct stream_size_table *tab, size_t capacity)
+{
+ tab->array = CALLOC(capacity, sizeof(tab->array[0]));
+ if (tab->array == NULL)
+ return WIMLIB_ERR_NOMEM;
+ tab->num_entries = 0;
+ tab->capacity = capacity;
+ return 0;
+}
+
+static void
+destroy_stream_size_table(struct stream_size_table *tab)
+{
+ FREE(tab->array);
+}
+
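+/* Insert a stream into the stream size table.  If another stream with the
+ * same uncompressed size has already been inserted, both streams are marked
+ * as not having a unique size.  */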
+static int
+stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab)
+{
+ struct stream_size_table *tab = _tab;
+ size_t pos;
+ struct wim_lookup_table_entry *same_size_lte;
+ struct hlist_node *tmp;
+
+ pos = hash_u64(lte->size) % tab->capacity;
+ lte->unique_size = 1;
+ hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) {
+ if (same_size_lte->size == lte->size) {
+ lte->unique_size = 0;
+ same_size_lte->unique_size = 0;
+ break;
+ }
+ }
+
+ hlist_add_head(&lte->hash_list_2, &tab->array[pos]);
+ tab->num_entries++;
+ return 0;
+}
+
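+/* Context for determining the streams that need to be written to the output
+ * WIM file.  */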
+struct find_streams_ctx {
+ WIMStruct *wim;
+ int write_flags;
+ struct list_head stream_list;
+ struct stream_size_table stream_size_tab;
+};
+
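+/* Add a stream to the list of streams to be written to the output WIM file
+ * (if not already present), and add @nref to its output reference count.  */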
+static void
+reference_stream_for_write(struct wim_lookup_table_entry *lte,
+ struct list_head *stream_list, u32 nref)
+{
+ if (!lte->will_be_in_output_wim) {
+ lte->out_refcnt = 0;
+ list_add_tail(&lte->write_streams_list, stream_list);
+ lte->will_be_in_output_wim = 1;
+ }
+ lte->out_refcnt += nref;
+}
+
+static int
+fully_reference_stream_for_write(struct wim_lookup_table_entry *lte,
+ void *_stream_list)
+{
+ struct list_head *stream_list = _stream_list;
+ lte->will_be_in_output_wim = 0;
+ reference_stream_for_write(lte, stream_list, lte->refcnt);
+ return 0;
+}
+
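+/* Reference, for writing, all the streams of the specified inode (unnamed
+ * data stream and any alternate data streams), adding one reference per hard
+ * link.  Returns WIMLIB_ERR_RESOURCE_NOT_FOUND if a needed stream is not
+ * present in the lookup table.  */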
+static int
+inode_find_streams_to_reference(const struct wim_inode *inode,
+ const struct wim_lookup_table *table,
+ struct list_head *stream_list)
+{
+ struct wim_lookup_table_entry *lte;
+ unsigned i;
+
+ wimlib_assert(inode->i_nlink > 0);
+
+ for (i = 0; i <= inode->i_num_ads; i++) {
+ lte = inode_stream_lte(inode, i, table);
+ if (lte)
+ reference_stream_for_write(lte, stream_list,
+ inode->i_nlink);
+ else if (!is_zero_hash(inode_stream_hash(inode, i)))
+ return WIMLIB_ERR_RESOURCE_NOT_FOUND;
+ }
+ return 0;
+}
+
+static int
+do_stream_set_not_in_output_wim(struct wim_lookup_table_entry *lte, void *_ignore)
+{
+ lte->will_be_in_output_wim = 0;
+ return 0;
+}
+
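+/* for_image() callback: reference, for writing, all streams of the current
+ * image.  The list to append the streams to is passed in wim->private.  */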
+static int
+image_find_streams_to_reference(WIMStruct *wim)
+{
+ struct wim_image_metadata *imd;
+ struct wim_inode *inode;
+ struct wim_lookup_table_entry *lte;
+ struct list_head *stream_list;
+ int ret;
+
+ imd = wim_get_current_image_metadata(wim);
+
+ image_for_each_unhashed_stream(lte, imd)
+ lte->will_be_in_output_wim = 0;
+
+ stream_list = wim->private;
+ image_for_each_inode(inode, imd) {
+ ret = inode_find_streams_to_reference(inode,
+ wim->lookup_table,
+ stream_list);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
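+/* Build the initial list of streams to include in the output WIM file,
+ * before any filtering is applied.  If @streams_ok and all images are being
+ * written, every stream is assumed to be needed and the existing reference
+ * counts are trusted; otherwise, the images being written are walked to
+ * determine the referenced streams.  */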
+static int
+prepare_unfiltered_list_of_streams_in_output_wim(WIMStruct *wim,
+ int image,
+ int streams_ok,
+ struct list_head *stream_list_ret)
+{
+ int ret;
+
+ INIT_LIST_HEAD(stream_list_ret);
+
+ if (streams_ok && (image == WIMLIB_ALL_IMAGES ||
+ (image == 1 && wim->hdr.image_count == 1)))
+ {
+ /* Fast case: Assume that all streams are being written and
+ * that the reference counts are correct. */
+ struct wim_lookup_table_entry *lte;
+ struct wim_image_metadata *imd;
+ unsigned i;
+
+ for_lookup_table_entry(wim->lookup_table,
+ fully_reference_stream_for_write,
+ stream_list_ret);
+
+ for (i = 0; i < wim->hdr.image_count; i++) {
+ imd = wim->image_metadata[i];
+ image_for_each_unhashed_stream(lte, imd)
+ fully_reference_stream_for_write(lte, stream_list_ret);
+ }
+ } else {
+ /* Slow case: Walk through the images being written and
+ * determine the streams referenced. */
+ for_lookup_table_entry(wim->lookup_table,
+ do_stream_set_not_in_output_wim, NULL);
+ wim->private = stream_list_ret;
+ ret = for_image(wim, image, image_find_streams_to_reference);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+struct insert_other_if_hard_filtered_ctx {
+ struct stream_size_table *tab;
+ struct filter_context *filter_ctx;
+};
+
+static int
+insert_other_if_hard_filtered(struct wim_lookup_table_entry *lte, void *_ctx)
+{
+ struct insert_other_if_hard_filtered_ctx *ctx = _ctx;
+
+ if (!lte->will_be_in_output_wim &&
+ stream_hard_filtered(lte, ctx->filter_ctx))
+ stream_size_table_insert(lte, ctx->tab);
+ return 0;
+}
+
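+/* Mark each stream in @stream_list according to whether its uncompressed
+ * size is unique among all streams being considered, including any
+ * hard-filtered streams that will not be written.  */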
+static int
+determine_stream_size_uniquity(struct list_head *stream_list,
+ struct wim_lookup_table *lt,
+ struct filter_context *filter_ctx)
+{
+ int ret;
+ struct stream_size_table tab;
+ struct wim_lookup_table_entry *lte;
+
+ ret = init_stream_size_table(&tab, lt->capacity);
+ if (ret)
+ return ret;
+
+ if (may_hard_filter_streams(filter_ctx)) {
+ struct insert_other_if_hard_filtered_ctx ctx = {
+ .tab = &tab,
+ .filter_ctx = filter_ctx,
+ };
+ for_lookup_table_entry(lt, insert_other_if_hard_filtered, &ctx);
+ }
+
+ list_for_each_entry(lte, stream_list, write_streams_list)
+ stream_size_table_insert(lte, &tab);
+
+ destroy_stream_size_table(&tab);
+ return 0;
+}
+
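+/* Remove streams that are being filtered out of the write from the list of
+ * streams to be written.  */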
+static void
+filter_stream_list_for_write(struct list_head *stream_list,
+ struct filter_context *filter_ctx)
+{
+ struct wim_lookup_table_entry *lte, *tmp;
+
+ list_for_each_entry_safe(lte, tmp,
+ stream_list, write_streams_list)