+ /* Was this the last chunk of the stream? If so, finish
+ * it. */
+ if (list_empty(&cur_lte->msg_list) &&
+ msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks)
+ {
+ DEBUG2("Finish wim chunk tab");
+ u64 res_csize;
+ ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab,
+ ctx->out_fp,
+ &res_csize);
+ if (ret)
+ return ret;
+
+ list_del(&cur_lte->being_compressed_list);
+#if 0
+ if (res_csize >= wim_resource_size(cur_lte)) {
+ /* Oops! We compressed the resource to
+ * larger than the original size. Write
+ * the resource uncompressed instead. */
+ ret = write_uncompressed_resource_and_truncate(
+ cur_lte,
+ ctx->out_fp,
+ ctx->cur_chunk_tab->file_offset,
+ &cur_lte->output_resource_entry);
+ if (ret)
+ goto out;
+ } else
+#endif
+ {
+ cur_lte->output_resource_entry.size =
+ res_csize;
+
+ cur_lte->output_resource_entry.original_size =
+ cur_lte->resource_entry.original_size;
+
+ cur_lte->output_resource_entry.offset =
+ ctx->cur_chunk_tab->file_offset;
+
+ cur_lte->output_resource_entry.flags =
+ cur_lte->resource_entry.flags |
+ WIM_RESHDR_FLAG_COMPRESSED;
+ }
+ do_write_streams_progress(ctx->progress, ctx->progress_func,
+ wim_resource_size(cur_lte));
+ FREE(ctx->cur_chunk_tab);
+ ctx->cur_chunk_tab = NULL;
+
+ /* Since we just finished writing a stream, write any
+ * streams that have been added to the serial_streams
+ * list for direct writing by the main thread (e.g.
+ * resources that don't need to be compressed because
+ * the desired compression type is the same as the
+ * previous compression type). */
+ ret = do_write_stream_list_serial(&ctx->serial_streams,
+ ctx->lookup_table,
+ ctx->out_fp,
+ ctx->out_ctype,
+ ctx->write_resource_flags,
+ ctx->progress_func,
+ ctx->progress);
+ if (ret)
+ return ret;
+ if (list_empty(&ctx->outstanding_streams)) {
+ cur_lte = NULL;
+ } else {
+ cur_lte = container_of(ctx->outstanding_streams.next,
+ struct wim_lookup_table_entry,
+ being_compressed_list);
+ #ifdef ENABLE_MORE_DEBUG
+ DEBUG2("Advance to stream:");
+ print_lookup_table_entry(cur_lte, stderr);
+ #endif
+ }
+ }
+ }
+ return 0;
+}
+
/*
 * Callback invoked by read_resource_prefix() for each uncompressed chunk of
 * the stream currently being submitted for parallel compression.
 *
 * The chunk is hashed into the running SHA-1 context, then copied into the
 * current in-progress 'struct message'.  When a message fills up (or the
 * stream ends on a message boundary), the message is queued for the
 * compressor threads.
 *
 * @chunk:      Pointer to the uncompressed chunk data.
 * @chunk_size: Size of @chunk in bytes (<= WIM chunk size).
 * @_ctx:       The 'struct main_writer_thread_ctx'.
 *
 * Returns 0 on success, or an error code from receive_compressed_chunks().
 */
static int
main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx)
{
	struct main_writer_thread_ctx *ctx = _ctx;
	int ret;
	struct message *next_msg;
	u64 next_chunk_in_msg;

	DEBUG2("chunk_size=%zu, wim_resource_size(next_lte)=%"PRIu64,
	       chunk_size, wim_resource_size(ctx->next_lte));

	/* Update the running checksum of the stream being read. */
	sha1_update(&ctx->next_sha_ctx, chunk, chunk_size);
	next_msg = ctx->next_msg;
	if (!next_msg) {
		/* Start filling in a new message */

		DEBUG2("Start new msg");

		while (list_empty(&ctx->available_msgs)) {
			/* No message available; receive messages, writing
			 * compressed data. */
			DEBUG2("No msgs available!");
			ret = receive_compressed_chunks(ctx);
			if (ret)
				return ret;
		}

		/* Take a free message off the available list and set it up to
		 * cover the next run of chunks in the stream. */
		next_msg = container_of(ctx->available_msgs.next,
					struct message, list);
		list_del(&next_msg->list);
		next_msg->complete = false;
		next_msg->begin_chunk = ctx->next_chunk;
		next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG,
					   ctx->next_num_chunks - ctx->next_chunk);
		DEBUG2("next_msg {begin_chunk=%"PRIu64", num_chunks=%"PRIu64"}",
		       next_msg->begin_chunk, next_msg->num_chunks);
		ctx->next_msg = next_msg;
	}

	/* Index of this chunk within the current message. */
	next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk;

	/* Fill in the next chunk to compress */
	next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size;
	memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg],
	       chunk, chunk_size);
	ctx->next_chunk++;
	if (++next_chunk_in_msg == next_msg->num_chunks) {
		DEBUG2("Sending message %p", next_msg);
		/* Send off an array of chunks to compress */
		list_add_tail(&next_msg->list, &ctx->next_lte->msg_list);
		shared_queue_put(ctx->res_to_compress_queue, next_msg);
		++ctx->num_outstanding_messages;
		ctx->next_msg = NULL;
	}
	return 0;
}
+
+static int
+main_writer_thread_finish(void *_ctx)
+{
+ struct main_writer_thread_ctx *ctx = _ctx;
+ int ret;
+ DEBUG2("finishing");
+ while (ctx->num_outstanding_messages != 0) {
+ ret = receive_compressed_chunks(ctx);
+ if (ret)
+ return ret;
+ }
+ wimlib_assert(list_empty(&ctx->outstanding_streams));
+ return do_write_stream_list_serial(&ctx->serial_streams,
+ ctx->lookup_table,
+ ctx->out_fp,
+ ctx->out_ctype,
+ ctx->write_resource_flags,
+ ctx->progress_func,
+ ctx->progress);
+}
+
+static int
+submit_stream_for_compression(struct wim_lookup_table_entry *lte,
+ struct main_writer_thread_ctx *ctx)
+{
+ int ret;
+
+#ifdef ENABLE_MORE_DEBUG
+ DEBUG2("Submit for compression:");
+ print_lookup_table_entry(lte, stderr);
+#endif
+
+ sha1_init(&ctx->next_sha_ctx);
+ ctx->next_chunk = 0;
+ ctx->next_num_chunks = wim_resource_chunks(lte);
+ ctx->next_lte = lte;
+ INIT_LIST_HEAD(<e->msg_list);
+ list_add_tail(<e->being_compressed_list, &ctx->outstanding_streams);
+ ret = read_resource_prefix(lte, wim_resource_size(lte),
+ main_writer_thread_cb, ctx, 0);
+ if (ret == 0) {
+ wimlib_assert(ctx->next_chunk == ctx->next_num_chunks);
+ ret = finalize_and_check_sha1(&ctx->next_sha_ctx, lte);
+ }
+ return ret;
+}
+
+static int
+main_thread_process_next_stream(struct wim_lookup_table_entry *lte, void *_ctx)
+{
+ struct main_writer_thread_ctx *ctx = _ctx;
+ int ret;
+
+ if (wim_resource_size(lte) < 1000 ||
+ ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
+ (lte->resource_location == RESOURCE_IN_WIM &&
+ !(ctx->write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) &&
+ wimlib_get_compression_type(lte->wim) == ctx->out_ctype))
+ {
+ list_add_tail(<e->write_streams_list, &ctx->serial_streams);
+ lte->deferred = 1;
+ ret = 0;
+ } else {
+ ret = submit_stream_for_compression(lte, ctx);
+ }
+ return ret;
+}
+
/*
 * Return the number of processors available on the system, used as the
 * default number of compressor threads.  Returns a value < 1 if the count
 * could not be determined (the caller falls back to single-threaded mode).
 *
 * Fix: declared with '(void)' — in C an empty parameter list is an
 * old-style, unchecked declaration.
 */
static long
get_default_num_threads(void)
{
#ifdef __WIN32__
	return win32_get_number_of_processors();
#else
	return sysconf(_SC_NPROCESSORS_ONLN);
#endif
}
+
+static int
+write_stream_list_parallel(struct list_head *stream_list,
+ struct wim_lookup_table *lookup_table,
+ FILE *out_fp,
+ int out_ctype,
+ int write_resource_flags,
+ wimlib_progress_func_t progress_func,
+ union wimlib_progress_info *progress,
+ unsigned num_threads)
+{
+ int ret;
+ struct shared_queue res_to_compress_queue;
+ struct shared_queue compressed_res_queue;
+ pthread_t *compressor_threads = NULL;
+
+ if (num_threads == 0) {
+ long nthreads = get_default_num_threads();
+ if (nthreads < 1 || nthreads > UINT_MAX) {
+ WARNING("Could not determine number of processors! Assuming 1");
+ goto out_serial;
+ } else {
+ num_threads = nthreads;
+ }
+ }
+
+ progress->write_streams.num_threads = num_threads;
+
+ static const size_t MESSAGES_PER_THREAD = 2;
+ size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD);
+
+ DEBUG("Initializing shared queues (queue_size=%zu)", queue_size);
+
+ ret = shared_queue_init(&res_to_compress_queue, queue_size);
+ if (ret)
+ goto out_serial;
+
+ ret = shared_queue_init(&compressed_res_queue, queue_size);
+ if (ret)
+ goto out_destroy_res_to_compress_queue;
+
+ struct compressor_thread_params params;
+ params.res_to_compress_queue = &res_to_compress_queue;
+ params.compressed_res_queue = &compressed_res_queue;
+ params.compress = get_compress_func(out_ctype);
+
+ compressor_threads = MALLOC(num_threads * sizeof(pthread_t));
+ if (!compressor_threads) {
+ ret = WIMLIB_ERR_NOMEM;
+ goto out_destroy_compressed_res_queue;
+ }
+
+ for (unsigned i = 0; i < num_threads; i++) {
+ DEBUG("pthread_create thread %u of %u", i + 1, num_threads);
+ ret = pthread_create(&compressor_threads[i], NULL,
+ compressor_thread_proc, ¶ms);
+ if (ret != 0) {
+ ret = -1;
+ ERROR_WITH_ERRNO("Failed to create compressor "
+ "thread %u of %u",
+ i + 1, num_threads);
+ num_threads = i;
+ goto out_join;
+ }
+ }
+
+ if (progress_func)
+ progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
+
+ struct main_writer_thread_ctx ctx;
+ ctx.stream_list = stream_list;
+ ctx.lookup_table = lookup_table;
+ ctx.out_fp = out_fp;
+ ctx.out_ctype = out_ctype;
+ ctx.res_to_compress_queue = &res_to_compress_queue;
+ ctx.compressed_res_queue = &compressed_res_queue;
+ ctx.num_messages = queue_size;
+ ctx.write_resource_flags = write_resource_flags | WIMLIB_RESOURCE_FLAG_THREADSAFE_READ;
+ ctx.progress_func = progress_func;
+ ctx.progress = progress;
+ ret = main_writer_thread_init_ctx(&ctx);
+ if (ret)
+ goto out_join;
+ ret = do_write_stream_list(stream_list, lookup_table,
+ main_thread_process_next_stream,
+ &ctx, NULL, NULL);
+ if (ret)
+ goto out_destroy_ctx;
+ ret = main_writer_thread_finish(&ctx);
+out_destroy_ctx:
+ main_writer_thread_destroy_ctx(&ctx);
+out_join:
+ for (unsigned i = 0; i < num_threads; i++)
+ shared_queue_put(&res_to_compress_queue, NULL);
+
+ for (unsigned i = 0; i < num_threads; i++) {
+ if (pthread_join(compressor_threads[i], NULL)) {
+ WARNING_WITH_ERRNO("Failed to join compressor "
+ "thread %u of %u",
+ i + 1, num_threads);
+ }
+ }
+ FREE(compressor_threads);
+out_destroy_compressed_res_queue:
+ shared_queue_destroy(&compressed_res_queue);
+out_destroy_res_to_compress_queue:
+ shared_queue_destroy(&res_to_compress_queue);
+ if (ret >= 0 && ret != WIMLIB_ERR_NOMEM)
+ return ret;
+out_serial:
+ WARNING("Falling back to single-threaded compression");
+ return write_stream_list_serial(stream_list,
+ lookup_table,
+ out_fp,
+ out_ctype,
+ write_resource_flags,
+ progress_func,
+ progress);
+
+}
+#endif
+
+/*
+ * Write a list of streams to a WIM (@out_fp) using the compression type
+ * @out_ctype and up to @num_threads compressor threads.
+ */
static int
write_stream_list(struct list_head *stream_list,
		  struct wim_lookup_table *lookup_table,
		  FILE *out_fp, int out_ctype, int write_flags,
		  unsigned num_threads, wimlib_progress_func_t progress_func)
{
	struct wim_lookup_table_entry *lte;
	size_t num_streams = 0;
	u64 total_bytes = 0;
	u64 total_compression_bytes = 0;	/* bytes that actually need compressing */
	union wimlib_progress_info progress;
	int ret;
	int write_resource_flags;

	/* Nothing to write; trivially succeed. */
	if (list_empty(stream_list))
		return 0;

	write_resource_flags = write_flags_to_resource_flags(write_flags);

	/* Calculate the total size of the streams to be written.  Note: this
	 * will be the uncompressed size, as we may not know the compressed size
	 * yet, and also this will assume that every unhashed stream will be
	 * written (which will not necessarily be the case). */
	list_for_each_entry(lte, stream_list, write_streams_list) {
		num_streams++;
		total_bytes += wim_resource_size(lte);
		/* A stream needs compression work only if output is compressed
		 * AND (its current compression differs, or recompression was
		 * explicitly requested). */
		if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
		    && (wim_resource_compression_type(lte) != out_ctype ||
			(write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS)))
		{
			total_compression_bytes += wim_resource_size(lte);
		}
	}
	/* Initialize the progress structure shared by both code paths. */
	progress.write_streams.total_bytes       = total_bytes;
	progress.write_streams.total_streams     = num_streams;
	progress.write_streams.completed_bytes   = 0;
	progress.write_streams.completed_streams = 0;
	progress.write_streams.num_threads       = num_threads;
	progress.write_streams.compression_type  = out_ctype;
	progress.write_streams._private          = 0;

#ifdef ENABLE_MULTITHREADED_COMPRESSION
	/* Parallel compression is only worthwhile when there is a nontrivial
	 * amount of data to compress and more than one thread is allowed. */
	if (total_compression_bytes >= 1000000 && num_threads != 1)
		ret = write_stream_list_parallel(stream_list,
						 lookup_table,
						 out_fp,
						 out_ctype,
						 write_resource_flags,
						 progress_func,
						 &progress,
						 num_threads);
	else
#endif
		ret = write_stream_list_serial(stream_list,
					       lookup_table,
					       out_fp,
					       out_ctype,
					       write_resource_flags,
					       progress_func,
					       &progress);
	return ret;
}
+
/* Hash table, keyed on stream size, used to detect streams that have a unique
 * uncompressed size (such streams must be distinct and therefore must be
 * written). */
struct stream_size_table {
	struct hlist_head *array;	/* Bucket heads, indexed by hashed size */
	size_t num_entries;		/* Number of streams inserted */
	size_t capacity;		/* Number of buckets in @array */
};
+
+static int
+init_stream_size_table(struct stream_size_table *tab, size_t capacity)
+{
+ tab->array = CALLOC(capacity, sizeof(tab->array[0]));
+ if (!tab->array)
+ return WIMLIB_ERR_NOMEM;
+ tab->num_entries = 0;
+ tab->capacity = capacity;
+ return 0;
+}
+
/* Free the bucket array of a stream size table.  The entries themselves are
 * lookup table entries owned elsewhere and are not freed. */
static void
destroy_stream_size_table(struct stream_size_table *tab)
{
	FREE(tab->array);
}
+
+static int
+stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab)
+{
+ struct stream_size_table *tab = _tab;
+ size_t pos;
+ struct wim_lookup_table_entry *same_size_lte;
+ struct hlist_node *tmp;
+
+ pos = hash_u64(wim_resource_size(lte)) % tab->capacity;
+ lte->unique_size = 1;
+ hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) {
+ if (wim_resource_size(same_size_lte) == wim_resource_size(lte)) {
+ lte->unique_size = 0;
+ same_size_lte->unique_size = 0;
+ break;
+ }
+ }
+
+ hlist_add_head(<e->hash_list_2, &tab->array[pos]);
+ tab->num_entries++;
+ return 0;
+}
+
+
/* Arguments shared by the two phases of preparing streams for an in-place
 * WIM overwrite. */
struct lte_overwrite_prepare_args {
	WIMStruct *wim;			/* The WIM being overwritten in place */
	off_t end_offset;		/* Offset of the end of usable resource space
					 * (start of the XML data) */
	struct list_head stream_list;	/* Accumulates streams that must be written */
	struct stream_size_table stream_size_tab; /* For detecting unique-size streams */
};
+
+/* First phase of preparing streams for an in-place overwrite. This is called
+ * on all streams, both hashed and unhashed, except the metadata resources. */
+static int
+lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *_args)
+{
+ struct lte_overwrite_prepare_args *args = _args;
+
+ wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA));
+ if (lte->resource_location != RESOURCE_IN_WIM || lte->wim != args->wim)
+ list_add_tail(<e->write_streams_list, &args->stream_list);
+ lte->out_refcnt = lte->refcnt;
+ stream_size_table_insert(lte, &args->stream_size_tab);
+ return 0;
+}
+
+/* Second phase of preparing streams for an in-place overwrite. This is called
+ * on existing metadata resources and hashed streams, but not unhashed streams.
+ *
+ * NOTE: lte->output_resource_entry is in union with lte->hash_list_2, so
+ * lte_overwrite_prepare_2() must be called after lte_overwrite_prepare(), as
+ * the latter uses lte->hash_list_2, while the former expects to set
+ * lte->output_resource_entry. */
+static int
+lte_overwrite_prepare_2(struct wim_lookup_table_entry *lte, void *_args)
+{
+ struct lte_overwrite_prepare_args *args = _args;
+
+ if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == args->wim) {
+ /* We can't do an in place overwrite on the WIM if there are
+ * streams after the XML data. */
+ if (lte->resource_entry.offset +
+ lte->resource_entry.size > args->end_offset)
+ {
+ #ifdef ENABLE_ERROR_MESSAGES
+ ERROR("The following resource is after the XML data:");
+ print_lookup_table_entry(lte, stderr);
+ #endif
+ return WIMLIB_ERR_RESOURCE_ORDER;
+ }
+ copy_resource_entry(<e->output_resource_entry,
+ <e->resource_entry);
+ }
+ return 0;
+}
+
+/* Given a WIM that we are going to overwrite in place with zero or more
+ * additional streams added, construct a list the list of new unique streams
+ * ('struct wim_lookup_table_entry's) that must be written, plus any unhashed
+ * streams that need to be added but may be identical to other hashed or
+ * unhashed streams. These unhashed streams are checksummed while the streams
+ * are being written. To aid this process, the member @unique_size is set to 1
+ * on streams that have a unique size and therefore must be written.
+ *
+ * The out_refcnt member of each 'struct wim_lookup_table_entry' is set to
+ * indicate the number of times the stream is referenced in only the streams
+ * that are being written; this may still be adjusted later when unhashed
+ * streams are being resolved.
+ */
static int
prepare_streams_for_overwrite(WIMStruct *wim, off_t end_offset,
			      struct list_head *stream_list)
{
	int ret;
	struct lte_overwrite_prepare_args args;
	unsigned i;

	args.wim = wim;
	args.end_offset = end_offset;
	ret = init_stream_size_table(&args.stream_size_tab,
				     wim->lookup_table->capacity);
	if (ret)
		return ret;

	/* Phase 1: run lte_overwrite_prepare() on every stream — unhashed
	 * streams from each image first, then all hashed streams in the
	 * lookup table.  This fills args.stream_list and the size table. */
	INIT_LIST_HEAD(&args.stream_list);
	for (i = 0; i < wim->hdr.image_count; i++) {
		struct wim_image_metadata *imd;
		struct wim_lookup_table_entry *lte;

		imd = wim->image_metadata[i];
		image_for_each_unhashed_stream(lte, imd)
			lte_overwrite_prepare(lte, &args);
	}
	for_lookup_table_entry(wim->lookup_table, lte_overwrite_prepare, &args);
	list_transfer(&args.stream_list, stream_list);

	/* Phase 2: run lte_overwrite_prepare_2() on metadata resources and
	 * hashed streams.  This must come after phase 1 because it writes
	 * output_resource_entry, which is in union with hash_list_2 (used by
	 * phase 1's size-table insertion). */
	for (i = 0; i < wim->hdr.image_count; i++) {
		ret = lte_overwrite_prepare_2(wim->image_metadata[i]->metadata_lte,
					      &args);
		if (ret)
			goto out_destroy_stream_size_table;
	}
	ret = for_lookup_table_entry(wim->lookup_table,
				     lte_overwrite_prepare_2, &args);
out_destroy_stream_size_table:
	destroy_stream_size_table(&args.stream_size_tab);
	return ret;
}
+
+
/* Context for collecting the streams that must be written when writing one
 * or all images of a WIM. */
struct find_streams_ctx {
	struct list_head stream_list;	/* Accumulates streams to write */
	struct stream_size_table stream_size_tab; /* For unique-size detection */
};
+
+static void
+inode_find_streams_to_write(struct wim_inode *inode,
+ struct wim_lookup_table *table,
+ struct list_head *stream_list,
+ struct stream_size_table *tab)
+{
+ struct wim_lookup_table_entry *lte;
+ for (unsigned i = 0; i <= inode->i_num_ads; i++) {
+ lte = inode_stream_lte(inode, i, table);
+ if (lte) {
+ if (lte->out_refcnt == 0) {
+ if (lte->unhashed)
+ stream_size_table_insert(lte, tab);
+ list_add_tail(<e->write_streams_list, stream_list);
+ }
+ lte->out_refcnt += inode->i_nlink;
+ }
+ }
+}
+
+static int
+image_find_streams_to_write(WIMStruct *w)
+{
+ struct find_streams_ctx *ctx;
+ struct wim_image_metadata *imd;
+ struct wim_inode *inode;
+ struct wim_lookup_table_entry *lte;
+
+ ctx = w->private;
+ imd = wim_get_current_image_metadata(w);
+
+ image_for_each_unhashed_stream(lte, imd)
+ lte->out_refcnt = 0;
+
+ /* Go through this image's inodes to find any streams that have not been
+ * found yet. */
+ image_for_each_inode(inode, imd) {
+ inode_find_streams_to_write(inode, w->lookup_table,
+ &ctx->stream_list,
+ &ctx->stream_size_tab);
+ }
+ return 0;
+}
+
+/* Given a WIM that from which one or all of the images is being written, build
+ * the list of unique streams ('struct wim_lookup_table_entry's) that must be
+ * written, plus any unhashed streams that need to be written but may be
+ * identical to other hashed or unhashed streams being written. These unhashed
+ * streams are checksummed while the streams are being written. To aid this
+ * process, the member @unique_size is set to 1 on streams that have a unique
+ * size and therefore must be written.
+ *
+ * The out_refcnt member of each 'struct wim_lookup_table_entry' is set to
+ * indicate the number of times the stream is referenced in only the streams
+ * that are being written; this may still be adjusted later when unhashed
+ * streams are being resolved.
+ */
static int
prepare_stream_list(WIMStruct *wim, int image, struct list_head *stream_list)
{
	int ret;
	struct find_streams_ctx ctx;

	/* Zero all out_refcnts so that inode_find_streams_to_write() can use
	 * out_refcnt == 0 to mean "not yet queued". */
	for_lookup_table_entry(wim->lookup_table, lte_zero_out_refcnt, NULL);
	ret = init_stream_size_table(&ctx.stream_size_tab,
				     wim->lookup_table->capacity);
	if (ret)
		return ret;
	/* Pre-populate the size table with every hashed stream; unhashed
	 * streams are inserted later, as they are discovered per image. */
	for_lookup_table_entry(wim->lookup_table, stream_size_table_insert,
			       &ctx.stream_size_tab);
	INIT_LIST_HEAD(&ctx.stream_list);
	/* for_image() callbacks retrieve the context through wim->private. */
	wim->private = &ctx;
	ret = for_image(wim, image, image_find_streams_to_write);
	destroy_stream_size_table(&ctx.stream_size_tab);
	if (ret == 0)
		list_transfer(&ctx.stream_list, stream_list);
	return ret;
}
+
+/* Writes the streams for the specified @image in @wim to @wim->out_fp.
+ */
+static int
+write_wim_streams(WIMStruct *wim, int image, int write_flags,
+ unsigned num_threads,
+ wimlib_progress_func_t progress_func)
+{
+ int ret;
+ struct list_head stream_list;
+
+ ret = prepare_stream_list(wim, image, &stream_list);
+ if (ret)
+ return ret;
+ return write_stream_list(&stream_list,
+ wim->lookup_table,
+ wim->out_fp,
+ wimlib_get_compression_type(wim),
+ write_flags,
+ num_threads,
+ progress_func);
+}
+
+/*
+ * Finish writing a WIM file: write the lookup table, xml data, and integrity
+ * table (optional), then overwrite the WIM header.
+ *
+ * write_flags is a bitwise OR of the following:
+ *
+ * (public) WIMLIB_WRITE_FLAG_CHECK_INTEGRITY:
+ * Include an integrity table.
+ *
+ * (public) WIMLIB_WRITE_FLAG_SHOW_PROGRESS:
+ * Show progress information when (if) writing the integrity table.
+ *
+ * (private) WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE:
+ * Don't write the lookup table.
+ *
+ * (private) WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE:
+ * When (if) writing the integrity table, re-use entries from the
+ * existing integrity table, if possible.
+ *
+ * (private) WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML:
+ * After writing the XML data but before writing the integrity
+ * table, write a temporary WIM header and flush the stream so that
+ * the WIM is less likely to become corrupted upon abrupt program
+ * termination.
+ *
+ * (private) WIMLIB_WRITE_FLAG_FSYNC:
+ * fsync() the output file before closing it.
+ *
+ */
+int
+finish_write(WIMStruct *w, int image, int write_flags,
+ wimlib_progress_func_t progress_func)
+{
+ int ret;
+ struct wim_header hdr;
+ FILE *out = w->out_fp;
+
+ /* @hdr will be the header for the new WIM. First copy all the data
+ * from the header in the WIMStruct; then set all the fields that may
+ * have changed, including the resource entries, boot index, and image
+ * count. */
+ memcpy(&hdr, &w->hdr, sizeof(struct wim_header));
+
+ /* Set image count and boot index correctly for single image writes */
+ if (image != WIMLIB_ALL_IMAGES) {
+ hdr.image_count = 1;
+ if (hdr.boot_idx == image)
+ hdr.boot_idx = 1;
+ else
+ hdr.boot_idx = 0;
+ }