4 * Support for writing WIM files; write a WIM file, overwrite a WIM file, write
5 * compressed file resources, etc.
9 * Copyright (C) 2012, 2013 Eric Biggers
11 * This file is part of wimlib, a library for working with WIM files.
13 * wimlib is free software; you can redistribute it and/or modify it under the
14 * terms of the GNU General Public License as published by the Free
15 * Software Foundation; either version 3 of the License, or (at your option)
18 * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
19 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
20 * A PARTICULAR PURPOSE. See the GNU General Public License for more
23 * You should have received a copy of the GNU General Public License
24 * along with wimlib; if not, see http://www.gnu.org/licenses/.
31 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
32 /* On BSD, this should be included before "wimlib/list.h" so that "wimlib/list.h" can
33 * overwrite the LIST_HEAD macro. */
34 # include <sys/file.h>
37 #include "wimlib/endianness.h"
38 #include "wimlib/error.h"
39 #include "wimlib/file_io.h"
40 #include "wimlib/header.h"
41 #include "wimlib/integrity.h"
42 #include "wimlib/lookup_table.h"
43 #include "wimlib/metadata.h"
44 #include "wimlib/resource.h"
45 #include "wimlib/write.h"
46 #include "wimlib/xml.h"
49 # include "wimlib/win32.h" /* win32_get_number_of_processors() */
52 #ifdef ENABLE_MULTITHREADED_COMPRESSION
68 # include <sys/uio.h> /* for `struct iovec' */
/* In-memory chunk table for a compressed resource being written; located at
 * the beginning of each compressed resource in the WIM.  (This is not the
 * on-disk format; the on-disk format just has an array of offsets.)
 *
 * NOTE(review): this copy of the file is missing lines; the struct's opening
 * line and several fields (num_chunks, table_disk_size, cur_offset_u32/u64,
 * cur_offset_p — all referenced by the functions below) are not visible. */
75 u64 original_resource_size;
/* Width of each on-disk offset entry: 4 or 8 bytes, chosen in
 * begin_wim_resource_chunk_tab() based on the resource size. */
78 unsigned bytes_per_chunk_entry;
/* Beginning of chunk offsets, in either 32-bit or 64-bit little endian
 * integers, including the first offset of 0, which will not be written. */
87 u8 offsets[] _aligned_attribute(8);
/* Allocates and initializes a chunk table for the resource @lte, then
 * reserves space for it in the output file at the current offset — unless
 * writing a pipable resource, which has no leading chunk table.
 *
 * On success, *@chunk_tab_ret receives the new table (caller frees).
 *
 * NOTE(review): lines are missing from this copy (the return type, locals
 * such as @size/@num_chunks/@alloc_size/@ret, and closing braces). */
93 begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte,
94 struct filedes *out_fd,
95 struct chunk_table **chunk_tab_ret,
100 unsigned bytes_per_chunk_entry;
102 struct chunk_table *chunk_tab;
/* Query the uncompressed size and chunk count of the resource. */
105 size = wim_resource_size(lte);
106 num_chunks = wim_resource_chunks(lte);
/* Resources larger than 4 GiB need 8-byte on-disk offset entries. */
107 bytes_per_chunk_entry = (size > (1ULL << 32)) ? 8 : 4;
/* The in-memory table always reserves u64-sized slots, regardless of the
 * narrower on-disk entry width. */
108 alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64);
109 chunk_tab = CALLOC(1, alloc_size);
112 ERROR("Failed to allocate chunk table for %"PRIu64" byte "
114 return WIMLIB_ERR_NOMEM;
116 chunk_tab->num_chunks = num_chunks;
117 chunk_tab->original_resource_size = size;
118 chunk_tab->bytes_per_chunk_entry = bytes_per_chunk_entry;
/* NOTE(review): the continuation line holding the multiplicand for
 * table_disk_size is missing from this copy. */
119 chunk_tab->table_disk_size = chunk_tab->bytes_per_chunk_entry *
121 chunk_tab->cur_offset_p = chunk_tab->offsets;
/* We don't know the correct offsets yet; so just write zeroes to reserve
 * space for the table, so we can go back to it later after we've written
 * the compressed chunks following it.
 *
 * Special case: if writing a pipable WIM, compressed resources are in a
 * modified format (see comment above write_pipable_wim()) and do not have
 * a chunk table at the beginning, so don't reserve any space for one. */
131 if (!(resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE)) {
132 ret = full_write(out_fd, chunk_tab->offsets,
133 chunk_tab->table_disk_size);
135 ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
141 *chunk_tab_ret = chunk_tab;
/* Appends the offset of the next chunk to the chunk table being constructed
 * for a compressed stream, then advances the running offset by
 * @out_chunk_size.  Entries are stored little-endian, 32- or 64-bit wide
 * depending on bytes_per_chunk_entry. */
148 chunk_tab_record_chunk(struct chunk_table *chunk_tab, unsigned out_chunk_size)
150 if (chunk_tab->bytes_per_chunk_entry == 4) {
151 *(le32*)chunk_tab->cur_offset_p = cpu_to_le32(chunk_tab->cur_offset_u32);
152 chunk_tab->cur_offset_p = (le32*)chunk_tab->cur_offset_p + 1;
153 chunk_tab->cur_offset_u32 += out_chunk_size;
/* 64-bit branch.  NOTE(review): the `} else {` line is missing here. */
155 *(le64*)chunk_tab->cur_offset_p = cpu_to_le64(chunk_tab->cur_offset_u64);
156 chunk_tab->cur_offset_p = (le64*)chunk_tab->cur_offset_p + 1;
157 chunk_tab->cur_offset_u64 += out_chunk_size;
/*
 * compress_func_t - Pointer to a function that compresses a chunk of a WIM
 * resource.  This may be either wimlib_xpress_compress()
 * (xpress-compress.c) or wimlib_lzx_compress() (lzx-compress.c).
 *
 * @chunk:      Uncompressed data of the chunk.
 * @chunk_size: Size of the uncompressed chunk, in bytes.
 * @out:        Pointer to output buffer of size at least (@chunk_size - 1) bytes.
 *
 * Returns the size of the compressed data written to @out in bytes, or 0 if
 * the data could not be compressed to (@chunk_size - 1) bytes or fewer.
 *
 * As a special requirement, the compression code is optimized for the WIM
 * format and therefore requires (@chunk_size <= 32768).
 *
 * As another special requirement, the compression code will read up to 8
 * bytes off the end of the @chunk array for performance reasons.  The values
 * of these bytes will not affect the output of the compression, but the
 * calling code must make sure that the buffer holding the uncompressed chunk
 * is actually at least (@chunk_size + 8) bytes, or at least that these extra
 * bytes are in mapped memory that will not cause a memory access violation
 * if accessed. */
184 typedef unsigned (*compress_func_t)(const void *chunk, unsigned chunk_size,
/* NOTE(review): the continuation line with the final output-buffer parameter
 * is missing from this copy. */
187 static compress_func_t
188 get_compress_func(int out_ctype)
190 if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX)
191 return wimlib_lzx_compress;
193 return wimlib_xpress_compress;
/* Finishes a WIM chunk table and writes it to the output file at the
 * correct position: appended at the current offset for a pipable resource,
 * or written back (pwrite) into the space reserved near @res_start_offset
 * for a normal resource.
 *
 * NOTE(review): lines are missing from this copy (return type, locals, the
 * buffer arguments of the two writes, and closing braces). */
199 finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab,
200 struct filedes *out_fd,
201 off_t res_start_offset,
202 int write_resource_flags)
/* A pipe cannot seek backwards, so for pipable output the table is simply
 * appended after the chunk data. */
206 if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
207 ret = full_write(out_fd,
209 chunk_tab->bytes_per_chunk_entry,
210 chunk_tab->table_disk_size);
/* Otherwise overwrite the zeroes reserved by
 * begin_wim_resource_chunk_tab(), without moving the file offset. */
212 ret = full_pwrite(out_fd,
214 chunk_tab->bytes_per_chunk_entry,
215 chunk_tab->table_disk_size,
219 ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
/* Write the header for a stream in a pipable WIM.  The header carries the
 * stream's magic, uncompressed size, SHA1 hash (or zeroes if unhashed), and
 * resource flags, so a sequential reader can process the stream without a
 * lookup table. */
228 write_pwm_stream_header(const struct wim_lookup_table_entry *lte,
229 struct filedes *out_fd,
230 int additional_reshdr_flags)
232 struct pwm_stream_hdr stream_hdr;
236 stream_hdr.magic = PWM_STREAM_MAGIC;
237 stream_hdr.uncompressed_size = cpu_to_le64(lte->resource_entry.original_size);
/* An unhashed stream's SHA1 is not yet known; write zeroes in its place. */
238 if (additional_reshdr_flags & PWM_RESHDR_FLAG_UNHASHED) {
239 zero_out_hash(stream_hdr.hash);
241 wimlib_assert(!lte->unhashed);
242 copy_hash(stream_hdr.hash, lte->hash);
/* Drop the stale compressed flag; the caller supplies the correct one via
 * @additional_reshdr_flags. */
245 reshdr_flags = lte->resource_entry.flags & ~WIM_RESHDR_FLAG_COMPRESSED;
246 reshdr_flags |= additional_reshdr_flags;
247 stream_hdr.flags = cpu_to_le32(reshdr_flags);
248 ret = full_write(out_fd, &stream_hdr, sizeof(stream_hdr));
250 ERROR_WITH_ERRNO("Error writing stream header");
255 seek_and_truncate(struct filedes *out_fd, off_t offset)
257 if (filedes_seek(out_fd, offset) == -1 ||
258 ftruncate(out_fd->fd, offset))
260 ERROR_WITH_ERRNO("Failed to truncate output WIM file");
261 return WIMLIB_ERR_WRITE;
/* Finalize the SHA1 digest accumulated in @sha_ctx and reconcile it with
 * @lte: for an unhashed stream the digest is recorded as the stream's hash;
 * otherwise it is compared against the existing hash and a mismatch is
 * treated as an error (WIMLIB_ERR_INVALID_RESOURCE_HASH).
 *
 * NOTE(review): the `if (lte->unhashed) {` line appears to be missing from
 * this copy — only its body and the else-if branch are visible. */
267 finalize_and_check_sha1(SHA_CTX *sha_ctx, struct wim_lookup_table_entry *lte)
269 u8 md[SHA1_HASH_SIZE];
271 sha1_final(md, sha_ctx);
273 copy_hash(lte->hash, md);
274 } else if (!hashes_equal(md, lte->hash)) {
275 ERROR("WIM resource has incorrect hash!");
/* If the source filename is known, report it to aid diagnosis. */
276 if (lte_filename_valid(lte)) {
277 ERROR("We were reading it from \"%"TS"\"; maybe "
278 "it changed while we were reading it.",
281 return WIMLIB_ERR_INVALID_RESOURCE_HASH;
/* Per-resource context passed to write_resource_cb() while writing one
 * stream.  NOTE(review): additional fields referenced elsewhere (sha_ctx,
 * doing_sha, resource_flags) are missing from this copy. */
286 struct write_resource_ctx {
/* Chunk compression function, or NULL to write data unmodified. */
287 compress_func_t compress;
/* Chunk table under construction, or NULL if none is needed. */
288 struct chunk_table *chunk_tab;
289 struct filedes *out_fd;
/* Per-chunk callback used while writing a resource (invoked via
 * read_resource_prefix() from write_wim_resource()): optionally updates the
 * running SHA1, optionally compresses the chunk, records the chunk in the
 * chunk table, and writes the resulting data to the output file.
 *
 * NOTE(review): lines are missing from this copy (return type, several
 * conditionals, and closing braces). */
296 write_resource_cb(const void *chunk, size_t chunk_size, void *_ctx)
298 struct write_resource_ctx *ctx = _ctx;
299 const void *out_chunk;
300 unsigned out_chunk_size;
/* Hash the uncompressed data (skipped entirely during raw copies, where
 * doing_sha is false). */
304 sha1_update(&ctx->sha_ctx, chunk, chunk_size);
/* Default: write the chunk unmodified. */
307 out_chunk_size = chunk_size;
309 void *compressed_chunk;
310 unsigned compressed_size;
/* Compress the chunk into a stack buffer.  NOTE(review): alloca() here is
 * bounded by the WIM chunk size per compress_func_t's contract. */
313 compressed_chunk = alloca(chunk_size);
314 compressed_size = (*ctx->compress)(chunk, chunk_size,
/* Use the compressed form only when compression actually saved space;
 * compress_func_t returns 0 otherwise. */
319 if (compressed_size) {
320 out_chunk = compressed_chunk;
321 out_chunk_size = compressed_size;
325 if (ctx->chunk_tab) {
/* Update chunk table accounting. */
327 chunk_tab_record_chunk(ctx->chunk_tab, out_chunk_size);
/* If writing compressed chunks to a pipable WIM, prefix the chunk data
 * with a header giving its compressed size, since pipable WIMs have no
 * seekable chunk table. */
332 if (ctx->resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
333 struct pwm_chunk_hdr chunk_hdr = {
334 .compressed_size = cpu_to_le32(out_chunk_size),
336 ret = full_write(ctx->out_fd, &chunk_hdr,
/* Write the chunk data itself. */
344 ret = full_write(ctx->out_fd, out_chunk, out_chunk_size);
350 ERROR_WITH_ERRNO("Failed to write WIM resource chunk");
/*
 * write_wim_resource() -
 *
 * Write a resource to an output WIM.
 *
 * @lte:
 *	Lookup table entry for the resource, which could be in another WIM,
 *	in an external file, or in another location.
 * @out_fd:
 *	File descriptor opened to the output WIM.
 * @out_ctype:
 *	One of the WIMLIB_COMPRESSION_TYPE_* constants to indicate which
 *	compression algorithm to use.
 * @out_res_entry:
 *	On success, this is filled in with the offset, flags, compressed
 *	size, and uncompressed size of the resource in the output WIM.
 * @write_resource_flags:
 *	* WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS to force data to be
 *	  recompressed even if it could otherwise be copied directly from
 *	  the input;
 *	* WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE if writing a resource for a
 *	  pipable WIM (and the output file descriptor may be a pipe).
 *
 * Additional notes: The SHA1 message digest of the uncompressed data is
 * calculated (except when doing a raw copy --- see below).  If the
 * @unhashed flag is set on the lookup table entry, this message digest is
 * simply copied to it; otherwise, the message digest is compared with the
 * existing one, and the function will fail if they do not match.
 *
 * NOTE(review): many lines are missing from this copy (return type, some
 * locals such as @ret/@read_size, the `try_write_again:` and
 * `out_free_chunk_tab:` label lines implied by the gotos below, and various
 * braces). */
387 write_wim_resource(struct wim_lookup_table_entry *lte,
388 struct filedes *out_fd, int out_ctype,
389 struct resource_entry *out_res_entry,
392 struct write_resource_ctx write_ctx;
393 off_t res_start_offset;
/* Mask out any irrelevant flags, since this function also uses this
 * variable to store WIMLIB_READ_RESOURCE flags. */
399 resource_flags &= WIMLIB_WRITE_RESOURCE_MASK;
/* Get current position in output WIM. */
402 res_start_offset = out_fd->offset;
/* If we are not forcing the data to be recompressed, and the input
 * resource is located in a WIM with the same compression type as that
 * desired other than no compression, we can simply copy the compressed
 * data without recompressing it.  This also means we must skip
 * calculating the SHA1, as we never will see the uncompressed data. */
409 if (lte->resource_location == RESOURCE_IN_WIM &&
410 out_ctype == wim_resource_compression_type(lte) &&
411 out_ctype != WIMLIB_COMPRESSION_TYPE_NONE &&
412 !(resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS))
/* Normally we can request a RAW_FULL read, but if we're reading from a
 * pipable resource and writing a non-pipable resource or vice versa,
 * then a RAW_CHUNKS read needs to be requested so that the written
 * resource can be appropriately formatted.  However, in neither case is
 * any actual decompression needed. */
420 if (lte->is_pipable == !!(resource_flags &
421 WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE))
422 resource_flags |= WIMLIB_READ_RESOURCE_FLAG_RAW_FULL;
424 resource_flags |= WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS;
425 write_ctx.doing_sha = false;
426 read_size = lte->resource_entry.size;
/* Not a raw copy: hash the data as it streams through. */
428 write_ctx.doing_sha = true;
429 sha1_init(&write_ctx.sha_ctx);
430 read_size = lte->resource_entry.original_size;
/* If the output resource is to be compressed, initialize the chunk table
 * and set the function to use for chunk compression.  Exceptions: no
 * compression function is needed if doing a raw copy; also, no chunk
 * table is needed if doing a *full* (not per-chunk) raw copy. */
437 write_ctx.compress = NULL;
438 write_ctx.chunk_tab = NULL;
439 if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
440 if (!(resource_flags & WIMLIB_READ_RESOURCE_FLAG_RAW))
441 write_ctx.compress = get_compress_func(out_ctype);
442 if (!(resource_flags & WIMLIB_READ_RESOURCE_FLAG_RAW_FULL)) {
443 ret = begin_wim_resource_chunk_tab(lte, out_fd,
444 &write_ctx.chunk_tab,
/* If writing a pipable resource, write the stream header and update
 * @res_start_offset to be the end of the stream header. */
453 if (resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
454 int reshdr_flags = 0;
455 if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE)
456 reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED;
457 ret = write_pwm_stream_header(lte, out_fd, reshdr_flags);
459 goto out_free_chunk_tab;
460 res_start_offset = out_fd->offset;
/* Write the entire resource by reading the entire resource and feeding
 * the data through the write_resource_cb function. */
465 write_ctx.out_fd = out_fd;
466 write_ctx.resource_flags = resource_flags;
468 ret = read_resource_prefix(lte, read_size,
469 write_resource_cb, &write_ctx, resource_flags);
471 goto out_free_chunk_tab;
/* Verify SHA1 message digest of the resource, or set the hash for the
 * unhashed case. */
475 if (write_ctx.doing_sha) {
476 ret = finalize_and_check_sha1(&write_ctx.sha_ctx, lte);
478 goto out_free_chunk_tab;
/* Write chunk table if needed. */
482 if (write_ctx.chunk_tab) {
483 ret = finish_wim_resource_chunk_tab(write_ctx.chunk_tab,
488 goto out_free_chunk_tab;
/* Fill in out_res_entry with information about the newly written
 * resource. */
493 out_res_entry->size = out_fd->offset - res_start_offset;
494 out_res_entry->flags = lte->resource_entry.flags;
495 if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
496 out_res_entry->flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
498 out_res_entry->flags |= WIM_RESHDR_FLAG_COMPRESSED;
499 out_res_entry->offset = res_start_offset;
500 out_res_entry->original_size = wim_resource_size(lte);
/* Check for resources compressed to greater than their original size
 * and write them uncompressed instead.  (But never do this if writing
 * to a pipe, and don't bother if we did a raw copy.) */
505 if (out_res_entry->size > out_res_entry->original_size &&
506 !(resource_flags & (WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE |
507 WIMLIB_READ_RESOURCE_FLAG_RAW)))
509 DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
510 "writing uncompressed instead",
511 out_res_entry->original_size, out_res_entry->size);
512 ret = seek_and_truncate(out_fd, res_start_offset);
514 goto out_free_chunk_tab;
/* Retry as uncompressed: reset the context and loop back (the
 * try_write_again label is not visible in this copy). */
515 out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
516 FREE(write_ctx.chunk_tab);
517 write_ctx.compress = NULL;
518 write_ctx.chunk_tab = NULL;
519 write_ctx.doing_sha = false;
520 goto try_write_again;
522 if (resource_flags & (WIMLIB_READ_RESOURCE_FLAG_RAW)) {
523 DEBUG("Copied raw compressed data "
524 "(%"PRIu64" => %"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)",
525 out_res_entry->original_size, out_res_entry->size,
526 out_res_entry->offset, out_res_entry->flags);
527 } else if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
528 DEBUG("Wrote compressed resource "
529 "(%"PRIu64" => %"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)",
530 out_res_entry->original_size, out_res_entry->size,
531 out_res_entry->offset, out_res_entry->flags);
533 DEBUG("Wrote uncompressed resource "
534 "(%"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)",
535 out_res_entry->original_size,
536 out_res_entry->offset, out_res_entry->flags);
/* Cleanup path (the out_free_chunk_tab label is not visible here). */
540 FREE(write_ctx.chunk_tab);
/* Like write_wim_resource(), but the resource is specified by a buffer of
 * uncompressed data rather than a lookup table entry; also writes the SHA1
 * hash of the buffer to @hash_ret (when non-NULL).
 *
 * NOTE(review): lines are missing here (return type, locals, braces), and
 * `<e` below looks like an XML-entity mangling of `&lte` — TODO confirm
 * against the original source. */
549 write_wim_resource_from_buffer(const void *buf, size_t buf_size,
550 int reshdr_flags, struct filedes *out_fd,
552 struct resource_entry *out_res_entry,
553 u8 *hash_ret, int write_resource_flags)
/* Set up a temporary lookup table entry to provide to
 * write_wim_resource(). */
557 struct wim_lookup_table_entry lte;
560 lte.resource_location = RESOURCE_IN_ATTACHED_BUFFER;
561 lte.attached_buffer = (void*)buf;
562 lte.resource_entry.original_size = buf_size;
563 lte.resource_entry.flags = reshdr_flags;
/* For pipable output the hash must be known up front, since the stream
 * header carrying it precedes the data. */
565 if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
566 sha1_buffer(buf, buf_size, lte.hash);
572 ret = write_wim_resource(<e, out_fd, out_ctype, out_res_entry,
573 write_resource_flags);
577 copy_hash(hash_ret, lte.hash);
582 #ifdef ENABLE_MULTITHREADED_COMPRESSION
/* Blocking bounded FIFO queue of void* items (solves the producer-consumer
 * problem).  NOTE(review): the backing array and the front/back/size fields
 * used by init/put/get are not visible in this copy. */
585 struct shared_queue {
/* Number of occupied slots; guarded by @lock. */
589 unsigned filled_slots;
591 pthread_mutex_t lock;
/* Signaled when an item becomes available; consumers wait on it. */
592 pthread_cond_t msg_avail_cond;
/* Signaled when a slot frees up; producers wait on it. */
593 pthread_cond_t space_avail_cond;
/* Initialize @q with capacity @size: allocate the item array and create the
 * mutex and both condition variables.  Uses goto-based cleanup so partially
 * created synchronization objects are destroyed on failure.
 *
 * Returns 0 on success or WIMLIB_ERR_NOMEM on failure.
 *
 * NOTE(review): lines are missing here (return type, the array NULL check,
 * field assignments, the success return, and some labels/braces). */
597 shared_queue_init(struct shared_queue *q, unsigned size)
599 wimlib_assert(size != 0);
600 q->array = CALLOC(sizeof(q->array[0]), size);
607 if (pthread_mutex_init(&q->lock, NULL)) {
608 ERROR_WITH_ERRNO("Failed to initialize mutex");
611 if (pthread_cond_init(&q->msg_avail_cond, NULL)) {
612 ERROR_WITH_ERRNO("Failed to initialize condition variable");
613 goto err_destroy_lock;
615 if (pthread_cond_init(&q->space_avail_cond, NULL)) {
616 ERROR_WITH_ERRNO("Failed to initialize condition variable");
617 goto err_destroy_msg_avail_cond;
/* Unwind in reverse order of creation. */
620 err_destroy_msg_avail_cond:
621 pthread_cond_destroy(&q->msg_avail_cond);
623 pthread_mutex_destroy(&q->lock);
625 return WIMLIB_ERR_NOMEM;
629 shared_queue_destroy(struct shared_queue *q)
632 pthread_mutex_destroy(&q->lock);
633 pthread_cond_destroy(&q->msg_avail_cond);
634 pthread_cond_destroy(&q->space_avail_cond);
638 shared_queue_put(struct shared_queue *q, void *obj)
640 pthread_mutex_lock(&q->lock);
641 while (q->filled_slots == q->size)
642 pthread_cond_wait(&q->space_avail_cond, &q->lock);
644 q->back = (q->back + 1) % q->size;
645 q->array[q->back] = obj;
648 pthread_cond_broadcast(&q->msg_avail_cond);
649 pthread_mutex_unlock(&q->lock);
653 shared_queue_get(struct shared_queue *q)
657 pthread_mutex_lock(&q->lock);
658 while (q->filled_slots == 0)
659 pthread_cond_wait(&q->msg_avail_cond, &q->lock);
661 obj = q->array[q->front];
662 q->array[q->front] = NULL;
663 q->front = (q->front + 1) % q->size;
666 pthread_cond_broadcast(&q->space_avail_cond);
667 pthread_mutex_unlock(&q->lock);
/* Parameters handed to each compressor thread (see
 * compressor_thread_proc()). */
671 struct compressor_thread_params {
/* Queue of messages with chunks awaiting compression. */
672 struct shared_queue *res_to_compress_queue;
/* Queue onto which finished messages are placed for the main thread. */
673 struct shared_queue *compressed_res_queue;
/* Chunk compression function to apply (see compress_func_t). */
674 compress_func_t compress;
/* Maximum number of chunks carried per compressor-thread message. */
677 #define MAX_CHUNKS_PER_MSG 2
/* A unit of work exchanged between the main thread and compressor threads.
 * NOTE(review): the `struct message {` opening line and some fields
 * referenced elsewhere (num_chunks, begin_chunk, complete) are missing
 * from this copy. */
680 struct wim_lookup_table_entry *lte;
681 u8 *uncompressed_chunks[MAX_CHUNKS_PER_MSG];
682 u8 *compressed_chunks[MAX_CHUNKS_PER_MSG];
683 unsigned uncompressed_chunk_sizes[MAX_CHUNKS_PER_MSG];
/* Per-chunk output (base, len): either the compressed or the
 * uncompressed buffer, chosen in compress_chunks(). */
684 struct iovec out_chunks[MAX_CHUNKS_PER_MSG];
/* Links the message into available_msgs or a stream's msg_list. */
686 struct list_head list;
/* Compress every chunk in @msg with @compress and fill in msg->out_chunks
 * so each entry points at whichever form (compressed or original) should be
 * written.  NOTE(review): locals @out_chunk/@out_len declarations, the
 * success conditional, and some braces are missing from this copy. */
692 compress_chunks(struct message *msg, compress_func_t compress)
694 for (unsigned i = 0; i < msg->num_chunks; i++) {
695 unsigned len = compress(msg->uncompressed_chunks[i],
696 msg->uncompressed_chunk_sizes[i],
697 msg->compressed_chunks[i]);
/* Nonzero @len: compression saved space; write the compressed form. */
702 out_chunk = msg->compressed_chunks[i];
/* Zero @len: compression did not help; write the original bytes. */
706 out_chunk = msg->uncompressed_chunks[i];
707 out_len = msg->uncompressed_chunk_sizes[i];
709 msg->out_chunks[i].iov_base = out_chunk;
710 msg->out_chunks[i].iov_len = out_len;
/* Compressor thread routine.  This is a lot simpler than the main thread
 * routine: just repeatedly get a group of chunks from the
 * res_to_compress_queue, compress them, and put them in the
 * compressed_res_queue.  A NULL pointer indicates that the thread should
 * stop.  NOTE(review): the declaration of @msg and the thread-exit return
 * are missing from this copy. */
720 compressor_thread_proc(void *arg)
722 struct compressor_thread_params *params = arg;
723 struct shared_queue *res_to_compress_queue = params->res_to_compress_queue;
724 struct shared_queue *compressed_res_queue = params->compressed_res_queue;
725 compress_func_t compress = params->compress;
728 DEBUG("Compressor thread ready");
729 while ((msg = shared_queue_get(res_to_compress_queue)) != NULL) {
730 compress_chunks(msg, compress);
731 shared_queue_put(compressed_res_queue, msg);
733 DEBUG("Compressor thread terminating");
736 #endif /* ENABLE_MULTITHREADED_COMPRESSION */
/* Bookkeeping for reporting WIMLIB_PROGRESS_MSG_WRITE_STREAMS progress
 * while writing a list of streams (see do_write_streams_progress()). */
738 struct write_streams_progress_data {
/* User progress callback, or NULL for no progress reporting. */
739 wimlib_progress_func_t progress_func;
740 union wimlib_progress_info progress;
/* Completed-bytes threshold at which the callback next fires; ~0 means
 * "no further callbacks". */
741 uint64_t next_progress;
/* Last source WIM part seen, used to count completed split-WIM parts. */
742 WIMStruct *prev_wim_part;
/* Update stream-write progress counters after a stream has been written (or
 * discarded as a duplicate), and invoke the user's progress callback at
 * roughly 1% intervals of total bytes.
 *
 * NOTE(review): lines are missing from this copy (return type, the
 * declaration of @new_wim_part, parts of the callback condition, and some
 * braces). */
746 do_write_streams_progress(struct write_streams_progress_data *progress_data,
747 struct wim_lookup_table_entry *lte,
748 bool stream_discarded)
750 union wimlib_progress_info *progress = &progress_data->progress;
/* A discarded duplicate shrinks the total rather than advancing the
 * completed count. */
753 if (stream_discarded) {
754 progress->write_streams.total_bytes -= wim_resource_size(lte);
755 if (progress_data->next_progress != ~(uint64_t)0 &&
756 progress_data->next_progress > progress->write_streams.total_bytes)
758 progress_data->next_progress = progress->write_streams.total_bytes;
761 progress->write_streams.completed_bytes += wim_resource_size(lte);
/* Detect transitions between source WIM parts so completed_parts can be
 * counted for split WIMs. */
763 new_wim_part = false;
764 if (lte->resource_location == RESOURCE_IN_WIM &&
765 lte->wim != progress_data->prev_wim_part)
767 if (progress_data->prev_wim_part) {
769 progress->write_streams.completed_parts++;
771 progress_data->prev_wim_part = lte->wim;
773 progress->write_streams.completed_streams++;
774 if (progress_data->progress_func
775 && (progress->write_streams.completed_bytes >= progress_data->next_progress
778 progress_data->progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
/* Schedule the next callback: either never again (all bytes done) or
 * after about another 1% of the total. */
780 if (progress_data->next_progress == progress->write_streams.total_bytes) {
781 progress_data->next_progress = ~(uint64_t)0;
783 progress_data->next_progress =
784 min(progress->write_streams.total_bytes,
785 progress->write_streams.completed_bytes +
786 progress->write_streams.total_bytes / 100);
/* Context for serial_write_stream(): output file, compression type, and
 * resource flags.  NOTE(review): the out_ctype field referenced by
 * serial_write_stream() is missing from this copy. */
791 struct serial_write_stream_ctx {
792 struct filedes *out_fd;
794 int write_resource_flags;
/* Stream-write callback for the serial (single-threaded) path; simply
 * forwards to write_wim_resource() using the parameters in @_ctx.
 * NOTE(review): `<e->` below looks like an XML-entity mangling of
 * `&lte->` — TODO confirm against the original source. */
798 serial_write_stream(struct wim_lookup_table_entry *lte, void *_ctx)
800 struct serial_write_stream_ctx *ctx = _ctx;
801 return write_wim_resource(lte, ctx->out_fd,
802 ctx->out_ctype, <e->output_resource_entry,
803 ctx->write_resource_flags);
/* Write a list of streams, taking into account that some streams may be
 * duplicates that are checksummed and discarded on the fly, and also
 * delegating the actual writing of a stream to a function @write_stream_cb,
 * which is passed the context @write_stream_ctx.
 *
 * NOTE(review): lines are missing from this copy (return type, local @ret,
 * error-handling branches, the `skip_to_progress:` label implied by the
 * goto below, and various braces).  `<e->` appears to be a mangling of
 * `&lte->`. */
812 do_write_stream_list(struct list_head *stream_list,
813 struct wim_lookup_table *lookup_table,
814 int (*write_stream_cb)(struct wim_lookup_table_entry *, void *),
815 void *write_stream_ctx,
816 struct write_streams_progress_data *progress_data)
819 struct wim_lookup_table_entry *lte;
820 bool stream_discarded;
/* For each stream in @stream_list ... */
823 while (!list_empty(stream_list)) {
824 stream_discarded = false;
825 lte = container_of(stream_list->next,
826 struct wim_lookup_table_entry,
828 list_del(<e->write_streams_list);
829 if (lte->unhashed && !lte->unique_size) {
/* Unhashed stream that shares a size with some other stream in the WIM
 * we are writing.  The stream must be checksummed to know if we need to
 * write it or not. */
833 struct wim_lookup_table_entry *tmp;
834 u32 orig_refcnt = lte->out_refcnt;
836 ret = hash_unhashed_stream(lte, lookup_table, &tmp);
/* We found a duplicate stream. */
842 if (orig_refcnt != tmp->out_refcnt) {
/* We have already written, or are going to write, the duplicate
 * stream.  So just skip to the next stream. */
846 DEBUG("Discarding duplicate stream of length %"PRIu64,
847 wim_resource_size(lte));
848 lte->no_progress = 0;
849 stream_discarded = true;
850 goto skip_to_progress;
/* Here, @lte is either a hashed stream or an unhashed stream with a
 * unique size.  In either case we know that the stream has to be
 * written.  In either case the SHA1 message digest will be calculated
 * over the stream while writing it; however, in the former case this is
 * done merely to check the data, while in the latter case this is done
 * because we do not have the SHA1 message digest yet. */
862 wimlib_assert(lte->out_refcnt != 0);
864 lte->no_progress = 0;
865 ret = (*write_stream_cb)(lte, write_stream_ctx);
/* In parallel mode, some streams are deferred for later, serialized
 * processing; ignore them here. */
873 list_del(<e->unhashed_list);
874 lookup_table_insert(lookup_table, lte);
878 if (!lte->no_progress) {
879 do_write_streams_progress(progress_data,
880 lte, stream_discarded);
/* Serial-path wrapper: bundle the output parameters into a
 * serial_write_stream_ctx and run do_write_stream_list() with
 * serial_write_stream() as the per-stream callback.
 * NOTE(review): return type, some designated initializers, and trailing
 * argument lines are missing from this copy. */
887 do_write_stream_list_serial(struct list_head *stream_list,
888 struct wim_lookup_table *lookup_table,
889 struct filedes *out_fd,
891 int write_resource_flags,
892 struct write_streams_progress_data *progress_data)
894 struct serial_write_stream_ctx ctx = {
896 .out_ctype = out_ctype,
897 .write_resource_flags = write_resource_flags,
899 return do_write_stream_list(stream_list,
907 write_flags_to_resource_flags(int write_flags)
909 int resource_flags = 0;
911 if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
912 resource_flags |= WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS;
913 if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
914 resource_flags |= WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE;
915 return resource_flags;
/* Single-threaded stream-list writer: report that one thread is in use,
 * fire an initial progress callback, and delegate to
 * do_write_stream_list_serial().
 * NOTE(review): return type, an @out_ctype parameter line, callback
 * argument lines, and some braces are missing from this copy. */
919 write_stream_list_serial(struct list_head *stream_list,
920 struct wim_lookup_table *lookup_table,
921 struct filedes *out_fd,
923 int write_resource_flags,
924 struct write_streams_progress_data *progress_data)
926 union wimlib_progress_info *progress = &progress_data->progress;
927 DEBUG("Writing stream list of size %"PRIu64" (serial version)",
928 progress->write_streams.total_streams);
929 progress->write_streams.num_threads = 1;
930 if (progress_data->progress_func) {
931 progress_data->progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
934 return do_write_stream_list_serial(stream_list,
938 write_resource_flags,
942 #ifdef ENABLE_MULTITHREADED_COMPRESSION
/* Write the (already-compressed) output chunks of @msg to the output file
 * with a single vectored write, recording each chunk in @chunk_tab.  For
 * pipable output, each chunk is preceded by a pwm_chunk_hdr giving its
 * compressed size.
 *
 * NOTE(review): return type, locals (@ret, @vecs, @nvecs), and some braces
 * are missing from this copy. */
944 write_wim_chunks(struct message *msg, struct filedes *out_fd,
945 struct chunk_table *chunk_tab,
946 int write_resource_flags)
949 struct pwm_chunk_hdr *chunk_hdrs;
/* Account for every chunk in the chunk table first. */
953 for (unsigned i = 0; i < msg->num_chunks; i++)
954 chunk_tab_record_chunk(chunk_tab, msg->out_chunks[i].iov_len);
956 if (!(write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE)) {
957 nvecs = msg->num_chunks;
958 vecs = msg->out_chunks;
/* Special case: If writing a compressed resource to a pipable WIM,
 * prefix each compressed chunk with a header that gives its compressed
 * size — interleave header/data iovecs. */
963 nvecs = msg->num_chunks * 2;
964 vecs = alloca(nvecs * sizeof(vecs[0]));
965 chunk_hdrs = alloca(msg->num_chunks * sizeof(chunk_hdrs[0]));
967 for (unsigned i = 0; i < msg->num_chunks; i++) {
968 chunk_hdrs[i].compressed_size = cpu_to_le32(msg->out_chunks[i].iov_len);
969 vecs[i * 2].iov_base = &chunk_hdrs[i];
970 vecs[i * 2].iov_len = sizeof(chunk_hdrs[i]);
971 vecs[i * 2 + 1].iov_base = msg->out_chunks[i].iov_base;
972 vecs[i * 2 + 1].iov_len = msg->out_chunks[i].iov_len;
975 ret = full_writev(out_fd, vecs, nvecs);
977 ERROR_WITH_ERRNO("Failed to write WIM chunks");
/* State of the main thread while driving parallel (multithreaded)
 * compression of a stream list.  NOTE(review): some fields referenced by
 * the functions below (e.g. num_messages) are missing from this copy. */
981 struct main_writer_thread_ctx {
982 struct list_head *stream_list;
983 struct wim_lookup_table *lookup_table;
984 struct filedes *out_fd;
/* Offset in out_fd where the resource currently being written started. */
985 off_t res_start_offset;
987 int write_resource_flags;
/* Work queue feeding the compressor threads. */
988 struct shared_queue *res_to_compress_queue;
/* Queue of finished messages coming back from compressor threads. */
989 struct shared_queue *compressed_res_queue;
991 struct write_streams_progress_data *progress_data;
/* Messages not currently out for compression. */
993 struct list_head available_msgs;
/* Streams whose chunks have been sent off for compression; the first is
 * the one being written, the last is the one being read. */
994 struct list_head outstanding_streams;
/* Streams to be written directly (serially) by the main thread. */
995 struct list_head serial_streams;
996 size_t num_outstanding_messages;
/* Running SHA1 of the stream currently being read. */
998 SHA_CTX next_sha_ctx;
1000 u64 next_num_chunks;
1001 struct wim_lookup_table_entry *next_lte;
/* Pre-allocated message pool (see allocate_messages()). */
1003 struct message *msgs;
1004 struct message *next_msg;
/* Chunk table of the resource currently being written, or NULL. */
1005 struct chunk_table *cur_chunk_tab;
/* Allocate the per-chunk compressed and uncompressed buffers of one
 * message (WIM_CHUNK_SIZE bytes each).  Returns WIMLIB_ERR_NOMEM if any
 * allocation fails; partially allocated buffers are released later by
 * destroy_message().  NOTE(review): return type, closing brace, and the
 * success return are missing from this copy. */
1009 init_message(struct message *msg)
1011 for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
1012 msg->compressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
1013 msg->uncompressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
1014 if (msg->compressed_chunks[i] == NULL ||
1015 msg->uncompressed_chunks[i] == NULL)
1016 return WIMLIB_ERR_NOMEM;
1022 destroy_message(struct message *msg)
1024 for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
1025 FREE(msg->compressed_chunks[i]);
1026 FREE(msg->uncompressed_chunks[i]);
/* Destroy @num_messages messages and (in the lines missing from this copy)
 * free the array itself; counterpart of allocate_messages().
 * NOTE(review): return type, a likely NULL guard, and the FREE of @msgs
 * are not visible here. */
1031 free_messages(struct message *msgs, size_t num_messages)
1034 for (size_t i = 0; i < num_messages; i++)
1035 destroy_message(&msgs[i]);
/* Allocate an array of @num_messages messages with all per-chunk buffers
 * pre-allocated.  On any failure, everything allocated so far is released
 * via free_messages() (zero-initialization by CALLOC makes partially
 * initialized messages safe to destroy).  Returns the array or NULL.
 * NOTE(review): the NULL check after CALLOC, the failure/success returns,
 * and closing braces are missing from this copy. */
1040 static struct message *
1041 allocate_messages(size_t num_messages)
1043 struct message *msgs;
1045 msgs = CALLOC(num_messages, sizeof(struct message));
1048 for (size_t i = 0; i < num_messages; i++) {
1049 if (init_message(&msgs[i])) {
1050 free_messages(msgs, num_messages);
/* Tear down the main writer thread's context: drain any messages still out
 * with compressor threads (so their buffers are not freed while in use),
 * then free the message pool and any in-progress chunk table.
 * NOTE(review): return type and braces are missing from this copy. */
1058 main_writer_thread_destroy_ctx(struct main_writer_thread_ctx *ctx)
1060 while (ctx->num_outstanding_messages--)
1061 shared_queue_get(ctx->compressed_res_queue);
1062 free_messages(ctx->msgs, ctx->num_messages);
1063 FREE(ctx->cur_chunk_tab);
/* Initialize the main writer thread's context: pre-allocate the message
 * pool and reset all list heads and bookkeeping fields.  Returns
 * WIMLIB_ERR_NOMEM if the pool cannot be allocated.
 * NOTE(review): return type, the NULL check on ctx->msgs, the success
 * return, and closing brace are missing from this copy. */
1067 main_writer_thread_init_ctx(struct main_writer_thread_ctx *ctx)
/* Pre-allocate all the buffers that will be needed to do the chunk
 * compression. */
1071 ctx->msgs = allocate_messages(ctx->num_messages);
1073 return WIMLIB_ERR_NOMEM;
/* Initially, all the messages are available to use. */
1076 INIT_LIST_HEAD(&ctx->available_msgs);
1077 for (size_t i = 0; i < ctx->num_messages; i++)
1078 list_add_tail(&ctx->msgs[i].list, &ctx->available_msgs);
/* outstanding_streams is the list of streams that currently have had
 * chunks sent off for compression.
 *
 * The first stream in outstanding_streams is the stream that is
 * currently being written.
 *
 * The last stream in outstanding_streams is the stream that is
 * currently being read and having chunks fed to the compressor threads. */
1089 INIT_LIST_HEAD(&ctx->outstanding_streams);
1090 ctx->num_outstanding_messages = 0;
1092 ctx->next_msg = NULL;
/* Resources that don't need any chunks compressed are added to this
 * list and written directly by the main thread. */
1096 INIT_LIST_HEAD(&ctx->serial_streams);
1098 ctx->cur_chunk_tab = NULL;
/*
 * Receive one message of compressed chunks from the compressor threads and
 * write out every chunk that is now in sequential order, completing streams
 * whose final chunks have arrived.  The head of ctx->outstanding_streams is
 * the stream whose data must be written next; messages for later chunks stay
 * buffered on their stream's msg_list until all earlier chunks have been
 * received.  NOTE(review): presumably returns 0 on success or a WIMLIB_ERR_*
 * code from a failed write -- confirm against full source.
 */
1104 receive_compressed_chunks(struct main_writer_thread_ctx *ctx)
1106 struct message *msg;
1107 struct wim_lookup_table_entry *cur_lte;
1110 wimlib_assert(!list_empty(&ctx->outstanding_streams));
1111 wimlib_assert(ctx->num_outstanding_messages != 0);
/* The next stream to write is the oldest one submitted for compression. */
1113 cur_lte = container_of(ctx->outstanding_streams.next,
1114 struct wim_lookup_table_entry,
1115 being_compressed_list);
1117 /* Get the next message from the queue and process it.
1118 * The message will contain 1 or more data chunks that have been
1120 msg = shared_queue_get(ctx->compressed_res_queue);
1121 msg->complete = true;
1122 --ctx->num_outstanding_messages;
1124 /* Is this the next chunk in the current resource? If it's not
1125 * (i.e., an earlier chunk in a same or different resource
1126 * hasn't been compressed yet), do nothing, and keep this
1127 * message around until all earlier chunks are received.
1129 * Otherwise, write all the chunks we can. */
1130 while (cur_lte != NULL &&
1131 !list_empty(&cur_lte->msg_list)
1132 && (msg = container_of(cur_lte->msg_list.next,
/* Message consumed: recycle it for the main thread to refill. */
1136 list_move(&msg->list, &ctx->available_msgs);
1137 if (msg->begin_chunk == 0) {
1138 /* First set of chunks. */
1140 /* Write pipable WIM stream header if needed. */
1141 if (ctx->write_resource_flags &
1142 WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE)
1144 ret = write_pwm_stream_header(cur_lte, ctx->out_fd,
1145 WIM_RESHDR_FLAG_COMPRESSED);
1150 /* Save current offset. */
1151 ctx->res_start_offset = ctx->out_fd->offset;
1153 /* Begin building the chunk table, and leave space for
1155 ret = begin_wim_resource_chunk_tab(cur_lte,
1157 &ctx->cur_chunk_tab,
1158 ctx->write_resource_flags);
1164 /* Write the compressed chunks from the message. */
1165 ret = write_wim_chunks(msg, ctx->out_fd, ctx->cur_chunk_tab,
1166 ctx->write_resource_flags);
1170 /* Was this the last chunk of the stream? If so, finish
1172 if (list_empty(&cur_lte->msg_list) &&
1173 msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks
1177 ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab,
1179 ctx->res_start_offset,
1180 ctx->write_resource_flags);
1184 list_del(&cur_lte->being_compressed_list);
1186 res_csize = ctx->out_fd->offset - ctx->res_start_offset;
1188 FREE(ctx->cur_chunk_tab);
1189 ctx->cur_chunk_tab = NULL;
1191 /* Check for resources compressed to greater than or
1192 * equal to their original size and write them
1193 * uncompressed instead. (But never do this if writing
1195 if (res_csize >= wim_resource_size(cur_lte) &&
1196 !(ctx->write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE))
1198 DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
1199 "writing uncompressed instead",
1200 wim_resource_size(cur_lte), res_csize);
1201 ret = seek_and_truncate(ctx->out_fd, ctx->res_start_offset);
1204 ret = write_wim_resource(cur_lte,
1206 WIMLIB_COMPRESSION_TYPE_NONE,
1207 &cur_lte->output_resource_entry,
1208 ctx->write_resource_flags);
/* Record the resource's location, sizes, and flags in its
 * output resource entry. */
1212 cur_lte->output_resource_entry.size =
1215 cur_lte->output_resource_entry.original_size =
1216 cur_lte->resource_entry.original_size;
1218 cur_lte->output_resource_entry.offset =
1219 ctx->res_start_offset;
1221 cur_lte->output_resource_entry.flags =
1222 cur_lte->resource_entry.flags |
1223 WIM_RESHDR_FLAG_COMPRESSED;
1225 DEBUG("Wrote compressed resource "
1226 "(%"PRIu64" => %"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)",
1227 cur_lte->output_resource_entry.original_size,
1228 cur_lte->output_resource_entry.size,
1229 cur_lte->output_resource_entry.offset,
1230 cur_lte->output_resource_entry.flags);
1233 do_write_streams_progress(ctx->progress_data,
1236 /* Since we just finished writing a stream, write any
1237 * streams that have been added to the serial_streams
1238 * list for direct writing by the main thread (e.g.
1239 * resources that don't need to be compressed because
1240 * the desired compression type is the same as the
1241 * previous compression type). */
1242 if (!list_empty(&ctx->serial_streams)) {
1243 ret = do_write_stream_list_serial(&ctx->serial_streams,
1247 ctx->write_resource_flags,
1248 ctx->progress_data);
1253 /* Advance to the next stream to write. */
1254 if (list_empty(&ctx->outstanding_streams)) {
1257 cur_lte = container_of(ctx->outstanding_streams.next,
1258 struct wim_lookup_table_entry,
1259 being_compressed_list);
1266 /* Called when the main thread has read a new chunk of data. */
/*
 * Per-chunk read callback: folds the chunk into the running SHA1 of the
 * stream being read, appends it to the message currently being assembled
 * (acquiring or waiting for a free message if necessary), and submits the
 * message to the compressor threads once it is full.
 */
1268 main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx)
1270 struct main_writer_thread_ctx *ctx = _ctx;
1272 struct message *next_msg;
1273 u64 next_chunk_in_msg;
1275 /* Update SHA1 message digest for the stream currently being read by the
1277 sha1_update(&ctx->next_sha_ctx, chunk, chunk_size);
1279 /* We send chunks of data to the compressor chunks in batches which we
1280 * refer to as "messages". @next_msg is the message that is currently
1281 * being prepared to send off. If it is NULL, that indicates that we
1282 * need to start a new message. */
1283 next_msg = ctx->next_msg;
1285 /* We need to start a new message. First check to see if there
1286 * is a message available in the list of available messages. If
1287 * so, we can just take one. If not, all the messages (there is
1288 * a fixed number of them, proportional to the number of
1289 * threads) have been sent off to the compressor threads, so we
1290 * receive messages from the compressor threads containing
1291 * compressed chunks of data.
1293 * We may need to receive multiple messages before one is
1294 * actually available to use because messages received that are
1295 * *not* for the very next set of chunks to compress must be
1296 * buffered until it's time to write those chunks. */
1297 while (list_empty(&ctx->available_msgs)) {
1298 ret = receive_compressed_chunks(ctx);
/* Take a free message and initialize it for the next run of chunks. */
1303 next_msg = container_of(ctx->available_msgs.next,
1304 struct message, list);
1305 list_del(&next_msg->list);
1306 next_msg->complete = false;
1307 next_msg->begin_chunk = ctx->next_chunk;
1308 next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG,
1309 ctx->next_num_chunks - ctx->next_chunk);
1310 ctx->next_msg = next_msg;
1313 /* Fill in the next chunk to compress */
1314 next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk;
1316 next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size;
1317 memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg],
1320 if (++next_chunk_in_msg == next_msg->num_chunks) {
1321 /* Send off an array of chunks to compress */
1322 list_add_tail(&next_msg->list, &ctx->next_lte->msg_list);
1323 shared_queue_put(ctx->res_to_compress_queue, next_msg);
1324 ++ctx->num_outstanding_messages;
1325 ctx->next_msg = NULL;
/*
 * Drain all outstanding compressed-chunk messages so every parallel stream
 * gets written, then write any streams that were deferred to the main
 * thread on the serial_streams list.
 */
1331 main_writer_thread_finish(void *_ctx)
1333 struct main_writer_thread_ctx *ctx = _ctx;
1335 while (ctx->num_outstanding_messages != 0) {
1336 ret = receive_compressed_chunks(ctx);
/* All parallel streams must have been fully written by now. */
1340 wimlib_assert(list_empty(&ctx->outstanding_streams));
1341 return do_write_stream_list_serial(&ctx->serial_streams,
1345 ctx->write_resource_flags,
1346 ctx->progress_data);
/*
 * Submit stream @lte for parallel compression: reset the per-stream SHA1 and
 * chunk counters, queue the stream on ctx->outstanding_streams, and read its
 * entire contents through main_writer_thread_cb(), which feeds chunks to the
 * compressor threads.  Finally verify (or compute) the stream's SHA1 digest.
 *
 * Fix: restored `&lte->` on the two list-initialization calls below; the
 * text had been corrupted to `<e->` by HTML-entity mangling of `&lt`.
 */
1350 submit_stream_for_compression(struct wim_lookup_table_entry *lte,
1351 struct main_writer_thread_ctx *ctx)
1355 /* Read the entire stream @lte, feeding its data chunks to the
1356 * compressor threads. Also SHA1-sum the stream; this is required in
1357 * the case that @lte is unhashed, and a nice additional verification
1358 * when @lte is already hashed. */
1359 sha1_init(&ctx->next_sha_ctx);
1360 ctx->next_chunk = 0;
1361 ctx->next_num_chunks = wim_resource_chunks(lte);
1362 ctx->next_lte = lte;
1363 INIT_LIST_HEAD(&lte->msg_list);
1364 list_add_tail(&lte->being_compressed_list, &ctx->outstanding_streams);
1365 ret = read_resource_prefix(lte, wim_resource_size(lte),
1366 main_writer_thread_cb, ctx, 0);
1369 wimlib_assert(ctx->next_chunk == ctx->next_num_chunks);
1370 return finalize_and_check_sha1(&ctx->next_sha_ctx, lte);
/*
 * Decide how to write stream @lte: small streams, uncompressed output, or
 * streams already stored with the desired compression type are deferred to
 * the serial (main-thread) list; everything else is submitted for parallel
 * compression.
 *
 * Fix: restored `&lte->write_streams_list` below; the text had been
 * corrupted to `<e->write_streams_list` by HTML-entity mangling of `&lt`.
 */
1374 main_thread_process_next_stream(struct wim_lookup_table_entry *lte, void *_ctx)
1376 struct main_writer_thread_ctx *ctx = _ctx;
1379 if (wim_resource_size(lte) < 1000 ||
1380 ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
1381 (lte->resource_location == RESOURCE_IN_WIM &&
1382 !(ctx->write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS) &&
1383 lte->wim->compression_type == ctx->out_ctype))
1385 /* Stream is too small or isn't being compressed. Process it by
1386 * the main thread when we have a chance. We can't necessarily
1387 * process it right here, as the main thread could be in the
1388 * middle of writing a different stream. */
1389 list_add_tail(&lte->write_streams_list, &ctx->serial_streams);
1393 ret = submit_stream_for_compression(lte, ctx);
1395 lte->no_progress = 1;
/* Return the number of processors to default the thread count to:
 * Win32 API on Windows, sysconf() elsewhere. */
1400 get_default_num_threads(void)
1403 return win32_get_number_of_processors();
1405 return sysconf(_SC_NPROCESSORS_ONLN);
1409 /* Equivalent to write_stream_list_serial(), except this takes a @num_threads
1410 * parameter and will perform compression using that many threads. Falls
1411 * back to write_stream_list_serial() on certain errors, such as a failure to
1412 * create the number of threads requested.
1414 * High level description of the algorithm for writing compressed streams in
1415 * parallel: We perform compression on chunks of size WIM_CHUNK_SIZE bytes
1416 * rather than on full files. The currently executing thread becomes the main
1417 * thread and is entirely in charge of reading the data to compress (which may
1418 * be in any location understood by the resource code--- such as in an external
1419 * file being captured, or in another WIM file from which an image is being
1420 * exported) and actually writing the compressed data to the output file.
1421 * Additional threads are "compressor threads" and all execute the
1422 * compressor_thread_proc, where they repeatedly retrieve buffers of data from
1423 * the main thread, compress them, and hand them back to the main thread.
1425 * Certain streams, such as streams that do not need to be compressed (e.g.
1426 * input compression type same as output compression type) or streams of very
1427 * small size are placed in a list (main_writer_thread_ctx.serial_streams) and
1428 * handled entirely by the main thread at an appropriate time.
1430 * At any given point in time, multiple streams may be having chunks compressed
1431 * concurrently. The stream that the main thread is currently *reading* may be
1432 * later in the list than the stream that the main thread is currently
/*
 * Write @stream_list using @num_threads compressor threads (0 = autodetect).
 * Sets up the two shared queues, spawns the compressor threads, drives the
 * main writer thread over the stream list, then joins the threads and tears
 * everything down.  On recoverable setup errors it falls back to
 * write_stream_list_serial().
 *
 * Fix: restored `&params` in the pthread_create() call below; the text had
 * been corrupted to `¶ms` by HTML-entity mangling of `&par`.
 */
1436 write_stream_list_parallel(struct list_head *stream_list,
1437 struct wim_lookup_table *lookup_table,
1438 struct filedes *out_fd,
1440 int write_resource_flags,
1441 struct write_streams_progress_data *progress_data,
1442 unsigned num_threads)
1445 struct shared_queue res_to_compress_queue;
1446 struct shared_queue compressed_res_queue;
1447 pthread_t *compressor_threads = NULL;
1448 union wimlib_progress_info *progress = &progress_data->progress;
1450 if (num_threads == 0) {
1451 long nthreads = get_default_num_threads();
1452 if (nthreads < 1 || nthreads > UINT_MAX) {
1453 WARNING("Could not determine number of processors! Assuming 1");
1455 } else if (nthreads == 1) {
1456 goto out_serial_quiet;
1458 num_threads = nthreads;
1462 DEBUG("Writing stream list of size %"PRIu64" "
1463 "(parallel version, num_threads=%u)",
1464 progress->write_streams.total_streams, num_threads);
1466 progress->write_streams.num_threads = num_threads;
/* Two in-flight messages per compressor thread. */
1468 static const size_t MESSAGES_PER_THREAD = 2;
1469 size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD);
1471 DEBUG("Initializing shared queues (queue_size=%zu)", queue_size);
1473 ret = shared_queue_init(&res_to_compress_queue, queue_size);
1477 ret = shared_queue_init(&compressed_res_queue, queue_size);
1479 goto out_destroy_res_to_compress_queue;
1481 struct compressor_thread_params params;
1482 params.res_to_compress_queue = &res_to_compress_queue;
1483 params.compressed_res_queue = &compressed_res_queue;
1484 params.compress = get_compress_func(out_ctype);
1486 compressor_threads = MALLOC(num_threads * sizeof(pthread_t));
1487 if (!compressor_threads) {
1488 ret = WIMLIB_ERR_NOMEM;
1489 goto out_destroy_compressed_res_queue;
1492 for (unsigned i = 0; i < num_threads; i++) {
1493 DEBUG("pthread_create thread %u of %u", i + 1, num_threads);
1494 ret = pthread_create(&compressor_threads[i], NULL,
1495 compressor_thread_proc, &params);
1498 ERROR_WITH_ERRNO("Failed to create compressor "
1500 i + 1, num_threads);
1506 if (progress_data->progress_func) {
1507 progress_data->progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
1511 struct main_writer_thread_ctx ctx;
1512 ctx.stream_list = stream_list;
1513 ctx.lookup_table = lookup_table;
1514 ctx.out_fd = out_fd;
1515 ctx.out_ctype = out_ctype;
1516 ctx.res_to_compress_queue = &res_to_compress_queue;
1517 ctx.compressed_res_queue = &compressed_res_queue;
1518 ctx.num_messages = queue_size;
1519 ctx.write_resource_flags = write_resource_flags;
1520 ctx.progress_data = progress_data;
1521 ret = main_writer_thread_init_ctx(&ctx);
1524 ret = do_write_stream_list(stream_list, lookup_table,
1525 main_thread_process_next_stream,
1526 &ctx, progress_data);
1528 goto out_destroy_ctx;
1530 /* The main thread has finished reading all streams that are going to be
1531 * compressed in parallel, and it now needs to wait for all remaining
1532 * chunks to be compressed so that the remaining streams can actually be
1533 * written to the output file. Furthermore, any remaining streams that
1534 * had processing deferred to the main thread need to be handled. These
1535 * tasks are done by the main_writer_thread_finish() function. */
1536 ret = main_writer_thread_finish(&ctx);
1538 main_writer_thread_destroy_ctx(&ctx);
/* A NULL message tells each compressor thread to exit. */
1540 for (unsigned i = 0; i < num_threads; i++)
1541 shared_queue_put(&res_to_compress_queue, NULL);
1543 for (unsigned i = 0; i < num_threads; i++) {
1544 if (pthread_join(compressor_threads[i], NULL)) {
1545 WARNING_WITH_ERRNO("Failed to join compressor "
1547 i + 1, num_threads);
1550 FREE(compressor_threads);
1551 out_destroy_compressed_res_queue:
1552 shared_queue_destroy(&compressed_res_queue);
1553 out_destroy_res_to_compress_queue:
1554 shared_queue_destroy(&res_to_compress_queue);
1555 if (ret >= 0 && ret != WIMLIB_ERR_NOMEM)
1558 WARNING("Falling back to single-threaded compression");
1560 return write_stream_list_serial(stream_list,
1564 write_resource_flags,
1571 * Write a list of streams to a WIM (@out_fd) using the compression type
1572 * @out_ctype and up to @num_threads compressor threads.
/*
 * Top-level stream writer: sorts @stream_list into sequential read order,
 * totals up the bytes to write and to compress (for progress reporting),
 * then dispatches to the parallel writer when multithreaded compression is
 * enabled, enough data needs compressing, and num_threads != 1; otherwise
 * to the serial writer.
 */
1575 write_stream_list(struct list_head *stream_list,
1576 struct wim_lookup_table *lookup_table,
1577 struct filedes *out_fd, int out_ctype, int write_flags,
1578 unsigned num_threads, wimlib_progress_func_t progress_func)
1580 struct wim_lookup_table_entry *lte;
1581 size_t num_streams = 0;
1582 u64 total_bytes = 0;
1583 u64 total_compression_bytes = 0;
1584 struct write_streams_progress_data progress_data;
1586 int write_resource_flags;
1587 unsigned total_parts = 0;
1588 WIMStruct *prev_wim_part = NULL;
/* Nothing to do for an empty list. */
1590 if (list_empty(stream_list))
1593 write_resource_flags = write_flags_to_resource_flags(write_flags);
1595 DEBUG("write_resource_flags=0x%08x", write_resource_flags);
1597 sort_stream_list_by_sequential_order(stream_list,
1598 offsetof(struct wim_lookup_table_entry,
1599 write_streams_list));
1601 /* Calculate the total size of the streams to be written. Note: this
1602 * will be the uncompressed size, as we may not know the compressed size
1603 * yet, and also this will assume that every unhashed stream will be
1604 * written (which will not necessarily be the case). */
1605 list_for_each_entry(lte, stream_list, write_streams_list) {
1607 total_bytes += wim_resource_size(lte);
1608 if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
1609 && (wim_resource_compression_type(lte) != out_ctype ||
1610 (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS)))
1612 total_compression_bytes += wim_resource_size(lte);
1614 if (lte->resource_location == RESOURCE_IN_WIM) {
1615 if (prev_wim_part != lte->wim) {
1616 prev_wim_part = lte->wim;
/* Initialize the progress data shared with the writer routines. */
1623 memset(&progress_data, 0, sizeof(progress_data));
1624 progress_data.progress_func = progress_func;
1626 progress_data.progress.write_streams.total_bytes = total_bytes;
1627 progress_data.progress.write_streams.total_streams = num_streams;
1628 progress_data.progress.write_streams.completed_bytes = 0;
1629 progress_data.progress.write_streams.completed_streams = 0;
1630 progress_data.progress.write_streams.num_threads = num_threads;
1631 progress_data.progress.write_streams.compression_type = out_ctype;
1632 progress_data.progress.write_streams.total_parts = total_parts;
1633 progress_data.progress.write_streams.completed_parts = 0;
1635 progress_data.next_progress = 0;
1636 progress_data.prev_wim_part = NULL;
1638 #ifdef ENABLE_MULTITHREADED_COMPRESSION
/* Parallel compression only pays off above ~2 MB of data to compress. */
1639 if (total_compression_bytes >= 2000000 && num_threads != 1)
1640 ret = write_stream_list_parallel(stream_list,
1644 write_resource_flags,
1649 ret = write_stream_list_serial(stream_list,
1653 write_resource_flags,
1656 DEBUG("Successfully wrote stream list.");
1658 DEBUG("Failed to write stream list.");
/* Hash table keyed on stream (uncompressed) size, used to mark streams
 * whose size is unique among the streams considered. */
1662 struct stream_size_table {
1663 struct hlist_head *array;
/* Initialize a stream size table with @capacity hash buckets.
 * Returns WIMLIB_ERR_NOMEM if the bucket array cannot be allocated. */
1669 init_stream_size_table(struct stream_size_table *tab, size_t capacity)
1671 tab->array = CALLOC(capacity, sizeof(tab->array[0]));
1673 return WIMLIB_ERR_NOMEM;
1674 tab->num_entries = 0;
1675 tab->capacity = capacity;
1680 destroy_stream_size_table(struct stream_size_table *tab)
/*
 * Insert stream @lte into the stream size table @_tab.  If another stream of
 * the same uncompressed size already exists in the table, both streams are
 * marked as not having a unique size; otherwise @lte starts out marked
 * unique.
 *
 * Fix: restored `&lte->hash_list_2` in the hlist_add_head() call below; the
 * text had been corrupted to `<e->hash_list_2` by HTML-entity mangling of
 * `&lt`.
 */
1686 stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab)
1688 struct stream_size_table *tab = _tab;
1690 struct wim_lookup_table_entry *same_size_lte;
1691 struct hlist_node *tmp;
1693 pos = hash_u64(wim_resource_size(lte)) % tab->capacity;
1694 lte->unique_size = 1;
1695 hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) {
1696 if (wim_resource_size(same_size_lte) == wim_resource_size(lte)) {
1697 lte->unique_size = 0;
1698 same_size_lte->unique_size = 0;
1703 hlist_add_head(&lte->hash_list_2, &tab->array[pos]);
/* Shared state for the two lte_overwrite_prepare*() phases of an in-place
 * overwrite: the list of streams to write and the size table used to flag
 * unique-size streams. */
1709 struct lte_overwrite_prepare_args {
1712 struct list_head stream_list;
1713 struct stream_size_table stream_size_tab;
1716 /* First phase of preparing streams for an in-place overwrite. This is called
1717 * on all streams, both hashed and unhashed, except the metadata resources. */
/*
 * Phase 1 of preparing a stream for in-place overwrite: streams not already
 * stored in the WIM being overwritten are queued for writing; every stream
 * has its out_refcnt initialized from refcnt and is entered into the stream
 * size table.
 *
 * Fix: restored `&lte->write_streams_list` below; the text had been
 * corrupted to `<e->write_streams_list` by HTML-entity mangling of `&lt`.
 */
1719 lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *_args)
1721 struct lte_overwrite_prepare_args *args = _args;
1723 wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA));
1724 if (lte->resource_location != RESOURCE_IN_WIM || lte->wim != args->wim)
1725 list_add_tail(&lte->write_streams_list, &args->stream_list);
1726 lte->out_refcnt = lte->refcnt;
1727 stream_size_table_insert(lte, &args->stream_size_tab);
1731 /* Second phase of preparing streams for an in-place overwrite. This is called
1732 * on existing metadata resources and hashed streams, but not unhashed streams.
1734 * NOTE: lte->output_resource_entry is in union with lte->hash_list_2, so
1735 * lte_overwrite_prepare_2() must be called after lte_overwrite_prepare(), as
1736 * the latter uses lte->hash_list_2, while the former expects to set
1737 * lte->output_resource_entry. */
/*
 * Phase 2 of preparing a stream for in-place overwrite: for streams already
 * stored in the WIM being overwritten, reject the overwrite if the stream
 * lies past the XML data, otherwise carry the existing resource entry over
 * to the output resource entry.
 *
 * Fix: restored `&lte->` on the copy_resource_entry() arguments below; the
 * text had been corrupted to `<e->` by HTML-entity mangling of `&lt`.
 */
1739 lte_overwrite_prepare_2(struct wim_lookup_table_entry *lte, void *_args)
1741 struct lte_overwrite_prepare_args *args = _args;
1743 if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == args->wim) {
1744 /* We can't do an in place overwrite on the WIM if there are
1745 * streams after the XML data. */
1746 if (lte->resource_entry.offset +
1747 lte->resource_entry.size > args->end_offset)
1749 if (wimlib_print_errors) {
1750 ERROR("The following resource is after the XML data:");
1751 print_lookup_table_entry(lte, stderr);
1753 return WIMLIB_ERR_RESOURCE_ORDER;
1755 copy_resource_entry(&lte->output_resource_entry,
1756 &lte->resource_entry);
1761 /* Given a WIM that we are going to overwrite in place with zero or more
1762 * additional streams added, construct the list of new unique streams
1763 * ('struct wim_lookup_table_entry's) that must be written, plus any unhashed
1764 * streams that need to be added but may be identical to other hashed or
1765 * unhashed streams. These unhashed streams are checksummed while the streams
1766 * are being written. To aid this process, the member @unique_size is set to 1
1767 * on streams that have a unique size and therefore must be written.
1769 * The out_refcnt member of each 'struct wim_lookup_table_entry' is set to
1770 * indicate the number of times the stream is referenced in only the streams
1771 * that are being written; this may still be adjusted later when unhashed
1772 * streams are being resolved.
/*
 * Build @stream_list for an in-place overwrite of @wim: run phase 1
 * (lte_overwrite_prepare) over all unhashed and hashed streams, then run
 * phase 2 (lte_overwrite_prepare_2) over the metadata resources and hashed
 * streams, verifying that no existing resource lies past @end_offset.
 */
1775 prepare_streams_for_overwrite(WIMStruct *wim, off_t end_offset,
1776 struct list_head *stream_list)
1779 struct lte_overwrite_prepare_args args;
1783 args.end_offset = end_offset;
1784 ret = init_stream_size_table(&args.stream_size_tab,
1785 wim->lookup_table->capacity);
1789 INIT_LIST_HEAD(&args.stream_list);
1790 for (i = 0; i < wim->hdr.image_count; i++) {
1791 struct wim_image_metadata *imd;
1792 struct wim_lookup_table_entry *lte;
1794 imd = wim->image_metadata[i];
1795 image_for_each_unhashed_stream(lte, imd)
1796 lte_overwrite_prepare(lte, &args);
1798 for_lookup_table_entry(wim->lookup_table, lte_overwrite_prepare, &args);
1799 list_transfer(&args.stream_list, stream_list);
/* Phase 2 must run after phase 1; see the note on lte_overwrite_prepare_2()
 * about the union of output_resource_entry and hash_list_2. */
1801 for (i = 0; i < wim->hdr.image_count; i++) {
1802 ret = lte_overwrite_prepare_2(wim->image_metadata[i]->metadata_lte,
1805 goto out_destroy_stream_size_table;
1807 ret = for_lookup_table_entry(wim->lookup_table,
1808 lte_overwrite_prepare_2, &args);
1809 out_destroy_stream_size_table:
1810 destroy_stream_size_table(&args.stream_size_tab);
/* Context used while collecting the unique streams to write for one or all
 * images of a WIM. */
1815 struct find_streams_ctx {
1816 struct list_head stream_list;
1817 struct stream_size_table stream_size_tab;
/*
 * Reference stream @lte @nref additional times for the write in progress;
 * the first reference also enters the stream into the size table and queues
 * it on the list of streams to write.
 *
 * Fix: restored `&lte->write_streams_list` below; the text had been
 * corrupted to `<e->write_streams_list` by HTML-entity mangling of `&lt`.
 */
1821 lte_reference_for_write(struct wim_lookup_table_entry *lte,
1822 struct find_streams_ctx *ctx,
1825 if (lte->out_refcnt == 0) {
1827 stream_size_table_insert(lte, &ctx->stream_size_tab);
1828 list_add_tail(&lte->write_streams_list, &ctx->stream_list);
1830 lte->out_refcnt += nref;
/* for_lookup_table_entry() wrapper: reset out_refcnt, then reference the
 * stream once per existing reference (refcnt). */
1834 do_lte_reference_for_write(struct wim_lookup_table_entry *lte, void *_ctx)
1836 struct find_streams_ctx *ctx = _ctx;
1837 lte->out_refcnt = 0;
1838 lte_reference_for_write(lte, ctx, lte->refcnt);
/* Reference, for writing, every stream of @inode (the unnamed stream plus
 * all alternate data streams), counting each once per hard link. */
1843 inode_find_streams_to_write(struct wim_inode *inode,
1844 struct wim_lookup_table *table,
1845 struct find_streams_ctx *ctx)
1847 struct wim_lookup_table_entry *lte;
/* Index 0 is the unnamed stream; 1..i_num_ads are the named streams. */
1850 for (i = 0; i <= inode->i_num_ads; i++) {
1851 lte = inode_stream_lte(inode, i, table);
1853 lte_reference_for_write(lte, ctx, inode->i_nlink);
/*
 * Collect the streams to write for the currently selected image of @wim by
 * walking its inodes.  Unhashed streams have out_refcnt cleared first so
 * that only inode references actually found are counted.
 */
1858 image_find_streams_to_write(WIMStruct *wim)
1860 struct find_streams_ctx *ctx;
1861 struct wim_image_metadata *imd;
1862 struct wim_inode *inode;
1863 struct wim_lookup_table_entry *lte;
1866 imd = wim_get_current_image_metadata(wim);
1868 image_for_each_unhashed_stream(lte, imd)
1869 lte->out_refcnt = 0;
1871 /* Go through this image's inodes to find any streams that have not been
1873 image_for_each_inode(inode, imd)
1874 inode_find_streams_to_write(inode, wim->lookup_table, ctx);
1878 /* Given a WIM from which one or all of the images is being written, build
1879 * the list of unique streams ('struct wim_lookup_table_entry's) that must be
1880 * written, plus any unhashed streams that need to be written but may be
1881 * identical to other hashed or unhashed streams being written. These unhashed
1882 * streams are checksummed while the streams are being written. To aid this
1883 * process, the member @unique_size is set to 1 on streams that have a unique
1884 * size and therefore must be written.
1886 * The out_refcnt member of each 'struct wim_lookup_table_entry' is set to
1887 * indicate the number of times the stream is referenced in only the streams
1888 * that are being written; this may still be adjusted later when unhashed
1889 * streams are being resolved.
/*
 * Build @stream_list: the unique streams that must be written for @image
 * (or all images, when @image is WIMLIB_ALL_IMAGES) of @wim.  Also flags
 * unique-size streams via the stream size table and sets out_refcnt on each
 * stream to its reference count within the streams being written.
 */
1892 prepare_stream_list(WIMStruct *wim, int image, struct list_head *stream_list)
1895 struct find_streams_ctx ctx;
1897 DEBUG("Preparing list of streams to write for image %d.", image);
1899 for_lookup_table_entry(wim->lookup_table, lte_zero_out_refcnt, NULL);
1900 ret = init_stream_size_table(&ctx.stream_size_tab,
1901 wim->lookup_table->capacity);
1904 for_lookup_table_entry(wim->lookup_table, stream_size_table_insert,
1905 &ctx.stream_size_tab);
1906 INIT_LIST_HEAD(&ctx.stream_list);
/* Made available to the per-image callback via the WIMStruct. */
1907 wim->private = &ctx;
1910 /* Optimization enabled by default: if we're writing all the images,
1911 * it's not strictly necessary to decompress, parse, and go through the
1912 * dentry tree in each image's metadata resource. Instead, include all
1913 * the hashed streams referenced from the lookup table as well as all
1914 * unhashed streams referenced in the per-image list. For 'out_refcnt'
1915 * for each stream, just copy the value from 'refcnt', which is the
1916 * reference count of that stream in the entire WIM. */
1917 if (image == WIMLIB_ALL_IMAGES) {
1918 struct wim_lookup_table_entry *lte;
1919 struct wim_image_metadata *imd;
1922 for_lookup_table_entry(wim->lookup_table,
1923 do_lte_reference_for_write, &ctx);
1924 for (i = 0; i < wim->hdr.image_count; i++) {
1925 imd = wim->image_metadata[i];
1926 image_for_each_unhashed_stream(lte, imd)
1927 do_lte_reference_for_write(lte, &ctx);
/* Single image: walk that image's dentry tree instead. */
1932 ret = for_image(wim, image, image_find_streams_to_write);
1934 destroy_stream_size_table(&ctx.stream_size_tab);
1937 list_transfer(&ctx.stream_list, stream_list);
1941 /* Writes the streams for the specified @image in @wim to @wim->out_fd.
1942 * Alternatively, if @stream_list_override is specified, it is taken to be the
1943 * list of streams to write (connected with 'write_streams_list') and @image is
/*
 * Write the streams for @image of @wim to wim->out_fd.  If
 * @stream_list_override is given, it is used as the list of streams to
 * write instead of building one with prepare_stream_list(); out_refcnt is
 * then initialized here.
 */
1946 write_wim_streams(WIMStruct *wim, int image, int write_flags,
1947 unsigned num_threads,
1948 wimlib_progress_func_t progress_func,
1949 struct list_head *stream_list_override)
1952 struct list_head _stream_list;
1953 struct list_head *stream_list;
1954 struct wim_lookup_table_entry *lte;
1956 if (stream_list_override) {
1957 stream_list = stream_list_override;
1958 list_for_each_entry(lte, stream_list, write_streams_list) {
1960 lte->out_refcnt = lte->refcnt;
1962 lte->out_refcnt = 1;
1965 stream_list = &_stream_list;
1966 ret = prepare_stream_list(wim, image, stream_list);
/* Tag each stream with the part number of the WIM being written. */
1970 list_for_each_entry(lte, stream_list, write_streams_list)
1971 lte->part_number = wim->hdr.part_number;
1972 return write_stream_list(stream_list,
1975 wim->compression_type,
/*
 * Write the metadata resources for @image (or all images) of @wim to
 * wim->out_fd, skipped entirely if WIMLIB_WRITE_FLAG_NO_METADATA is set.
 * Modified images get a freshly built metadata resource; unmodified ones
 * have their existing metadata resource copied through.
 */
1982 write_wim_metadata_resources(WIMStruct *wim, int image, int write_flags,
1983 wimlib_progress_func_t progress_func)
1988 int write_resource_flags;
1990 if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)
1993 write_resource_flags = write_flags_to_resource_flags(write_flags);
1995 DEBUG("Writing metadata resources (offset=%"PRIu64")",
1996 wim->out_fd.offset);
1999 progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN, NULL);
2001 if (image == WIMLIB_ALL_IMAGES) {
2003 end_image = wim->hdr.image_count;
2005 start_image = image;
/* Image numbers are 1-based; image_metadata[] is 0-based. */
2009 for (int i = start_image; i <= end_image; i++) {
2010 struct wim_image_metadata *imd;
2012 imd = wim->image_metadata[i - 1];
2013 /* Build a new metadata resource only if image was modified from
2014 * the original (or was newly added). Otherwise just copy the
2016 if (imd->modified) {
2017 ret = write_metadata_resource(wim, i,
2018 write_resource_flags);
2020 ret = write_wim_resource(imd->metadata_lte,
2022 wim->compression_type,
2023 &imd->metadata_lte->output_resource_entry,
2024 write_resource_flags);
2030 progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_END, NULL);
/* Open @path for writing (mode 0644, always in binary mode) and initialize
 * wim->out_fd with the resulting descriptor.  Returns WIMLIB_ERR_OPEN on
 * failure. */
2035 open_wim_writable(WIMStruct *wim, const tchar *path, int open_flags)
2038 DEBUG("Opening \"%"TS"\" for writing.", path);
2040 raw_fd = topen(path, open_flags | O_BINARY, 0644);
2042 ERROR_WITH_ERRNO("Failed to open \"%"TS"\" for writing", path);
2043 return WIMLIB_ERR_OPEN;
2045 filedes_init(&wim->out_fd, raw_fd);
/* Close wim->out_fd (unless the caller supplied the file descriptor via
 * WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) and mark it invalid.  A failed close
 * yields WIMLIB_ERR_WRITE. */
2050 close_wim_writable(WIMStruct *wim, int write_flags)
2054 if (!(write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR))
2055 if (filedes_valid(&wim->out_fd))
2056 if (filedes_close(&wim->out_fd))
2057 ret = WIMLIB_ERR_WRITE;
2058 filedes_invalidate(&wim->out_fd);
2063 * Finish writing a WIM file: write the lookup table, xml data, and integrity
2064 * table, then overwrite the WIM header. Always closes the WIM file descriptor
2067 * write_flags is a bitwise OR of the following:
2069 * (public) WIMLIB_WRITE_FLAG_CHECK_INTEGRITY:
2070 * Include an integrity table.
2072 * (public) WIMLIB_WRITE_FLAG_FSYNC:
2073 * fsync() the output file before closing it.
2075 * (public) WIMLIB_WRITE_FLAG_PIPABLE:
2076 * Writing a pipable WIM, possibly to a pipe; include pipable WIM
2077 * stream headers before the lookup table and XML data, and also
2078 * write the WIM header at the end instead of seeking to the
2079 * beginning. Can't be combined with
2080 * WIMLIB_WRITE_FLAG_CHECK_INTEGRITY.
2082 * (private) WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE:
2083 * Don't write the lookup table.
2085 * (private) WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE:
2086 * When (if) writing the integrity table, re-use entries from the
2087 * existing integrity table, if possible.
2089 * (private) WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML:
2090 * After writing the XML data but before writing the integrity
2091 * table, write a temporary WIM header and flush the stream so that
2092 * the WIM is less likely to become corrupted upon abrupt program
2094 * (private) WIMLIB_WRITE_FLAG_HEADER_AT_END:
2095 * Instead of overwriting the WIM header at the beginning of the
2096 * file, simply append it to the end of the file. (Used when
2098 * (private) WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES:
2099 * Use the existing <TOTALBYTES> stored in the in-memory XML
2100 * information, rather than setting it to the offset of the XML
2101 * data being written.
1104 finish_write(WIMStruct *wim, int image, int write_flags,
2105 wimlib_progress_func_t progress_func,
2106 struct list_head *stream_list_override)
2110 int write_resource_flags;
2111 off_t old_lookup_table_end;
2112 off_t new_lookup_table_end;
2115 write_resource_flags = write_flags_to_resource_flags(write_flags);
2117 /* In the WIM header, there is room for the resource entry for a
2118 * metadata resource labeled as the "boot metadata". This entry should
2119 * be zeroed out if there is no bootable image (boot_idx 0). Otherwise,
2120 * it should be a copy of the resource entry for the image that is
2121 * marked as bootable. This is not well documented... */
2122 if (wim->hdr.boot_idx == 0) {
2123 zero_resource_entry(&wim->hdr.boot_metadata_res_entry);
2125 copy_resource_entry(&wim->hdr.boot_metadata_res_entry,
2126 &wim->image_metadata[wim->hdr.boot_idx- 1
2127 ]->metadata_lte->output_resource_entry);
2130 /* Write lookup table. (Save old position first.) */
2131 old_lookup_table_end = wim->hdr.lookup_table_res_entry.offset +
2132 wim->hdr.lookup_table_res_entry.size;
2133 if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
2134 ret = write_wim_lookup_table(wim, image, write_flags,
2135 &wim->hdr.lookup_table_res_entry,
2136 stream_list_override);
2141 /* Write XML data. */
2142 xml_totalbytes = wim->out_fd.offset;
2143 if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES)
2144 xml_totalbytes = WIM_TOTALBYTES_USE_EXISTING;
2145 ret = write_wim_xml_data(wim, image, xml_totalbytes,
2146 &wim->hdr.xml_res_entry,
2147 write_resource_flags);
/* Optionally write the integrity table, preceded (if requested) by a
 * checkpoint header flagged WRITE_IN_PROGRESS so an interrupted write is
 * detectable. */
2151 if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2152 if (write_flags & WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) {
2153 struct wim_header checkpoint_hdr;
2154 memcpy(&checkpoint_hdr, &wim->hdr, sizeof(struct wim_header));
2155 zero_resource_entry(&checkpoint_hdr.integrity);
2156 checkpoint_hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2157 ret = write_wim_header_at_offset(&checkpoint_hdr,
/* Old table entries may be reused only when explicitly requested. */
2163 if (!(write_flags & WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE))
2164 old_lookup_table_end = 0;
2166 new_lookup_table_end = wim->hdr.lookup_table_res_entry.offset +
2167 wim->hdr.lookup_table_res_entry.size;
2169 ret = write_integrity_table(wim,
2170 new_lookup_table_end,
2171 old_lookup_table_end,
2176 zero_resource_entry(&wim->hdr.integrity);
/* Finalize and write the real header (appended at EOF for pipable WIMs). */
2179 wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2181 if (write_flags & WIMLIB_WRITE_FLAG_HEADER_AT_END)
2182 hdr_offset = wim->out_fd.offset;
2183 ret = write_wim_header_at_offset(&wim->hdr, &wim->out_fd, hdr_offset);
2187 if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
2188 if (fsync(wim->out_fd.fd)) {
2189 ERROR_WITH_ERRNO("Error syncing data to WIM file");
2190 ret = WIMLIB_ERR_WRITE;
2197 if (close_wim_writable(wim, write_flags)) {
2199 ERROR_WITH_ERRNO("Failed to close the output WIM file");
2200 ret = WIMLIB_ERR_WRITE;
2206 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
/*
 * Take an exclusive, non-blocking flock() on the WIM's file descriptor to
 * guard against concurrent modification.  EWOULDBLOCK (already locked by
 * another process) is a hard error; other flock() failures only produce a
 * warning.  No-op if @fd is -1 or the WIM is already locked.
 */
2208 lock_wim(WIMStruct *wim, int fd)
2211 if (fd != -1 && !wim->wim_locked) {
2212 ret = flock(fd, LOCK_EX | LOCK_NB);
2214 if (errno == EWOULDBLOCK) {
2215 ERROR("`%"TS"' is already being modified or has been "
2216 "mounted read-write\n"
2217 " by another process!", wim->filename);
2218 ret = WIMLIB_ERR_ALREADY_LOCKED;
2220 WARNING_WITH_ERRNO("Failed to lock `%"TS"'",
2225 wim->wim_locked = 1;
2233 * Perform the intermediate stages of creating a "pipable" WIM (i.e. a WIM
2234 * capable of being applied from a pipe). Such a WIM looks like:
2236 * Pipable WIMs are a wimlib-specific modification of the WIM format such that
2237 * images can be applied from them sequentially when the file data is sent over
2238 * a pipe. In addition, a pipable WIM can be written sequentially to a pipe.
2239 * The modifications made to the WIM format for pipable WIMs are:
2241 * - Magic characters in header are "WLPWM\0\0\0" (wimlib pipable WIM) instead
2242 * of "MSWIM\0\0\0". This lets wimlib know that the WIM is pipable and also
2243 * should stop other software from trying to read the file as a normal WIM.
2245 * - The header at the beginning of the file does not contain all the normal
2246 * information; in particular it will have all 0's for the lookup table and
2247 * XML data resource entries. This is because this information cannot be
2248 * determined until the lookup table and XML data have been written.
2249 * Consequently, wimlib will write the full header at the very end of the
2250 * file. The header at the end, however, is only used when reading the WIM
2251 * from a seekable file (not a pipe).
2253 * - An extra copy of the XML data is placed directly after the header. This
2254 * allows image names and sizes to be determined at an appropriate time when
2255 * reading the WIM from a pipe. This copy of the XML data is ignored if the
2256 * WIM is read from a seekable file (not a pipe).
2258 * - The format of resources, or streams, has been modified to allow them to be
2259 * used before the "lookup table" has been read. Each stream is prefixed with
2260 * a `struct pwm_stream_hdr' that is basically an abbreviated form of `struct
2261 * wim_lookup_table_entry_disk' that only contains the SHA1 message digest,
2262 * uncompressed stream size, and flags that indicate whether the stream is
2263 * compressed. The data of uncompressed streams then follows literally, while
2264 * the data of compressed streams follows in a modified format. Compressed
2265 * streams have no chunk table, since the chunk table cannot be written until
2266 * all chunks have been compressed; instead, each compressed chunk is prefixed
2267 * by a `struct pwm_chunk_hdr' that gives its size. However, the offsets are
2268 * given in the chunk table as if these chunk headers were not present.
2270 * - Metadata resources always come before other file resources (streams).
2271 * (This does not by itself constitute an incompatibility with normal WIMs,
2272 * since this is valid in normal WIMs.)
2274 * - At least up to the end of the file resources, all components must be packed
2275 * as tightly as possible; there cannot be any "holes" in the WIM. (This does
2276 * not by itself constitute an incompatibility with normal WIMs, since this
2277 * is valid in normal WIMs.)
2279 * Note: the lookup table, XML data, and header at the end are not used when
2280 * applying from a pipe. They exist to support functionality such as image
2281 * application and export when the WIM is *not* read from a pipe.
2283 * Layout of pipable WIM:
2285 * ----------+----------+--------------------+----------------+--------------+------------+--------+
2286 * | Header | XML data | Metadata resources | File resources | Lookup table | XML data | Header |
2287 * ----------+----------+--------------------+----------------+--------------+------------+--------+
2289 * Layout of normal WIM:
2291 * +---------+--------------------+----------------+--------------+----------+
2292 * | Header | Metadata resources | File resources | Lookup table | XML data |
2293 * +---------+--------------------+----------------+--------------+----------+
2295 * Do note that since pipable WIMs are not supported by Microsoft's software,
2296 * wimlib does not create them unless explicitly requested (with
2297 * WIMLIB_WRITE_FLAG_PIPABLE) and as stated above they use different magic
2298 * characters to identify the file.
/*
 * Write the components of a pipable WIM that precede the lookup table:
 * the extra copy of the XML data, the metadata resources, and the stream
 * (file) data, in that order.  The lookup table, final XML data, and
 * trailing header are written afterward by finish_write().  The initial
 * (incomplete) header has already been written by the caller.
 */
2301 write_pipable_wim(WIMStruct *wim, int image, int write_flags,
2302 unsigned num_threads, wimlib_progress_func_t progress_func,
2303 struct list_head *stream_list_override)
2306 struct resource_entry xml_res_entry;
2308 WARNING("Creating a pipable WIM, which will "
2310 " with Microsoft's software (wimgapi/imagex/Dism).");
2312 /* At this point, the header at the beginning of the file has already
2315 /* For efficiency, when wimlib adds an image to the WIM with
2316 * wimlib_add_image(), the SHA1 message digests of files are not
2317 * calculated; instead, they are calculated while the files are being
2318 * written. However, this does not work when writing a pipable WIM,
2319 * since when writing a stream to a pipable WIM, its SHA1 message digest
2320 * needs to be known before the stream data is written. Therefore,
2321 * before getting much farther, we need to pre-calculate the SHA1
2322 * message digests of all streams that will be written. */
2323 ret = wim_checksum_unhashed_streams(wim);
2327 /* Write extra copy of the XML data. */
2328 ret = write_wim_xml_data(wim, image, WIM_TOTALBYTES_OMIT,
2330 WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE);
2334 /* Write metadata resources for the image(s) being included in the
2336 ret = write_wim_metadata_resources(wim, image, write_flags,
2341 /* Write streams needed for the image(s) being included in the output
2342 * WIM, or streams needed for the split WIM part. */
2343 return write_wim_streams(wim, image, write_flags, num_threads,
2344 progress_func, stream_list_override);
2346 /* The lookup table, XML data, and header at end are handled by
2347 * finish_write(). */
2350 /* Write a standalone WIM or split WIM (SWM) part to a new file or to a file
/*
 * Core WIM writing routine shared by wimlib_write(), wimlib_write_to_fd(),
 * and split-WIM writing.
 *
 * @path_or_fd is interpreted as `const int *' (a file descriptor) when
 * WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR is set, otherwise as `const tchar *'
 * (a filename).  @part_number/@total_parts are 1/1 for a standalone WIM.
 * @guid, if non-NULL, supplies the 16-byte GUID for the new header;
 * otherwise a random GUID is generated.  @stream_list_override, if
 * non-NULL, overrides the set of streams to write.
 *
 * The in-memory header (wim->hdr) is saved on entry and restored on exit,
 * so failure leaves the WIMStruct unchanged.
 */
2353 write_wim_part(WIMStruct *wim,
2354 const void *path_or_fd,
2357 unsigned num_threads,
2358 wimlib_progress_func_t progress_func,
2359 unsigned part_number,
2360 unsigned total_parts,
2361 struct list_head *stream_list_override,
2365 struct wim_header hdr_save;
2366 struct list_head lt_stream_list_override;
/* Debug trace of all parameters and flags. */
2368 if (total_parts == 1)
2369 DEBUG("Writing standalone WIM.");
2371 DEBUG("Writing split WIM part %u/%u", part_number, total_parts);
2372 if (image == WIMLIB_ALL_IMAGES)
2373 DEBUG("Including all images.");
2375 DEBUG("Including image %d only.", image);
2376 if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)
2377 DEBUG("File descriptor: %d", *(const int*)path_or_fd);
2379 DEBUG("Path: \"%"TS"\"", (const tchar*)path_or_fd);
2380 DEBUG("Write flags: 0x%08x", write_flags);
2381 if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
2382 DEBUG("\tCHECK_INTEGRITY");
2383 if (write_flags & WIMLIB_WRITE_FLAG_REBUILD)
2385 if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
2386 DEBUG("\tRECOMPRESS");
2387 if (write_flags & WIMLIB_WRITE_FLAG_FSYNC)
2389 if (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE)
2391 if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
2392 DEBUG("\tIGNORE_READONLY_FLAG");
2393 if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2395 if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)
2396 DEBUG("\tFILE_DESCRIPTOR");
2397 if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)
2398 DEBUG("\tNO_METADATA");
2399 if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES)
2400 DEBUG("\tUSE_EXISTING_TOTALBYTES");
2401 if (num_threads == 0)
2402 DEBUG("Number of threads: autodetect");
2404 DEBUG("Number of threads: %u", num_threads);
2405 DEBUG("Progress function: %s", (progress_func ? "yes" : "no"));
2406 DEBUG("Stream list: %s", (stream_list_override ? "specified" : "autodetect"));
2407 DEBUG("GUID: %s", (guid ? "specified" : "generate new"));
2409 /* Internally, this is always called with a valid part number and total
2411 wimlib_assert(total_parts >= 1);
2412 wimlib_assert(part_number >= 1 && part_number <= total_parts);
2414 /* A valid image (or all images) must be specified. */
2415 if (image != WIMLIB_ALL_IMAGES &&
2416 (image < 1 || image > wim->hdr.image_count))
2417 return WIMLIB_ERR_INVALID_IMAGE;
2419 /* If we need to write metadata resources, make sure the ::WIMStruct has
2420 * the needed information attached (e.g. is not a resource-only WIM,
2421 * such as a non-first part of a split WIM). */
2422 if (!wim_has_metadata(wim) &&
2423 !(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA))
2424 return WIMLIB_ERR_METADATA_NOT_FOUND;
2426 /* Check for contradictory flags. */
2427 if ((write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2428 WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2429 == (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2430 WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2431 return WIMLIB_ERR_INVALID_PARAM;
2433 if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2434 WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2435 == (WIMLIB_WRITE_FLAG_PIPABLE |
2436 WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2437 return WIMLIB_ERR_INVALID_PARAM;
2439 /* Save previous header, then start initializing the new one. */
2440 memcpy(&hdr_save, &wim->hdr, sizeof(struct wim_header));
2442 /* Set default integrity and pipable flags. */
2443 if (!(write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2444 WIMLIB_WRITE_FLAG_NOT_PIPABLE)))
2445 if (wim_is_pipable(wim))
2446 write_flags |= WIMLIB_WRITE_FLAG_PIPABLE;
2448 if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2449 WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)))
2450 if (wim_has_integrity_table(wim))
2451 write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
2453 /* Set appropriate magic number. */
2454 if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2455 wim->hdr.magic = PWM_MAGIC;
2457 wim->hdr.magic = WIM_MAGIC;
2459 /* Clear header flags that will be set automatically. */
2460 wim->hdr.flags &= ~(WIM_HDR_FLAG_METADATA_ONLY |
2461 WIM_HDR_FLAG_RESOURCE_ONLY |
2462 WIM_HDR_FLAG_SPANNED |
2463 WIM_HDR_FLAG_WRITE_IN_PROGRESS);
2465 /* Set SPANNED header flag if writing part of a split WIM. */
2466 if (total_parts != 1)
2467 wim->hdr.flags |= WIM_HDR_FLAG_SPANNED;
2469 /* Set part number and total parts of split WIM. This will be 1 and 1
2470 * if the WIM is standalone. */
2471 wim->hdr.part_number = part_number;
2472 wim->hdr.total_parts = total_parts;
2474 /* Use GUID if specified; otherwise generate a new one. */
2476 memcpy(wim->hdr.guid, guid, WIMLIB_GUID_LEN);
2478 randomize_byte_array(wim->hdr.guid, WIMLIB_GUID_LEN);
2480 /* Clear references to resources that have not been written yet. */
2481 zero_resource_entry(&wim->hdr.lookup_table_res_entry);
2482 zero_resource_entry(&wim->hdr.xml_res_entry);
2483 zero_resource_entry(&wim->hdr.boot_metadata_res_entry);
2484 zero_resource_entry(&wim->hdr.integrity);
2486 /* Set image count and boot index correctly for single image writes. */
2487 if (image != WIMLIB_ALL_IMAGES) {
2488 wim->hdr.image_count = 1;
2489 if (wim->hdr.boot_idx == image)
2490 wim->hdr.boot_idx = 1;
2492 wim->hdr.boot_idx = 0;
2495 /* Split WIMs can't be bootable. */
2496 if (total_parts != 1)
2497 wim->hdr.boot_idx = 0;
2499 /* Initialize output file descriptor. */
2500 if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) {
2501 /* File descriptor was explicitly provided. Return error if
2502 * file descriptor is not seekable, unless writing a pipable WIM
2504 wim->out_fd.fd = *(const int*)path_or_fd;
2505 wim->out_fd.offset = 0;
2506 if (!filedes_is_seekable(&wim->out_fd)) {
/* A non-seekable fd is only usable for a pipable WIM without
 * an integrity table. */
2507 ret = WIMLIB_ERR_INVALID_PARAM;
2508 if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2509 goto out_restore_hdr;
2510 if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2511 ERROR("Can't include integrity check when "
2512 "writing pipable WIM to pipe!");
2513 goto out_restore_hdr;
2518 /* Filename of WIM to write was provided; open file descriptor
2520 ret = open_wim_writable(wim, (const tchar*)path_or_fd,
2521 O_TRUNC | O_CREAT | O_RDWR);
2523 goto out_restore_hdr;
2526 /* Write initial header. This is merely a "dummy" header since it
2527 * doesn't have all the information yet, so it will be overwritten later
2528 * (unless writing a pipable WIM). */
2529 if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2530 wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2531 ret = write_wim_header(&wim->hdr, &wim->out_fd);
2532 wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2534 goto out_restore_hdr;
/* Re-link the overridden streams onto a private list head.
 * NOTE(review): the `<_stream_list_override' and `<e->' tokens below
 * look like entity-mangled `&lt_stream_list_override' / `&lte->' from
 * the extraction of this file — verify against the upstream source. */
2536 if (stream_list_override) {
2537 struct wim_lookup_table_entry *lte;
2538 INIT_LIST_HEAD(<_stream_list_override);
2539 list_for_each_entry(lte, stream_list_override,
2542 list_add_tail(<e->lookup_table_list,
2543 <_stream_list_override);
2547 /* Write metadata resources and streams. */
2548 if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE)) {
2549 /* Default case: create a normal (non-pipable) WIM. */
2550 ret = write_wim_streams(wim, image, write_flags, num_threads,
2551 progress_func, stream_list_override);
2553 goto out_restore_hdr;
2555 ret = write_wim_metadata_resources(wim, image, write_flags,
2558 goto out_restore_hdr;
2560 /* Non-default case: create pipable WIM. */
2561 ret = write_pipable_wim(wim, image, write_flags, num_threads,
2562 progress_func, stream_list_override);
2564 goto out_restore_hdr;
/* Pipable WIMs get the full header at the end of the file. */
2565 write_flags |= WIMLIB_WRITE_FLAG_HEADER_AT_END;
2568 if (stream_list_override)
2569 stream_list_override = <_stream_list_override;
2571 /* Write lookup table, XML data, and (optional) integrity table. */
2572 ret = finish_write(wim, image, write_flags, progress_func,
2573 stream_list_override);
/* Restore the caller's in-memory header regardless of outcome. */
2575 memcpy(&wim->hdr, &hdr_save, sizeof(struct wim_header));
2576 close_wim_writable(wim, write_flags);
2580 /* Write a standalone WIM to a file or file descriptor. */
/* Thin wrapper around write_wim_part(): part 1 of 1, no stream-list
 * override, and no caller-supplied GUID (a new one is generated). */
2582 write_standalone_wim(WIMStruct *wim, const void *path_or_fd,
2583 int image, int write_flags, unsigned num_threads,
2584 wimlib_progress_func_t progress_func)
2586 return write_wim_part(wim, path_or_fd, image, write_flags,
2587 num_threads, progress_func, 1, 1, NULL, NULL);
2590 /* API function documented in wimlib.h */
/* Public entry point: write the WIM to the file named @path.
 * NOTE(review): the condition guarding the INVALID_PARAM return below is
 * not visible in this excerpt — presumably a NULL/empty @path check. */
2592 wimlib_write(WIMStruct *wim, const tchar *path,
2593 int image, int write_flags, unsigned num_threads,
2594 wimlib_progress_func_t progress_func)
2597 return WIMLIB_ERR_INVALID_PARAM;
/* Restrict caller-supplied flags to the public set. */
2599 write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
2601 return write_standalone_wim(wim, path, image, write_flags,
2602 num_threads, progress_func);
2605 /* API function documented in wimlib.h */
/* Public entry point: write the WIM to an already-open file descriptor.
 * Forces the FILE_DESCRIPTOR flag so write_wim_part() interprets the
 * path_or_fd argument as `const int *'.
 * NOTE(review): the condition guarding the INVALID_PARAM return below is
 * not visible in this excerpt — presumably a bad-fd check. */
2607 wimlib_write_to_fd(WIMStruct *wim, int fd,
2608 int image, int write_flags, unsigned num_threads,
2609 wimlib_progress_func_t progress_func)
2612 return WIMLIB_ERR_INVALID_PARAM;
/* Restrict caller-supplied flags to the public set. */
2614 write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
2615 write_flags |= WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR;
2617 return write_standalone_wim(wim, &fd, image, write_flags,
2618 num_threads, progress_func);
/* Scan all image metadata entries for the `modified' flag.
 * NOTE(review): the return statements are not visible in this excerpt —
 * presumably returns nonzero as soon as a modified image is found and
 * zero otherwise. */
2622 any_images_modified(WIMStruct *wim)
2624 for (int i = 0; i < wim->hdr.image_count; i++)
2625 if (wim->image_metadata[i]->modified)
2631 * Overwrite a WIM, possibly appending streams to it.
2633 * A WIM looks like (or is supposed to look like) the following:
2635 * Header (212 bytes)
2636 * Streams and metadata resources (variable size)
2637 * Lookup table (variable size)
2638 * XML data (variable size)
2639 * Integrity table (optional) (variable size)
2641 * If we are not adding any streams or metadata resources, the lookup table is
2642 * unchanged--- so we only need to overwrite the XML data, integrity table, and
2643 * header. This operation is potentially unsafe if the program is abruptly
2644 * terminated while the XML data or integrity table are being overwritten, but
2645 * before the new header has been written. To partially alleviate this problem,
2646 * a special flag (WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) is passed to
2647 * finish_write() to cause a temporary WIM header to be written after the XML
2648 * data has been written. This may prevent the WIM from becoming corrupted if
2649 * the program is terminated while the integrity table is being calculated (but
2650 * no guarantees, due to write re-ordering...).
2652 * If we are adding new streams or images (metadata resources), the lookup table
2653 * needs to be changed, and those streams need to be written. In this case, we
2654 * try to perform a safe update of the WIM file by writing the streams *after*
2655 * the end of the previous WIM, then writing the new lookup table, XML data, and
2656 * (optionally) integrity table following the new streams. This will produce a
2657 * layout like the following:
2659 * Header (212 bytes)
2660 * (OLD) Streams and metadata resources (variable size)
2661 * (OLD) Lookup table (variable size)
2662 * (OLD) XML data (variable size)
2663 * (OLD) Integrity table (optional) (variable size)
2664 * (NEW) Streams and metadata resources (variable size)
2665 * (NEW) Lookup table (variable size)
2666 * (NEW) XML data (variable size)
2667 * (NEW) Integrity table (optional) (variable size)
2669 * At all points, the WIM is valid as nothing points to the new data yet. Then,
2670 * the header is overwritten to point to the new lookup table, XML data, and
2671 * integrity table, to produce the following layout:
2673 * Header (212 bytes)
2674 * Streams and metadata resources (variable size)
2675 * Nothing (variable size)
2676 * More Streams and metadata resources (variable size)
2677 * Lookup table (variable size)
2678 * XML data (variable size)
2679 * Integrity table (optional) (variable size)
2681 * This method allows an image to be appended to a large WIM very quickly, and
2682 * is crash-safe except in the case of write re-ordering, but the
2683 * disadvantage is that a small hole is left in the WIM where the old lookup
2684 * table, xml data, and integrity table were. (These usually only take up a
2685 * small amount of space compared to the streams, however.)
/*
 * Append-style in-place overwrite of the on-disk WIM (see the overview
 * comment above for the resulting file layouts).  New streams and modified
 * metadata resources are written after the end of the existing WIM, then
 * finish_write() writes the new lookup table / XML data / integrity table
 * and updates the header.  Returns WIMLIB_ERR_RESOURCE_ORDER if the
 * existing file's component ordering makes in-place appending unsafe, in
 * which case the caller falls back to a full rebuild.
 */
2688 overwrite_wim_inplace(WIMStruct *wim, int write_flags,
2689 unsigned num_threads,
2690 wimlib_progress_func_t progress_func)
2693 struct list_head stream_list;
2695 u64 old_lookup_table_end, old_xml_begin, old_xml_end;
2698 DEBUG("Overwriting `%"TS"' in-place", wim->filename);
2700 /* Set default integrity flag. */
2701 if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2702 WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)))
2703 if (wim_has_integrity_table(wim))
2704 write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
2706 /* Make sure that the integrity table (if present) is after the XML
2707 * data, and that there are no stream resources, metadata resources, or
2708 * lookup tables after the XML data. Otherwise, these data would be
2710 old_xml_begin = wim->hdr.xml_res_entry.offset;
2711 old_xml_end = old_xml_begin + wim->hdr.xml_res_entry.size;
2712 old_lookup_table_end = wim->hdr.lookup_table_res_entry.offset +
2713 wim->hdr.lookup_table_res_entry.size;
2714 if (wim->hdr.integrity.offset != 0 && wim->hdr.integrity.offset < old_xml_end) {
2715 ERROR("Didn't expect the integrity table to be before the XML data");
2716 return WIMLIB_ERR_RESOURCE_ORDER;
2719 if (old_lookup_table_end > old_xml_begin) {
2720 ERROR("Didn't expect the lookup table to be after the XML data");
2721 return WIMLIB_ERR_RESOURCE_ORDER;
2724 /* Set @old_wim_end, which indicates the point beyond which we don't
2725 * allow any file and metadata resources to appear without returning
2726 * WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we would otherwise
2727 * overwrite these resources). */
2728 if (!wim->deletion_occurred && !any_images_modified(wim)) {
2729 /* If no images have been modified and no images have been
2730 * deleted, a new lookup table does not need to be written. We
2731 * shall write the new XML data and optional integrity table
2732 * immediately after the lookup table. Note that this may
2733 * overwrite an existing integrity table. */
2734 DEBUG("Skipping writing lookup table "
2735 "(no images modified or deleted)");
2736 old_wim_end = old_lookup_table_end;
2737 write_flags |= WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE |
2738 WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML;
2739 } else if (wim->hdr.integrity.offset) {
2740 /* Old WIM has an integrity table; begin writing new streams
2742 old_wim_end = wim->hdr.integrity.offset + wim->hdr.integrity.size;
2744 /* No existing integrity table; begin writing new streams after
2745 * the old XML data. */
2746 old_wim_end = old_xml_end;
2749 ret = prepare_streams_for_overwrite(wim, old_wim_end, &stream_list);
2753 ret = open_wim_writable(wim, wim->filename, O_RDWR);
/* Take the exclusive advisory lock before modifying the file. */
2757 ret = lock_wim(wim, wim->out_fd.fd);
2759 close_wim_writable(wim, write_flags);
2763 /* Set WIM_HDR_FLAG_WRITE_IN_PROGRESS flag in header. */
2764 ret = write_wim_header_flags(wim->hdr.flags | WIM_HDR_FLAG_WRITE_IN_PROGRESS,
2767 ERROR_WITH_ERRNO("Error updating WIM header flags");
2768 close_wim_writable(wim, write_flags);
2769 goto out_unlock_wim;
/* Position the write offset at the end of the existing WIM data. */
2772 if (filedes_seek(&wim->out_fd, old_wim_end) == -1) {
2773 ERROR_WITH_ERRNO("Can't seek to end of WIM");
2774 close_wim_writable(wim, write_flags);
2775 ret = WIMLIB_ERR_WRITE;
2776 goto out_unlock_wim;
2779 DEBUG("Writing newly added streams (offset = %"PRIu64")",
2781 ret = write_stream_list(&stream_list,
2784 wim->compression_type,
/* Re-write metadata resources only for images marked modified. */
2791 for (unsigned i = 1; i <= wim->hdr.image_count; i++) {
2792 if (wim->image_metadata[i - 1]->modified) {
2793 ret = write_metadata_resource(wim, i, 0);
2798 write_flags |= WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE;
2799 ret = finish_write(wim, WIMLIB_ALL_IMAGES, write_flags,
2800 progress_func, NULL);
2802 close_wim_writable(wim, write_flags);
/* On failure, roll back by truncating the file to its pre-append size
 * (only meaningful when we actually appended new data). */
2803 if (ret && !(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
2804 WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)",
2805 wim->filename, old_wim_end);
2806 /* Return value of truncate() is ignored because this is already
2808 (void)ttruncate(wim->filename, old_wim_end);
2811 wim->wim_locked = 0;
/*
 * Safe overwrite strategy: write the entire WIM to a temporary file in the
 * same directory (original filename + 9 random alphanumeric characters),
 * with FSYNC forced so the data is durable, then rename() it over the
 * original file.  On rename failure the temporary file is removed.
 */
2816 overwrite_wim_via_tmpfile(WIMStruct *wim, int write_flags,
2817 unsigned num_threads,
2818 wimlib_progress_func_t progress_func)
2820 size_t wim_name_len;
2823 DEBUG("Overwriting `%"TS"' via a temporary file", wim->filename);
2825 /* Write the WIM to a temporary file in the same directory as the
/* Build the temp name: <filename> + 9 random alnum chars + NUL. */
2827 wim_name_len = tstrlen(wim->filename);
2828 tchar tmpfile[wim_name_len + 10];
2829 tmemcpy(tmpfile, wim->filename, wim_name_len);
2830 randomize_char_array_with_alnum(tmpfile + wim_name_len, 9);
2831 tmpfile[wim_name_len + 9] = T('\0');
2833 ret = wimlib_write(wim, tmpfile, WIMLIB_ALL_IMAGES,
2834 write_flags | WIMLIB_WRITE_FLAG_FSYNC,
2835 num_threads, progress_func);
2841 DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, wim->filename);
2842 /* Rename the new file to the old file. */
2843 if (trename(tmpfile, wim->filename) != 0) {
2844 ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'",
2845 tmpfile, wim->filename);
2846 ret = WIMLIB_ERR_RENAME;
/* Report the rename to the caller's progress callback, if any. */
2850 if (progress_func) {
2851 union wimlib_progress_info progress;
2852 progress.rename.from = tmpfile;
2853 progress.rename.to = wim->filename;
2854 progress_func(WIMLIB_PROGRESS_MSG_RENAME, &progress);
2859 /* Remove temporary file. */
2864 /* API function documented in wimlib.h */
2866 wimlib_overwrite(WIMStruct *wim, int write_flags,
2867 unsigned num_threads,
2868 wimlib_progress_func_t progress_func)
2873 write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
2875 if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)
2876 return WIMLIB_ERR_INVALID_PARAM;
2879 return WIMLIB_ERR_NO_FILENAME;
2881 orig_hdr_flags = wim->hdr.flags;
2882 if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
2883 wim->hdr.flags &= ~WIM_HDR_FLAG_READONLY;
2884 ret = can_modify_wim(wim);
2885 wim->hdr.flags = orig_hdr_flags;
2889 if ((!wim->deletion_occurred || (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
2890 && !(write_flags & (WIMLIB_WRITE_FLAG_REBUILD |
2891 WIMLIB_WRITE_FLAG_PIPABLE))
2892 && !(wim_is_pipable(wim)))
2894 ret = overwrite_wim_inplace(wim, write_flags, num_threads,
2896 if (ret != WIMLIB_ERR_RESOURCE_ORDER)
2898 WARNING("Falling back to re-building entire WIM");
2900 return overwrite_wim_via_tmpfile(wim, write_flags, num_threads,