4 * Support for writing WIM files; write a WIM file, overwrite a WIM file, write
5 * compressed file resources, etc.
9 * Copyright (C) 2012, 2013 Eric Biggers
11 * This file is part of wimlib, a library for working with WIM files.
13 * wimlib is free software; you can redistribute it and/or modify it under the
14 * terms of the GNU General Public License as published by the Free
15 * Software Foundation; either version 3 of the License, or (at your option)
18 * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
19 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
20 * A PARTICULAR PURPOSE. See the GNU General Public License for more
23 * You should have received a copy of the GNU General Public License
24 * along with wimlib; if not, see http://www.gnu.org/licenses/.
31 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
32 /* On BSD, this should be included before "wimlib/list.h" so that "wimlib/list.h" can
33 * overwrite the LIST_HEAD macro. */
34 # include <sys/file.h>
37 #include "wimlib/endianness.h"
38 #include "wimlib/error.h"
39 #include "wimlib/file_io.h"
40 #include "wimlib/header.h"
41 #include "wimlib/integrity.h"
42 #include "wimlib/lookup_table.h"
43 #include "wimlib/metadata.h"
44 #include "wimlib/resource.h"
45 #include "wimlib/write.h"
46 #include "wimlib/xml.h"
49 # include "wimlib/win32.h" /* win32_get_number_of_processors() */
52 #ifdef ENABLE_MULTITHREADED_COMPRESSION
68 # include <sys/uio.h> /* for `struct iovec' */
71 /* Return true if the specified resource is compressed and the compressed data
72  * can be reused with the specified output parameters. */
/* NOTE(review): this view of the file is fragmented (the embedded original
 * line numbers are discontinuous); the bodies of the checks below — presumably
 * `return false;` for each failing condition — are not visible here. */
74 can_raw_copy(const struct wim_lookup_table_entry *lte,
75 	     int write_resource_flags, int out_ctype, u32 out_chunk_size)
/* Raw copy requires: source data already inside a WIM, ... */
77 	if (lte->resource_location != RESOURCE_IN_WIM)
/* ... compressed output requested, ... */
79 	if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
/* ... same compression type and chunk size as the source resource, ... */
81 	if (lte->rspec->ctype != out_ctype)
83 	if (out_chunk_size != lte->rspec->cchunk_size)
/* ... and the stream covering the entire source resource from offset 0. */
85 	if (lte->offset_in_res != 0)
87 	if (lte->size != lte->rspec->uncompressed_size)
93 /* Return true if the specified resource must be recompressed when the specified
94  * output parameters are used. */
/* Recompression is needed iff compressed output is requested AND either the
 * caller forces recompression or the existing compressed data cannot simply
 * be raw-copied with these output parameters. */
96 must_compress_stream(const struct wim_lookup_table_entry *lte,
97 		     int write_resource_flags, int out_ctype, u32 out_chunk_size)
99 	return (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
100 		&& ((write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS)
101 		    || !can_raw_copy(lte, write_resource_flags,
102 				     out_ctype, out_chunk_size)));
/* Compress one chunk of data, dispatching on the output compression type
 * (a switch statement; the switch header and remaining arguments are among the
 * lines missing from this fragmented view).  The LZX path takes @comp_ctx;
 * presumably returns the compressed size, as the wimlib_*_compress APIs do —
 * TODO confirm against the full file. */
106 compress_chunk(const void * uncompressed_data,
107 	       unsigned uncompressed_len,
108 	       void *compressed_data,
110 	       struct wimlib_lzx_context *comp_ctx)
113 	case WIMLIB_COMPRESSION_TYPE_XPRESS:
114 		return wimlib_xpress_compress(uncompressed_data,
117 	case WIMLIB_COMPRESSION_TYPE_LZX:
118 		return wimlib_lzx_compress2(uncompressed_data,
128 /* Chunk table that's located at the beginning of each compressed resource in
129  * the WIM.  (This is not the on-disk format; the on-disk format just has an
130  * array of offsets.) */
/* NOTE(review): several fields (e.g. num_chunks, table_disk_size,
 * cur_offset_p, cur_offset_u32/u64 — all referenced by the functions below)
 * fall on lines missing from this fragmented view. */
132 	u64 original_resource_size;
/* 4 or 8; chosen from the uncompressed resource size (see
 * begin_wim_resource_chunk_tab()). */
135 	unsigned bytes_per_chunk_entry;
141 	/* Beginning of chunk offsets, in either 32-bit or 64-bit little endian
142 	 * integers, including the first offset of 0, which will not be written.
144 	u8 offsets[] _aligned_attribute(8);
147 /* Allocate and initializes a chunk table, then reserve space for it in the
148  * output file unless writing a pipable resource. */
/* On success, *chunk_tab_ret receives the new table; returns 0, or
 * WIMLIB_ERR_NOMEM / a write-error code on failure.  (Some parameters,
 * braces, and return paths fall on lines missing from this view.) */
150 begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte,
151 			     struct filedes *out_fd,
153 			     struct chunk_table **chunk_tab_ret,
158 	unsigned bytes_per_chunk_entry;
160 	struct chunk_table *chunk_tab;
164 	num_chunks = DIV_ROUND_UP(size, out_chunk_size);
/* 32-bit offsets suffice unless the uncompressed size exceeds 2^32. */
165 	bytes_per_chunk_entry = (size > (1ULL << 32)) ? 8 : 4;
/* Always allocates sizeof(u64) per entry in memory, even when only 4 bytes
 * per entry will be written to disk — over-allocation, but safe. */
166 	alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64);
167 	chunk_tab = CALLOC(1, alloc_size);
170 		ERROR("Failed to allocate chunk table for %"PRIu64" byte "
172 		return WIMLIB_ERR_NOMEM;
174 	chunk_tab->num_chunks = num_chunks;
175 	chunk_tab->original_resource_size = size;
176 	chunk_tab->bytes_per_chunk_entry = bytes_per_chunk_entry;
177 	chunk_tab->table_disk_size = chunk_tab->bytes_per_chunk_entry *
179 	chunk_tab->cur_offset_p = chunk_tab->offsets;
181 	/* We don't know the correct offsets yet; so just write zeroes to
182 	 * reserve space for the table, so we can go back to it later after
183 	 * we've written the compressed chunks following it.
185 	 * Special case: if writing a pipable WIM, compressed resources are in a
186 	 * modified format (see comment above write_pipable_wim()) and do not
187 	 * have a chunk table at the beginning, so don't reserve any space for
189 	if (!(resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE)) {
190 		ret = full_write(out_fd, chunk_tab->offsets,
191 				 chunk_tab->table_disk_size);
193 			ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
199 	*chunk_tab_ret = chunk_tab;
203 /* Add the offset for the next chunk to the chunk table being constructed for a
204  * compressed stream. */
/* Appends the *current* running offset, then advances it by @out_chunk_size,
 * so each entry records where its chunk begins.  Writes 32- or 64-bit little
 * endian entries per chunk_tab->bytes_per_chunk_entry.  (The `} else {` and
 * closing braces fall on lines missing from this view.) */
206 chunk_tab_record_chunk(struct chunk_table *chunk_tab, unsigned out_chunk_size)
208 	if (chunk_tab->bytes_per_chunk_entry == 4) {
209 		*(le32*)chunk_tab->cur_offset_p = cpu_to_le32(chunk_tab->cur_offset_u32);
210 		chunk_tab->cur_offset_p = (le32*)chunk_tab->cur_offset_p + 1;
211 		chunk_tab->cur_offset_u32 += out_chunk_size;
213 		*(le64*)chunk_tab->cur_offset_p = cpu_to_le64(chunk_tab->cur_offset_u64);
214 		chunk_tab->cur_offset_p = (le64*)chunk_tab->cur_offset_p + 1;
215 		chunk_tab->cur_offset_u64 += out_chunk_size;
219 /* Finishes a WIM chunk table and writes it to the output file at the correct
/* Pipable resources: append the table at the current offset with full_write().
 * Non-pipable resources: seek back and overwrite the space reserved by
 * begin_wim_resource_chunk_tab() using full_pwrite() at an offset derived from
 * @res_start_offset.  (Several argument lines and the success return are
 * missing from this fragmented view.) */
222 finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab,
223 			      struct filedes *out_fd,
224 			      off_t res_start_offset,
225 			      int write_resource_flags)
229 	if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
230 		ret = full_write(out_fd,
232 				 chunk_tab->bytes_per_chunk_entry,
233 				 chunk_tab->table_disk_size);
235 		ret = full_pwrite(out_fd,
237 				  chunk_tab->bytes_per_chunk_entry,
238 				  chunk_tab->table_disk_size,
242 		ERROR_WITH_ERRNO("Write error");
246 /* Write the header for a stream in a pipable WIM.
/* Builds a `struct pwm_stream_hdr` (magic, uncompressed size, hash, flags)
 * and writes it to @out_fd.  If PWM_RESHDR_FLAG_UNHASHED is set the hash
 * field is zeroed; otherwise the stream's SHA-1 is copied in (and the stream
 * must already be hashed).  COMPRESSED/CONCAT flags are stripped from the
 * stream's flags before @additional_reshdr_flags are merged in. */
249 write_pwm_stream_header(const struct wim_lookup_table_entry *lte,
250 			struct filedes *out_fd,
251 			int additional_reshdr_flags)
253 	struct pwm_stream_hdr stream_hdr;
257 	stream_hdr.magic = PWM_STREAM_MAGIC;
258 	stream_hdr.uncompressed_size = cpu_to_le64(lte->size);
259 	if (additional_reshdr_flags & PWM_RESHDR_FLAG_UNHASHED) {
260 		zero_out_hash(stream_hdr.hash);
262 		wimlib_assert(!lte->unhashed);
263 		copy_hash(stream_hdr.hash, lte->hash);
266 	reshdr_flags = lte->flags & ~(WIM_RESHDR_FLAG_COMPRESSED | WIM_RESHDR_FLAG_CONCAT);
267 	reshdr_flags |= additional_reshdr_flags;
268 	stream_hdr.flags = cpu_to_le32(reshdr_flags);
269 	ret = full_write(out_fd, &stream_hdr, sizeof(stream_hdr));
271 		ERROR_WITH_ERRNO("Error writing stream header");
/* Seek the output file back to @offset and truncate it there — used to
 * discard resource data that was just written (e.g. when a "compressed"
 * resource came out larger than the uncompressed data).  Returns
 * WIMLIB_ERR_WRITE on failure; the success path is on lines missing from
 * this fragmented view. */
276 seek_and_truncate(struct filedes *out_fd, off_t offset)
278 	if (filedes_seek(out_fd, offset) == -1 ||
279 	    ftruncate(out_fd->fd, offset))
281 		ERROR_WITH_ERRNO("Failed to truncate output WIM file");
282 		return WIMLIB_ERR_WRITE;
/* Finalize the SHA-1 digest of a stream just written.  For unhashed streams
 * the digest is simply recorded in the lookup table entry; for hashed streams
 * it is compared against the expected hash, and a mismatch (data changed on
 * disk while being read, for example) fails with
 * WIMLIB_ERR_INVALID_RESOURCE_HASH.  (The `if (lte->unhashed)` condition line
 * is among those missing from this fragmented view.) */
288 finalize_and_check_sha1(SHA_CTX *sha_ctx, struct wim_lookup_table_entry *lte)
290 	u8 md[SHA1_HASH_SIZE];
292 	sha1_final(md, sha_ctx);
294 		copy_hash(lte->hash, md);
295 	} else if (!hashes_equal(md, lte->hash)) {
296 		ERROR("WIM resource has incorrect hash!");
297 		if (lte_filename_valid(lte)) {
298 			ERROR("We were reading it from \"%"TS"\"; maybe "
299 			      "it changed while we were reading it.",
302 		return WIMLIB_ERR_INVALID_RESOURCE_HASH;
/* Per-resource state threaded through write_resource_cb() while a stream is
 * read chunk-by-chunk and written to the output WIM.  Additional members
 * referenced by the callback (out_ctype, out_chunk_size, sha_ctx, doing_sha,
 * resource_flags) fall on lines missing from this fragmented view. */
307 struct write_resource_ctx {
310 	struct wimlib_lzx_context *comp_ctx;
311 	struct chunk_table *chunk_tab;
312 	struct filedes *out_fd;
/* Callback invoked for each chunk of a resource being written: optionally
 * update the running SHA-1, optionally compress the chunk, record it in the
 * chunk table, and write it (with a pwm_chunk_hdr prefix when writing a
 * pipable WIM).  Returns 0 or an error code.  Several lines (e.g. the STACK_MAX
 * comparison context, some cleanup labels) are missing from this view. */
319 write_resource_cb(const void *chunk, size_t chunk_size, void *_ctx)
321 	struct write_resource_ctx *ctx = _ctx;
322 	const void *out_chunk;
323 	unsigned out_chunk_size;
325 	void *compressed_chunk = NULL;
326 	unsigned compressed_size;
327 	bool compressed_chunk_malloced = false;
330 		sha1_update(&ctx->sha_ctx, chunk, chunk_size);
/* Default: write the chunk as-is. */
333 	out_chunk_size = chunk_size;
334 	if (ctx->out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
336 		/* Compress the chunk.  */
/* Small chunks use the stack; larger ones are heap-allocated and freed
 * below.  NOTE(review): alloca() failure is not detectable. */
337 		if (chunk_size <= STACK_MAX) {
338 			compressed_chunk = alloca(chunk_size);
340 			compressed_chunk = MALLOC(chunk_size);
341 			if (compressed_chunk == NULL)
342 				return WIMLIB_ERR_NOMEM;
343 			compressed_chunk_malloced = true;
346 		compressed_size = compress_chunk(chunk, chunk_size,
350 		/* Use compressed data if compression to less than input size
352 		if (compressed_size) {
353 			out_chunk = compressed_chunk;
354 			out_chunk_size = compressed_size;
358 	if (ctx->chunk_tab) {
359 		/* Update chunk table accounting.  */
360 		chunk_tab_record_chunk(ctx->chunk_tab, out_chunk_size);
362 		/* If writing compressed chunks to a pipable WIM, before the
363 		 * chunk data write a chunk header that provides the compressed
365 		if (ctx->resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
366 			struct pwm_chunk_hdr chunk_hdr = {
367 				.compressed_size = cpu_to_le32(out_chunk_size),
369 			ret = full_write(ctx->out_fd, &chunk_hdr,
376 	/* Write the chunk data.  */
377 	ret = full_write(ctx->out_fd, out_chunk, out_chunk_size);
382 	if (compressed_chunk_malloced)
383 		FREE(compressed_chunk);
387 	ERROR_WITH_ERRNO("Failed to write WIM resource chunk");
388 	goto out_free_memory;
392 * write_wim_resource()-
394 * Write a resource to an output WIM.
397 * Lookup table entry for the resource, which could be in another WIM, in
398 * an external file, or in another location.
401 * File descriptor opened to the output WIM.
404 * One of the WIMLIB_COMPRESSION_TYPE_* constants to indicate which
405 * compression algorithm to use.
408 * Compressed chunk size to use.
411 * On success, this is filled in with the offset, flags, compressed size,
412 * and uncompressed size of the resource in the output WIM.
415 * * WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS to force data to be recompressed even
416 * if it could otherwise be copied directly from the input;
417 * * WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE if writing a resource for a pipable WIM
418 * (and the output file descriptor may be a pipe).
421 * Location of LZX compression context pointer, which will be allocated or
422 * updated if needed. (Initialize to NULL.)
424 * Additional notes: The SHA1 message digest of the uncompressed data is
425 * calculated (except when doing a raw copy --- see below). If the @unhashed
426 * flag is set on the lookup table entry, this message digest is simply copied
427 * to it; otherwise, the message digest is compared with the existing one, and
428 * this function will fail if they do not match.
/* See the detailed documentation block immediately above in the original
 * file (write_wim_resource(): parameters, flags, and SHA-1 behavior).
 * NOTE(review): this view of the function is fragmented — declarations such
 * as `ret`, `read_size`, `in_chunk_size`, the `try_write_again:` label
 * targeted by the goto below, several closing braces, and the final return
 * fall on missing lines. */
431 write_wim_resource(struct wim_lookup_table_entry *lte,
432 		   struct filedes *out_fd, int out_ctype,
434 		   struct wim_reshdr *out_reshdr,
436 		   struct wimlib_lzx_context **comp_ctx)
438 	struct write_resource_ctx write_ctx;
439 	off_t res_start_offset;
444 	/* Mask out any irrelevant flags, since this function also uses this
445 	 * variable to store WIMLIB_READ_RESOURCE flags.  */
446 	resource_flags &= WIMLIB_WRITE_RESOURCE_MASK;
448 	/* Get current position in output WIM.  */
449 	res_start_offset = out_fd->offset;
451 	/* If we are not forcing the data to be recompressed, and the input
452 	 * resource is located in a WIM with a compression mode compatible with
453 	 * the output, we can simply copy the compressed data without
454 	 * recompressing it.  This also means we must skip calculating the SHA1,
455 	 * as we never will see the uncompressed data.  */
456 	if (can_raw_copy(lte, resource_flags, out_ctype, out_chunk_size)) {
457 		/* Normally, for raw copies we can request a RAW_FULL read, but
458 		 * if we're reading from a pipable resource and writing a
459 		 * non-pipable resource or vice versa, then a RAW_CHUNKS read
460 		 * needs to be requested so that the written resource can be
461 		 * appropriately formatted.  However, in neither case is any
462 		 * actual decompression needed.  */
463 		if (lte->rspec->is_pipable == !!(resource_flags &
464 						 WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE))
466 			resource_flags |= WIMLIB_READ_RESOURCE_FLAG_RAW_FULL;
467 			read_size = lte->rspec->size_in_wim;
469 			resource_flags |= WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS;
470 			read_size = lte->size;
/* Raw copy: the uncompressed data is never seen, so no SHA-1. */
472 		write_ctx.doing_sha = false;
474 		write_ctx.doing_sha = true;
475 		sha1_init(&write_ctx.sha_ctx);
476 		read_size = lte->size;
479 	/* Set the output compression mode and initialize chunk table if needed.
481 	write_ctx.out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
482 	write_ctx.out_chunk_size = out_chunk_size;
483 	write_ctx.chunk_tab = NULL;
484 	if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
485 		wimlib_assert(out_chunk_size > 0);
486 		if (!(resource_flags & WIMLIB_READ_RESOURCE_FLAG_RAW)) {
487 			/* Compression needed.  */
488 			write_ctx.out_ctype = out_ctype;
489 			if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX) {
/* Lazily (re)allocate the caller-owned LZX context. */
490 				ret = wimlib_lzx_alloc_context(out_chunk_size,
495 			write_ctx.comp_ctx = *comp_ctx;
497 		if (!(resource_flags & WIMLIB_READ_RESOURCE_FLAG_RAW_FULL)) {
498 			/* Chunk table needed.  */
499 			ret = begin_wim_resource_chunk_tab(lte, out_fd,
501 							   &write_ctx.chunk_tab,
508 	/* If writing a pipable resource, write the stream header and update
509 	 * @res_start_offset to be the end of the stream header.  */
510 	if (resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
511 		int reshdr_flags = 0;
512 		if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE)
513 			reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED;
514 		ret = write_pwm_stream_header(lte, out_fd, reshdr_flags);
516 			goto out_free_chunk_tab;
517 		res_start_offset = out_fd->offset;
520 	/* Write the entire resource by reading the entire resource and feeding
521 	 * the data through write_resource_cb().  */
522 	write_ctx.out_fd = out_fd;
523 	write_ctx.resource_flags = resource_flags;
525 	if (write_ctx.out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
526 		in_chunk_size = lte_cchunk_size(lte);
528 		in_chunk_size = out_chunk_size;
529 	ret = read_resource_prefix(lte, read_size,
531 				   in_chunk_size, &write_ctx, resource_flags);
533 		goto out_free_chunk_tab;
535 	/* Verify SHA1 message digest of the resource, or set the hash for the
537 	if (write_ctx.doing_sha) {
538 		ret = finalize_and_check_sha1(&write_ctx.sha_ctx, lte);
540 			goto out_free_chunk_tab;
543 	/* Write chunk table if needed.  */
544 	if (write_ctx.chunk_tab) {
545 		ret = finish_wim_resource_chunk_tab(write_ctx.chunk_tab,
550 			goto out_free_chunk_tab;
553 	/* Fill in out_reshdr with information about the newly written
555 	out_reshdr->size_in_wim   = out_fd->offset - res_start_offset;
556 	out_reshdr->flags         = lte->flags & ~WIM_RESHDR_FLAG_CONCAT;
557 	if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
558 		out_reshdr->flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
560 		out_reshdr->flags |= WIM_RESHDR_FLAG_COMPRESSED;
561 	out_reshdr->offset_in_wim = res_start_offset;
562 	out_reshdr->uncompressed_size = lte->size;
564 	/* Check for resources compressed to greater than their original size
565 	 * and write them uncompressed instead.  (But never do this if writing
566 	 * to a pipe, and don't bother if we did a raw copy.)  */
567 	if (out_reshdr->size_in_wim > out_reshdr->uncompressed_size &&
568 	    !(resource_flags & (WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE |
569 				WIMLIB_READ_RESOURCE_FLAG_RAW)))
571 		DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
572 		      "writing uncompressed instead",
573 		      out_reshdr->uncompressed_size, out_reshdr->size_in_wim);
574 		ret = seek_and_truncate(out_fd, res_start_offset);
576 			goto out_free_chunk_tab;
/* Retry the whole write as uncompressed: rewind state and jump back to
 * the `try_write_again` label (defined on a line missing from this view).
 * SHA-1 is skipped on the retry since it was already verified. */
577 		out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
578 		FREE(write_ctx.chunk_tab);
579 		write_ctx.out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
580 		write_ctx.chunk_tab = NULL;
581 		write_ctx.doing_sha = false;
582 		goto try_write_again;
584 	if (resource_flags & WIMLIB_READ_RESOURCE_FLAG_RAW) {
585 		DEBUG("Copied raw compressed data "
586 		      "(%"PRIu64" => %"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)",
587 		      out_reshdr->uncompressed_size, out_reshdr->size_in_wim,
588 		      out_reshdr->offset_in_wim, out_reshdr->flags);
589 	} else if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
590 		DEBUG("Wrote compressed resource "
591 		      "(%"PRIu64" => %"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)",
592 		      out_reshdr->uncompressed_size, out_reshdr->size_in_wim,
593 		      out_reshdr->offset_in_wim, out_reshdr->flags);
595 		DEBUG("Wrote uncompressed resource "
596 		      "(%"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)",
597 		      out_reshdr->uncompressed_size,
598 		      out_reshdr->offset_in_wim, out_reshdr->flags);
602 	FREE(write_ctx.chunk_tab);
607 /* Like write_wim_resource(), but the resource is specified by a buffer of
608  * uncompressed data rather a lookup table entry.  Also writes the SHA1 message
609  * digest of the buffer to @hash_ret if it is non-NULL.  */
/* Implementation: wrap @buf in a temporary in-memory lookup table entry,
 * delegate to write_wim_resource(), then free the temporary entry (detaching
 * the buffer first so it is not freed).  For pipable output the SHA-1 is
 * computed up front because the stream header needs it. */
611 write_wim_resource_from_buffer(const void *buf, size_t buf_size,
612 			       int reshdr_flags, struct filedes *out_fd,
615 			       struct wim_reshdr *out_reshdr,
616 			       u8 *hash_ret, int write_resource_flags,
617 			       struct wimlib_lzx_context **comp_ctx)
620 	struct wim_lookup_table_entry *lte;
622 	/* Set up a temporary lookup table entry to provide to
623 	 * write_wim_resource().  */
625 	lte = new_lookup_table_entry();
627 		return WIMLIB_ERR_NOMEM;
629 	lte->resource_location  = RESOURCE_IN_ATTACHED_BUFFER;
/* Cast away const: the attached buffer is only read, never modified. */
630 	lte->attached_buffer    = (void*)buf;
631 	lte->size               = buf_size;
632 	lte->flags              = reshdr_flags;
634 	if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
635 		sha1_buffer(buf, buf_size, lte->hash);
641 	ret = write_wim_resource(lte, out_fd, out_ctype, out_chunk_size,
642 				 out_reshdr, write_resource_flags, comp_ctx);
646 		copy_hash(hash_ret, lte->hash);
/* Detach the caller's buffer before freeing the temporary entry. */
649 	lte->resource_location = RESOURCE_NONEXISTENT;
650 	free_lookup_table_entry(lte);
655 #ifdef ENABLE_MULTITHREADED_COMPRESSION
657 /* Blocking shared queue (solves the producer-consumer problem) */
/* Fixed-capacity ring buffer of void* guarded by a mutex and two condition
 * variables.  Fields such as size, front, back, and the array pointer fall on
 * lines missing from this fragmented view. */
658 struct shared_queue {
662 	unsigned filled_slots;
664 	pthread_mutex_t lock;
665 	pthread_cond_t msg_avail_cond;
666 	pthread_cond_t space_avail_cond;
/* Initialize @q with capacity @size.  Allocates the slot array and the
 * mutex/condition variables; on partial failure unwinds what was created
 * and returns WIMLIB_ERR_NOMEM.  (The success return and some cleanup labels
 * fall on lines missing from this fragmented view.) */
670 shared_queue_init(struct shared_queue *q, unsigned size)
672 	wimlib_assert(size != 0);
673 	q->array = CALLOC(sizeof(q->array[0]), size);
680 	if (pthread_mutex_init(&q->lock, NULL)) {
681 		ERROR_WITH_ERRNO("Failed to initialize mutex");
684 	if (pthread_cond_init(&q->msg_avail_cond, NULL)) {
685 		ERROR_WITH_ERRNO("Failed to initialize condition variable");
686 		goto err_destroy_lock;
688 	if (pthread_cond_init(&q->space_avail_cond, NULL)) {
689 		ERROR_WITH_ERRNO("Failed to initialize condition variable");
690 		goto err_destroy_msg_avail_cond;
/* Error unwinding: destroy in reverse order of creation. */
693 err_destroy_msg_avail_cond:
694 	pthread_cond_destroy(&q->msg_avail_cond);
696 	pthread_mutex_destroy(&q->lock);
698 	return WIMLIB_ERR_NOMEM;
/* Free the resources held by @q (counterpart to shared_queue_init()). */
702 shared_queue_destroy(struct shared_queue *q)
705 	pthread_mutex_destroy(&q->lock);
706 	pthread_cond_destroy(&q->msg_avail_cond);
707 	pthread_cond_destroy(&q->space_avail_cond);
/* Producer side: block until a slot is free, then enqueue @obj and wake any
 * waiting consumers.  (The filled_slots increment falls on a line missing
 * from this fragmented view.) */
711 shared_queue_put(struct shared_queue *q, void *obj)
713 	pthread_mutex_lock(&q->lock);
/* Standard condition-variable idiom: re-check the predicate in a loop to
 * tolerate spurious wakeups. */
714 	while (q->filled_slots == q->size)
715 		pthread_cond_wait(&q->space_avail_cond, &q->lock);
717 	q->back = (q->back + 1) % q->size;
718 	q->array[q->back] = obj;
721 	pthread_cond_broadcast(&q->msg_avail_cond);
722 	pthread_mutex_unlock(&q->lock);
/* Consumer side: block until an object is available, then dequeue and return
 * it, waking any waiting producers.  (The filled_slots decrement and the
 * return statement fall on lines missing from this fragmented view.) */
726 shared_queue_get(struct shared_queue *q)
730 	pthread_mutex_lock(&q->lock);
731 	while (q->filled_slots == 0)
732 		pthread_cond_wait(&q->msg_avail_cond, &q->lock);
734 	obj = q->array[q->front];
/* Clear the slot; presumably a debugging aid — TODO confirm. */
735 	q->array[q->front] = NULL;
736 	q->front = (q->front + 1) % q->size;
739 	pthread_cond_broadcast(&q->space_avail_cond);
740 	pthread_mutex_unlock(&q->lock);
/* Arguments handed to each compressor thread: the two queues it shuttles
 * messages between, plus the compression type/context it should use.
 * (The out_ctype member is on a line missing from this view.) */
744 struct compressor_thread_params {
745 	struct shared_queue *res_to_compress_queue;
746 	struct shared_queue *compressed_res_queue;
748 	struct wimlib_lzx_context *comp_ctx;
751 #define MAX_CHUNKS_PER_MSG 2
/* Unit of work passed between the main writer thread and the compressor
 * threads: up to MAX_CHUNKS_PER_MSG chunks of one stream, with parallel
 * arrays for input buffers, output buffers, sizes, and the resulting iovecs.
 * Other members referenced elsewhere (num_chunks, begin_chunk, complete,
 * out_chunk_size) fall on lines missing from this fragmented view. */
754 	struct wim_lookup_table_entry *lte;
756 	u8 *uncompressed_chunks[MAX_CHUNKS_PER_MSG];
757 	u8 *compressed_chunks[MAX_CHUNKS_PER_MSG];
758 	unsigned uncompressed_chunk_sizes[MAX_CHUNKS_PER_MSG];
/* Per-chunk data actually to be written (compressed or not). */
759 	struct iovec out_chunks[MAX_CHUNKS_PER_MSG];
761 	struct list_head list;
/* Compress every chunk in @msg, filling msg->out_chunks[] with either the
 * compressed data or, when compression does not help, the original
 * uncompressed data.  (The condition deciding between the two branches falls
 * on a line missing from this fragmented view.) */
767 compress_chunks(struct message *msg, int out_ctype,
768 		struct wimlib_lzx_context *comp_ctx)
770 	for (unsigned i = 0; i < msg->num_chunks; i++) {
773 		len = compress_chunk(msg->uncompressed_chunks[i],
774 				     msg->uncompressed_chunk_sizes[i],
775 				     msg->compressed_chunks[i],
782 			/* To be written compressed */
783 			out_chunk = msg->compressed_chunks[i];
786 			/* To be written uncompressed */
787 			out_chunk = msg->uncompressed_chunks[i];
788 			out_len = msg->uncompressed_chunk_sizes[i];
790 		msg->out_chunks[i].iov_base = out_chunk;
791 		msg->out_chunks[i].iov_len = out_len;
795 /* Compressor thread routine.  This is a lot simpler than the main thread
796  * routine: just repeatedly get a group of chunks from the
797  * res_to_compress_queue, compress them, and put them in the
798  * compressed_res_queue.  A NULL pointer indicates that the thread should stop.
/* pthread entry point; @arg is a struct compressor_thread_params *. */
801 compressor_thread_proc(void *arg)
803 	struct compressor_thread_params *params = arg;
804 	struct shared_queue *res_to_compress_queue = params->res_to_compress_queue;
805 	struct shared_queue *compressed_res_queue = params->compressed_res_queue;
808 	DEBUG("Compressor thread ready");
/* NULL message = shutdown sentinel posted by the main thread. */
809 	while ((msg = shared_queue_get(res_to_compress_queue)) != NULL) {
810 		compress_chunks(msg, params->out_ctype, params->comp_ctx);
811 		shared_queue_put(compressed_res_queue, msg);
813 	DEBUG("Compressor thread terminating");
816 #endif /* ENABLE_MULTITHREADED_COMPRESSION */
/* Bookkeeping for reporting WIMLIB_PROGRESS_MSG_WRITE_STREAMS progress:
 * the user callback, the cumulative progress counters, the next byte count
 * at which to fire the callback, and the last source WIM part seen (used to
 * count completed split-WIM parts). */
818 struct write_streams_progress_data {
819 	wimlib_progress_func_t progress_func;
820 	union wimlib_progress_info progress;
821 	uint64_t next_progress;
822 	WIMStruct *prev_wim_part;
/* Update the write-streams progress counters for one stream and invoke the
 * user's progress callback when the next threshold (~1% of total bytes) is
 * reached.  Discarded duplicate streams reduce total_bytes instead of adding
 * to completed_bytes.  (Several closing braces and intermediate lines are
 * missing from this fragmented view.) */
826 do_write_streams_progress(struct write_streams_progress_data *progress_data,
827 			  struct wim_lookup_table_entry *lte,
828 			  bool stream_discarded)
830 	union wimlib_progress_info *progress = &progress_data->progress;
833 	if (stream_discarded) {
834 		progress->write_streams.total_bytes -= lte->size;
/* Pull the next report threshold back so it never exceeds the shrunken
 * total.  ~0 means "no further reports scheduled". */
835 		if (progress_data->next_progress != ~(uint64_t)0 &&
836 		    progress_data->next_progress > progress->write_streams.total_bytes)
838 			progress_data->next_progress = progress->write_streams.total_bytes;
841 		progress->write_streams.completed_bytes += lte->size;
843 	new_wim_part = false;
/* Crossing into a different source WIM part: count the previous one done. */
844 	if (lte->resource_location == RESOURCE_IN_WIM &&
845 	    lte->rspec->wim != progress_data->prev_wim_part)
847 		if (progress_data->prev_wim_part) {
849 			progress->write_streams.completed_parts++;
851 		progress_data->prev_wim_part = lte->rspec->wim;
853 	progress->write_streams.completed_streams++;
854 	if (progress_data->progress_func
855 	    && (progress->write_streams.completed_bytes >= progress_data->next_progress
858 		progress_data->progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
860 		if (progress_data->next_progress == progress->write_streams.total_bytes) {
861 			progress_data->next_progress = ~(uint64_t)0;
/* Schedule the next report at +1% of total, capped at the total. */
863 			progress_data->next_progress =
864 				min(progress->write_streams.total_bytes,
865 				    progress->write_streams.completed_bytes +
866 				    progress->write_streams.total_bytes / 100);
/* Context for serial_write_stream(): the parameters to forward to
 * write_wim_resource() for each stream.  (out_ctype and out_chunk_size
 * members fall on lines missing from this fragmented view.) */
871 struct serial_write_stream_ctx {
872 	struct filedes *out_fd;
875 	struct wimlib_lzx_context **comp_ctx;
876 	int write_resource_flags;
/* do_write_stream_list() callback for single-threaded writing: simply
 * forwards the stream to write_wim_resource() using the parameters captured
 * in the serial_write_stream_ctx. */
880 serial_write_stream(struct wim_lookup_table_entry *lte, void *_ctx)
882 	struct serial_write_stream_ctx *ctx = _ctx;
883 	return write_wim_resource(lte, ctx->out_fd,
887 				  ctx->write_resource_flags,
892 /* Write a list of streams, taking into account that some streams may be
893  * duplicates that are checksummed and discarded on the fly, and also delegating
894  * the actual writing of a stream to a function @write_stream_cb, which is
895  * passed the context @write_stream_ctx.  */
/* NOTE(review): fragmented view — error-return paths, several closing braces,
 * and the final return fall on missing lines. */
897 do_write_stream_list(struct list_head *stream_list,
898 		     struct wim_lookup_table *lookup_table,
899 		     int (*write_stream_cb)(struct wim_lookup_table_entry *, void *),
900 		     void *write_stream_ctx,
901 		     struct write_streams_progress_data *progress_data)
904 	struct wim_lookup_table_entry *lte;
905 	bool stream_discarded;
907 	/* For each stream in @stream_list ...  */
908 	while (!list_empty(stream_list)) {
909 		stream_discarded = false;
910 		lte = container_of(stream_list->next,
911 				   struct wim_lookup_table_entry,
913 		list_del(&lte->write_streams_list);
914 		if (lte->unhashed && !lte->unique_size) {
915 			/* Unhashed stream that shares a size with some other
916 			 * stream in the WIM we are writing.  The stream must be
917 			 * checksummed to know if we need to write it or not.  */
918 			struct wim_lookup_table_entry *tmp;
919 			u32 orig_out_refcnt = lte->out_refcnt;
921 			ret = hash_unhashed_stream(lte, lookup_table, &tmp);
925 				/* We found a duplicate stream.  'lte' was
926 				 * freed, so replace it with the duplicate.  */
929 				/* 'out_refcnt' was transferred to the
930 				 * duplicate, and we can detect if the duplicate
931 				 * stream was already referenced for writing by
932 				 * checking if its 'out_refcnt' is higher than
933 				 * that of the original stream.  In such cases,
934 				 * the current stream can be discarded.  We can
935 				 * also discard the current stream if it was
936 				 * previously marked as filtered (e.g. already
937 				 * present in the WIM being written).  */
938 				if (lte->out_refcnt > orig_out_refcnt ||
940 					DEBUG("Discarding duplicate stream of "
943 					lte->no_progress = 0;
944 					stream_discarded = true;
945 					goto skip_to_progress;
950 		/* Here, @lte is either a hashed stream or an unhashed stream
951 		 * with a unique size.  In either case we know that the stream
952 		 * has to be written.  In either case the SHA1 message digest
953 		 * will be calculated over the stream while writing it; however,
954 		 * in the former case this is done merely to check the data,
955 		 * while in the latter case this is done because we do not have
956 		 * the SHA1 message digest yet.  */
957 		wimlib_assert(lte->out_refcnt != 0);
959 		lte->no_progress = 0;
960 		ret = (*write_stream_cb)(lte, write_stream_ctx);
963 		/* In parallel mode, some streams are deferred for later,
964 		 * serialized processing; ignore them here.  */
/* Newly hashed stream: move it off the unhashed list into the
 * lookup table now that its SHA-1 is known. */
968 			list_del(&lte->unhashed_list);
969 			lookup_table_insert(lookup_table, lte);
973 		if (!lte->no_progress) {
974 			do_write_streams_progress(progress_data,
975 						  lte, stream_discarded);
/* Serial variant of the stream-list writer: bundle the output parameters into
 * a serial_write_stream_ctx and run do_write_stream_list() with
 * serial_write_stream() as the callback. */
982 do_write_stream_list_serial(struct list_head *stream_list,
983 			    struct wim_lookup_table *lookup_table,
984 			    struct filedes *out_fd,
987 			    struct wimlib_lzx_context **comp_ctx,
988 			    int write_resource_flags,
989 			    struct write_streams_progress_data *progress_data)
991 	struct serial_write_stream_ctx ctx = {
993 		.out_ctype = out_ctype,
994 		.out_chunk_size = out_chunk_size,
995 		.write_resource_flags = write_resource_flags,
996 		.comp_ctx = comp_ctx,
998 	return do_write_stream_list(stream_list,
1000 				    serial_write_stream,
/* Translate public WIMLIB_WRITE_FLAG_* bits into the internal
 * WIMLIB_WRITE_RESOURCE_FLAG_* bits used by the resource writer. */
1006 write_flags_to_resource_flags(int write_flags)
1008 	int resource_flags = 0;
1010 	if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
1011 		resource_flags |= WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS;
1012 	if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
1013 		resource_flags |= WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE;
1014 	return resource_flags;
/* Single-threaded stream-list writer: report that one thread is in use via
 * the progress callback, then delegate to do_write_stream_list_serial().
 * (Several forwarded arguments fall on lines missing from this view.) */
1018 write_stream_list_serial(struct list_head *stream_list,
1019 			 struct wim_lookup_table *lookup_table,
1020 			 struct filedes *out_fd,
1023 			 struct wimlib_lzx_context **comp_ctx,
1024 			 int write_resource_flags,
1025 			 struct write_streams_progress_data *progress_data)
1027 	union wimlib_progress_info *progress = &progress_data->progress;
1028 	DEBUG("Writing stream list of size %"PRIu64" (serial version)",
1029 	      progress->write_streams.total_streams);
1030 	progress->write_streams.num_threads = 1;
1031 	if (progress_data->progress_func) {
1032 		progress_data->progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
1035 	return do_write_stream_list_serial(stream_list,
1041 					   write_resource_flags,
1045 #ifdef ENABLE_MULTITHREADED_COMPRESSION
/* Write the (possibly compressed) chunks of one message to the output file
 * with a single writev, recording each chunk in @chunk_tab first.  For a
 * pipable WIM each chunk is preceded by a pwm_chunk_hdr giving its compressed
 * size, so the iovec array is interleaved header/data.  Returns 0 or a write
 * error; the success path is on lines missing from this fragmented view. */
1047 write_wim_chunks(struct message *msg, struct filedes *out_fd,
1048 		 struct chunk_table *chunk_tab,
1049 		 int write_resource_flags)
1052 	struct pwm_chunk_hdr *chunk_hdrs;
1056 	for (unsigned i = 0; i < msg->num_chunks; i++)
1057 		chunk_tab_record_chunk(chunk_tab, msg->out_chunks[i].iov_len);
1059 	if (!(write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE)) {
1060 		nvecs = msg->num_chunks;
1061 		vecs = msg->out_chunks;
1063 		/* Special case:  If writing a compressed resource to a pipable
1064 		 * WIM, prefix each compressed chunk with a header that gives
1065 		 * its compressed size.  */
/* Stack allocation is bounded: at most MAX_CHUNKS_PER_MSG headers/vecs. */
1066 		nvecs = msg->num_chunks * 2;
1067 		vecs = alloca(nvecs * sizeof(vecs[0]));
1068 		chunk_hdrs = alloca(msg->num_chunks * sizeof(chunk_hdrs[0]));
1070 		for (unsigned i = 0; i < msg->num_chunks; i++) {
1071 			chunk_hdrs[i].compressed_size = cpu_to_le32(msg->out_chunks[i].iov_len);
1072 			vecs[i * 2].iov_base = &chunk_hdrs[i];
1073 			vecs[i * 2].iov_len = sizeof(chunk_hdrs[i]);
1074 			vecs[i * 2 + 1].iov_base = msg->out_chunks[i].iov_base;
1075 			vecs[i * 2 + 1].iov_len = msg->out_chunks[i].iov_len;
1078 	ret = full_writev(out_fd, vecs, nvecs);
1080 		ERROR_WITH_ERRNO("Write error");
/* All state for the main writer thread in multithreaded compression mode:
 * the input parameters (stream list, output fd, queues, progress), the
 * message pool, and the bookkeeping for the stream currently being read
 * (next_*) and written (cur_chunk_tab, outstanding_streams).  Some members
 * fall on lines missing from this fragmented view. */
1084 struct main_writer_thread_ctx {
1085 	struct list_head *stream_list;
1086 	struct wim_lookup_table *lookup_table;
1087 	struct filedes *out_fd;
1088 	off_t res_start_offset;
1091 	struct wimlib_lzx_context **comp_ctx;
1092 	int write_resource_flags;
1093 	struct shared_queue *res_to_compress_queue;
1094 	struct shared_queue *compressed_res_queue;
1095 	size_t num_messages;
1096 	struct write_streams_progress_data *progress_data;
/* Messages not currently in flight to the compressor threads. */
1098 	struct list_head available_msgs;
/* Streams with chunks currently out for compression; see
 * main_writer_thread_init_ctx() for the ordering invariants. */
1099 	struct list_head outstanding_streams;
1100 	struct list_head serial_streams;
1101 	size_t num_outstanding_messages;
1103 	SHA_CTX next_sha_ctx;
1105 	u64 next_num_chunks;
1106 	struct wim_lookup_table_entry *next_lte;
1108 	struct message *msgs;
1109 	struct message *next_msg;
1110 	struct chunk_table *cur_chunk_tab;
/* Allocate the per-chunk input and output buffers for one message, each
 * @out_chunk_size bytes.  Returns WIMLIB_ERR_NOMEM on allocation failure;
 * partially allocated buffers are left for destroy_message() to free. */
1114 init_message(struct message *msg, u32 out_chunk_size)
1116 	msg->out_chunk_size = out_chunk_size;
1117 	for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
1118 		msg->compressed_chunks[i] = MALLOC(out_chunk_size);
1119 		msg->uncompressed_chunks[i] = MALLOC(out_chunk_size);
1120 		if (msg->compressed_chunks[i] == NULL ||
1121 		    msg->uncompressed_chunks[i] == NULL)
1122 			return WIMLIB_ERR_NOMEM;
/* Free the chunk buffers of one message (counterpart to init_message();
 * FREE(NULL) is tolerated for partially initialized messages). */
1128 destroy_message(struct message *msg)
1130 	for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
1131 		FREE(msg->compressed_chunks[i]);
1132 		FREE(msg->uncompressed_chunks[i]);
/* Destroy and free an array of @num_messages messages allocated by
 * allocate_messages().  (The FREE of the array itself and the NULL guard, if
 * any, fall on lines missing from this fragmented view.) */
1137 free_messages(struct message *msgs, size_t num_messages)
1140 		for (size_t i = 0; i < num_messages; i++)
1141 			destroy_message(&msgs[i]);
/* Allocate an array of @num_messages messages with @out_chunk_size-byte chunk
 * buffers.  On any failure all messages are freed; the NULL/return statements
 * fall on lines missing from this fragmented view. */
1146 static struct message *
1147 allocate_messages(size_t num_messages, u32 out_chunk_size)
1149 	struct message *msgs;
1151 	msgs = CALLOC(num_messages, sizeof(struct message));
1154 	for (size_t i = 0; i < num_messages; i++) {
1155 		if (init_message(&msgs[i], out_chunk_size)) {
/* Safe even for not-yet-initialized entries: CALLOC zeroed them,
 * and destroy_message() tolerates NULL buffers. */
1156 			free_messages(msgs, num_messages);
/* Tear down the main writer thread context: drain any messages still owned
 * by the compressor threads (so their buffers are not freed while in use),
 * then free the message pool and the current chunk table. */
1164 main_writer_thread_destroy_ctx(struct main_writer_thread_ctx *ctx)
1166 	while (ctx->num_outstanding_messages--)
1167 		shared_queue_get(ctx->compressed_res_queue);
1168 	free_messages(ctx->msgs, ctx->num_messages);
1169 	FREE(ctx->cur_chunk_tab);
/* Initialize the main writer thread context: pre-allocate the message pool
 * and set up the various lists and pointers to their empty/initial states.
 * Returns WIMLIB_ERR_NOMEM on allocation failure; the success return falls on
 * a line missing from this fragmented view. */
1173 main_writer_thread_init_ctx(struct main_writer_thread_ctx *ctx)
1175 	/* Pre-allocate all the buffers that will be needed to do the chunk
1177 	ctx->msgs = allocate_messages(ctx->num_messages, ctx->out_chunk_size);
1178 	if (ctx->msgs == NULL)
1179 		return WIMLIB_ERR_NOMEM;
1181 	/* Initially, all the messages are available to use.  */
1182 	INIT_LIST_HEAD(&ctx->available_msgs);
1183 	for (size_t i = 0; i < ctx->num_messages; i++)
1184 		list_add_tail(&ctx->msgs[i].list, &ctx->available_msgs);
1186 	/* outstanding_streams is the list of streams that currently have had
1187 	 * chunks sent off for compression.
1189 	 * The first stream in outstanding_streams is the stream that is
1190 	 * currently being written.
1192 	 * The last stream in outstanding_streams is the stream that is
1193 	 * currently being read and having chunks fed to the compressor threads.
1195 	INIT_LIST_HEAD(&ctx->outstanding_streams);
1196 	ctx->num_outstanding_messages = 0;
1198 	/* Message currently being prepared.  */
1199 	ctx->next_msg = NULL;
1201 	/* Resources that don't need any chunks compressed are added to this
1202 	 * list and written directly by the main thread.  */
1203 	INIT_LIST_HEAD(&ctx->serial_streams);
1205 	/* Pointer to chunk table for stream currently being written.  */
1206 	ctx->cur_chunk_tab = NULL;
/* Receive one message of compressed chunks from the compressor threads,
 * then write out as many in-order chunks -- and fully-completed streams
 * -- as possible.  Streams are written in the order they appear in
 * ctx->outstanding_streams; out-of-order messages are buffered on the
 * owning stream's msg_list until their turn comes.  */
1212 receive_compressed_chunks(struct main_writer_thread_ctx *ctx)
1214 struct message *msg;
1215 struct wim_lookup_table_entry *cur_lte;
1218 wimlib_assert(!list_empty(&ctx->outstanding_streams));
1219 wimlib_assert(ctx->num_outstanding_messages != 0);
1221 cur_lte = container_of(ctx->outstanding_streams.next,
1222 struct wim_lookup_table_entry,
1223 being_compressed_list);
1225 /* Get the next message from the queue and process it.
1226 * The message will contain 1 or more data chunks that have been
1228 msg = shared_queue_get(ctx->compressed_res_queue);
1229 msg->complete = true;
1230 --ctx->num_outstanding_messages;
1232 /* Is this the next chunk in the current resource? If it's not (i.e.,
1233 * an earlier chunk in a same or different resource hasn't been
1234 * compressed yet), do nothing, and keep this message around until all
1235 * earlier chunks are received.
1237 * Otherwise, write all the chunks we can. */
1238 while (cur_lte != NULL &&
1239 !list_empty(&cur_lte->msg_list)
1240 && (msg = container_of(cur_lte->msg_list.next,
1244 list_move(&msg->list, &ctx->available_msgs);
1245 if (msg->begin_chunk == 0) {
1246 /* First set of chunks. */
1248 /* Write pipable WIM stream header if needed. */
1249 if (ctx->write_resource_flags &
1250 WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE)
1252 ret = write_pwm_stream_header(cur_lte, ctx->out_fd,
1253 WIM_RESHDR_FLAG_COMPRESSED);
1258 /* Save current offset. */
1259 ctx->res_start_offset = ctx->out_fd->offset;
1261 /* Begin building the chunk table, and leave space for
1263 ret = begin_wim_resource_chunk_tab(cur_lte,
1265 ctx->out_chunk_size,
1266 &ctx->cur_chunk_tab,
1267 ctx->write_resource_flags);
1272 /* Write the compressed chunks from the message. */
1273 ret = write_wim_chunks(msg, ctx->out_fd, ctx->cur_chunk_tab,
1274 ctx->write_resource_flags);
1278 /* Was this the last chunk of the stream? If so, finish the
1279 * stream by writing the chunk table. */
1280 if (list_empty(&cur_lte->msg_list) &&
1281 msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks)
1285 ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab,
1287 ctx->res_start_offset,
1288 ctx->write_resource_flags);
/* The stream is finished; remove it from the list of outstanding
 * streams.  */
1292 list_del(&cur_lte->being_compressed_list);
1294 res_csize = ctx->out_fd->offset - ctx->res_start_offset;
1296 FREE(ctx->cur_chunk_tab);
1297 ctx->cur_chunk_tab = NULL;
1299 /* Check for resources compressed to greater than or
1300 * equal to their original size and write them
1301 * uncompressed instead. (But never do this if writing
1303 if (res_csize >= cur_lte->size &&
1304 !(ctx->write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE))
1306 DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
1307 "writing uncompressed instead",
1308 cur_lte->size, res_csize);
/* Rewind the output file over the compressed data and rewrite the
 * resource uncompressed.  */
1309 ret = seek_and_truncate(ctx->out_fd, ctx->res_start_offset);
1312 ret = write_wim_resource(cur_lte,
1314 WIMLIB_COMPRESSION_TYPE_NONE,
1316 &cur_lte->out_reshdr,
1317 ctx->write_resource_flags,
1322 cur_lte->out_reshdr.size_in_wim =
1325 cur_lte->out_reshdr.uncompressed_size =
1328 cur_lte->out_reshdr.offset_in_wim =
1329 ctx->res_start_offset;
1331 cur_lte->out_reshdr.flags =
1333 WIM_RESHDR_FLAG_COMPRESSED;
1335 DEBUG("Wrote compressed resource "
1336 "(%"PRIu64" => %"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)",
1337 cur_lte->out_reshdr.uncompressed_size,
1338 cur_lte->out_reshdr.size_in_wim,
1339 cur_lte->out_reshdr.offset_in_wim,
1340 cur_lte->out_reshdr.flags);
1343 do_write_streams_progress(ctx->progress_data,
1346 /* Since we just finished writing a stream, write any
1347 * streams that have been added to the serial_streams
1348 * list for direct writing by the main thread (e.g.
1349 * resources that don't need to be compressed because
1350 * the desired compression type is the same as the
1351 * previous compression type). */
1352 if (!list_empty(&ctx->serial_streams)) {
1353 ret = do_write_stream_list_serial(&ctx->serial_streams,
1357 ctx->out_chunk_size,
1359 ctx->write_resource_flags,
1360 ctx->progress_data);
1365 /* Advance to the next stream to write. */
1366 if (list_empty(&ctx->outstanding_streams)) {
1369 cur_lte = container_of(ctx->outstanding_streams.next,
1370 struct wim_lookup_table_entry,
1371 being_compressed_list);
1378 /* Called when the main thread has read a new chunk of data. */
/* Updates the running SHA-1 of the stream being read, copies the chunk
 * into the message currently being prepared, and, once the message is
 * full, queues it for the compressor threads.  Blocks (by receiving
 * completed messages) when no message buffer is available.  */
1380 main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx)
1382 struct main_writer_thread_ctx *ctx = _ctx;
1384 struct message *next_msg;
1385 u64 next_chunk_in_msg;
1387 /* Update SHA1 message digest for the stream currently being read by the
1389 sha1_update(&ctx->next_sha_ctx, chunk, chunk_size);
1391 /* We send chunks of data to the compressor threads in batches which we
1392 * refer to as "messages". @next_msg is the message that is currently
1393 * being prepared to send off. If it is NULL, that indicates that we
1394 * need to start a new message. */
1395 next_msg = ctx->next_msg;
1397 /* We need to start a new message. First check to see if there
1398 * is a message available in the list of available messages. If
1399 * so, we can just take one. If not, all the messages (there is
1400 * a fixed number of them, proportional to the number of
1401 * threads) have been sent off to the compressor threads, so we
1402 * receive messages from the compressor threads containing
1403 * compressed chunks of data.
1405 * We may need to receive multiple messages before one is
1406 * actually available to use because messages received that are
1407 * *not* for the very next set of chunks to compress must be
1408 * buffered until it's time to write those chunks. */
1409 while (list_empty(&ctx->available_msgs)) {
1410 ret = receive_compressed_chunks(ctx);
1415 next_msg = container_of(ctx->available_msgs.next,
1416 struct message, list);
1417 list_del(&next_msg->list);
1418 next_msg->complete = false;
1419 next_msg->begin_chunk = ctx->next_chunk;
1420 next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG,
1421 ctx->next_num_chunks - ctx->next_chunk);
1422 ctx->next_msg = next_msg;
1425 /* Fill in the next chunk to compress */
1426 next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk;
1428 next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size;
1429 memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg],
1432 if (++next_chunk_in_msg == next_msg->num_chunks) {
1433 /* Send off an array of chunks to compress */
1434 list_add_tail(&next_msg->list, &ctx->next_lte->msg_list);
1435 shared_queue_put(ctx->res_to_compress_queue, next_msg);
1436 ++ctx->num_outstanding_messages;
1437 ctx->next_msg = NULL;
/* Called after all streams have been submitted for compression: wait for
 * and write all remaining compressed chunks, then write the streams that
 * were deferred to the serial list for direct handling by the main
 * thread.  */
1443 main_writer_thread_finish(void *_ctx)
1445 struct main_writer_thread_ctx *ctx = _ctx;
1447 while (ctx->num_outstanding_messages != 0) {
1448 ret = receive_compressed_chunks(ctx);
1452 wimlib_assert(list_empty(&ctx->outstanding_streams));
1453 return do_write_stream_list_serial(&ctx->serial_streams,
1457 ctx->out_chunk_size,
1459 ctx->write_resource_flags,
1460 ctx->progress_data);
/* Submit the stream @lte for parallel compression: reset the per-stream
 * state (SHA-1 context, chunk counters), add @lte to the tail of the
 * outstanding-streams list, then read its data in out_chunk_size chunks,
 * feeding each chunk to main_writer_thread_cb().  */
1464 submit_stream_for_compression(struct wim_lookup_table_entry *lte,
1465 struct main_writer_thread_ctx *ctx)
1469 /* Read the entire stream @lte, feeding its data chunks to the
1470 * compressor threads. Also SHA1-sum the stream; this is required in
1471 * the case that @lte is unhashed, and a nice additional verification
1472 * when @lte is already hashed. */
1473 sha1_init(&ctx->next_sha_ctx);
1474 ctx->next_chunk = 0;
1475 ctx->next_num_chunks = DIV_ROUND_UP(lte->size, ctx->out_chunk_size);
1476 ctx->next_lte = lte;
1477 INIT_LIST_HEAD(&lte->msg_list);
1478 list_add_tail(&lte->being_compressed_list, &ctx->outstanding_streams);
1479 ret = read_resource_prefix(lte, lte->size,
1480 main_writer_thread_cb,
1481 ctx->out_chunk_size, ctx, 0);
1484 wimlib_assert(ctx->next_chunk == ctx->next_num_chunks);
1485 return finalize_and_check_sha1(&ctx->next_sha_ctx, lte);
/* Per-stream callback for the parallel writer.  Streams that are very
 * small (< 1000 bytes) or do not need to be (re)compressed are deferred
 * to the serial list for later handling by the main thread; all other
 * streams are submitted for parallel compression.  */
1489 main_thread_process_next_stream(struct wim_lookup_table_entry *lte, void *_ctx)
1491 struct main_writer_thread_ctx *ctx = _ctx;
1494 if (lte->size < 1000 ||
1495 !must_compress_stream(lte, ctx->write_resource_flags,
1496 ctx->out_ctype, ctx->out_chunk_size))
1498 /* Stream is too small or isn't being compressed. Process it by
1499 * the main thread when we have a chance. We can't necessarily
1500 * process it right here, as the main thread could be in the
1501 * middle of writing a different stream. */
1502 list_add_tail(&lte->write_streams_list, &ctx->serial_streams);
1506 ret = submit_stream_for_compression(lte, ctx);
1508 lte->no_progress = 1;
/* Return the default number of compressor threads to use: the number of
 * processors, queried via the Windows API or sysconf() depending on
 * platform.  */
1513 get_default_num_threads(void)
1516 return win32_get_number_of_processors();
1518 return sysconf(_SC_NPROCESSORS_ONLN);
1522 /* Equivalent to write_stream_list_serial(), except this takes a @num_threads
1523 * parameter and will perform compression using that many threads. Falls
1524 * back to write_stream_list_serial() on certain errors, such as a failure to
1525 * create the number of threads requested.
1527 * High level description of the algorithm for writing compressed streams in
1528 * parallel: We perform compression on chunks rather than on full files. The
1529 * currently executing thread becomes the main thread and is entirely in charge
1530 * of reading the data to compress (which may be in any location understood by
1531 * the resource code--- such as in an external file being captured, or in
1532 * another WIM file from which an image is being exported) and actually writing
1533 * the compressed data to the output file. Additional threads are "compressor
1534 * threads" and all execute the compressor_thread_proc, where they repeatedly
1535 * retrieve buffers of data from the main thread, compress them, and hand them
1536 * back to the main thread.
1538 * Certain streams, such as streams that do not need to be compressed (e.g.
1539 * input compression type same as output compression type) or streams of very
1540 * small size are placed in a list (main_writer_thread_ctx.serial_list) and
1541 * handled entirely by the main thread at an appropriate time.
1543 * At any given point in time, multiple streams may be having chunks compressed
1544 * concurrently. The stream that the main thread is currently *reading* may be
1545 later in the list than the stream that the main thread is currently
1548 write_stream_list_parallel(struct list_head *stream_list,
1549 struct wim_lookup_table *lookup_table,
1550 struct filedes *out_fd,
1553 struct wimlib_lzx_context **comp_ctx,
1554 int write_resource_flags,
1555 struct write_streams_progress_data *progress_data,
1556 unsigned num_threads)
1559 struct shared_queue res_to_compress_queue;
1560 struct shared_queue compressed_res_queue;
1561 pthread_t *compressor_threads = NULL;
1562 union wimlib_progress_info *progress = &progress_data->progress;
1563 unsigned num_started_threads;
1564 bool can_retry = true;
/* num_threads == 0 means auto-detect.  Fall through to the serial
 * writer if detection fails or only one processor is available.  */
1566 if (num_threads == 0) {
1567 long nthreads = get_default_num_threads();
1568 if (nthreads < 1 || nthreads > UINT_MAX) {
1569 WARNING("Could not determine number of processors! Assuming 1");
1570 goto out_serial_quiet;
1571 } else if (nthreads == 1) {
1572 goto out_serial_quiet;
1574 num_threads = nthreads;
1578 DEBUG("Writing stream list of size %"PRIu64" "
1579 "(parallel version, num_threads=%u)",
1580 progress->write_streams.total_streams, num_threads);
1582 progress->write_streams.num_threads = num_threads;
/* Each compressor thread can have up to MESSAGES_PER_THREAD messages
 * in flight; this also sizes both shared queues.  */
1584 static const size_t MESSAGES_PER_THREAD = 2;
1585 size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD);
1587 DEBUG("Initializing shared queues (queue_size=%zu)", queue_size);
1589 ret = shared_queue_init(&res_to_compress_queue, queue_size);
1593 ret = shared_queue_init(&compressed_res_queue, queue_size);
1595 goto out_destroy_res_to_compress_queue;
1597 struct compressor_thread_params *params;
1599 params = CALLOC(num_threads, sizeof(params[0]));
1600 if (params == NULL) {
1601 ret = WIMLIB_ERR_NOMEM;
1602 goto out_destroy_compressed_res_queue;
1605 for (unsigned i = 0; i < num_threads; i++) {
1606 params[i].res_to_compress_queue = &res_to_compress_queue;
1607 params[i].compressed_res_queue = &compressed_res_queue;
1608 params[i].out_ctype = out_ctype;
1609 if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX) {
1610 ret = wimlib_lzx_alloc_context(out_chunk_size,
1611 NULL, &params[i].comp_ctx);
1613 goto out_free_params;
1617 compressor_threads = MALLOC(num_threads * sizeof(pthread_t));
1618 if (compressor_threads == NULL) {
1619 ret = WIMLIB_ERR_NOMEM;
1620 goto out_free_params;
1623 for (unsigned i = 0; i < num_threads; i++) {
1624 DEBUG("pthread_create thread %u of %u", i + 1, num_threads);
1625 ret = pthread_create(&compressor_threads[i], NULL,
1626 compressor_thread_proc, &params[i]);
1630 ERROR_WITH_ERRNO("Failed to create compressor "
1632 i + 1, num_threads);
1633 num_started_threads = i;
1637 num_started_threads = num_threads;
1639 if (progress_data->progress_func) {
1640 progress_data->progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
1644 struct main_writer_thread_ctx ctx;
1646 memset(&ctx, 0, sizeof(ctx));
1648 ctx.stream_list = stream_list;
1649 ctx.lookup_table = lookup_table;
1650 ctx.out_fd = out_fd;
1651 ctx.out_ctype = out_ctype;
1652 ctx.out_chunk_size = out_chunk_size;
1653 ctx.comp_ctx = comp_ctx;
1654 ctx.res_to_compress_queue = &res_to_compress_queue;
1655 ctx.compressed_res_queue = &compressed_res_queue;
1656 ctx.num_messages = queue_size;
1657 ctx.write_resource_flags = write_resource_flags;
1658 ctx.progress_data = progress_data;
1659 ret = main_writer_thread_init_ctx(&ctx);
1664 ret = do_write_stream_list(stream_list, lookup_table,
1665 main_thread_process_next_stream,
1666 &ctx, progress_data);
1668 goto out_destroy_ctx;
1670 /* The main thread has finished reading all streams that are going to be
1671 * compressed in parallel, and it now needs to wait for all remaining
1672 * chunks to be compressed so that the remaining streams can actually be
1673 * written to the output file. Furthermore, any remaining streams that
1674 * had processing deferred to the main thread need to be handled. These
1675 * tasks are done by the main_writer_thread_finish() function. */
1676 ret = main_writer_thread_finish(&ctx);
1678 main_writer_thread_destroy_ctx(&ctx);
/* Shut down the compressor threads: a NULL message tells each thread
 * to exit; then join them all.  */
1680 for (unsigned i = 0; i < num_started_threads; i++)
1681 shared_queue_put(&res_to_compress_queue, NULL);
1683 for (unsigned i = 0; i < num_started_threads; i++) {
1684 if (pthread_join(compressor_threads[i], NULL)) {
1685 WARNING_WITH_ERRNO("Failed to join compressor "
1687 i + 1, num_threads);
1690 FREE(compressor_threads);
1692 for (unsigned i = 0; i < num_threads; i++)
1693 wimlib_lzx_free_context(params[i].comp_ctx);
1695 out_destroy_compressed_res_queue:
1696 shared_queue_destroy(&compressed_res_queue);
1697 out_destroy_res_to_compress_queue:
1698 shared_queue_destroy(&res_to_compress_queue);
/* NOTE(review): the serial fallback below is taken only when
 * 'can_retry' is still set and 'ret' indicates a retryable failure
 * (e.g. thread creation or allocation); confirm against the elided
 * error paths that set these.  */
1699 if (!can_retry || (ret >= 0 && ret != WIMLIB_ERR_NOMEM))
1702 WARNING("Falling back to single-threaded compression");
1704 return write_stream_list_serial(stream_list,
1710 write_resource_flags,
1716 /* Write a list of streams to a WIM (@out_fd) using the compression type
1717 * @out_ctype, chunk size @out_chunk_size, and up to @num_threads compressor
/* Sorts the streams for sequential reading, computes progress totals,
 * then dispatches to the parallel or serial writer.  Returns 0 on
 * success or a WIMLIB_ERR_* code.  */
1720 write_stream_list(struct list_head *stream_list,
1721 struct wim_lookup_table *lookup_table,
1722 struct filedes *out_fd, int out_ctype,
1724 struct wimlib_lzx_context **comp_ctx,
1726 unsigned num_threads, wimlib_progress_func_t progress_func)
1729 int write_resource_flags;
1731 u64 total_compression_bytes;
1732 unsigned total_parts;
1733 WIMStruct *prev_wim_part;
1735 struct wim_lookup_table_entry *lte;
1736 struct write_streams_progress_data progress_data;
1738 if (list_empty(stream_list)) {
1739 DEBUG("No streams to write.");
1743 write_resource_flags = write_flags_to_resource_flags(write_flags);
1745 DEBUG("Writing stream list (offset = %"PRIu64", write_resource_flags=0x%08x)",
1746 out_fd->offset, write_resource_flags);
1748 /* Sort the stream list into a good order for reading. */
1749 ret = sort_stream_list_by_sequential_order(stream_list,
1750 offsetof(struct wim_lookup_table_entry,
1751 write_streams_list));
1755 /* Calculate the total size of the streams to be written. Note: this
1756 * will be the uncompressed size, as we may not know the compressed size
1757 * yet, and also this will assume that every unhashed stream will be
1758 * written (which will not necessarily be the case). */
1760 total_compression_bytes = 0;
1763 prev_wim_part = NULL;
1764 list_for_each_entry(lte, stream_list, write_streams_list) {
1766 total_bytes += lte->size;
1767 if (must_compress_stream(lte, write_resource_flags,
1768 out_ctype, out_chunk_size))
1769 total_compression_bytes += lte->size;
1770 if (lte->resource_location == RESOURCE_IN_WIM) {
/* Count the number of distinct source WIM parts streams are
 * being read from (for progress reporting).  */
1771 if (prev_wim_part != lte->rspec->wim) {
1772 prev_wim_part = lte->rspec->wim;
1778 memset(&progress_data, 0, sizeof(progress_data));
1779 progress_data.progress_func = progress_func;
1781 progress_data.progress.write_streams.total_bytes = total_bytes;
1782 progress_data.progress.write_streams.total_streams = num_streams;
1783 progress_data.progress.write_streams.completed_bytes = 0;
1784 progress_data.progress.write_streams.completed_streams = 0;
1785 progress_data.progress.write_streams.num_threads = num_threads;
1786 progress_data.progress.write_streams.compression_type = out_ctype;
1787 progress_data.progress.write_streams.total_parts = total_parts;
1788 progress_data.progress.write_streams.completed_parts = 0;
1790 progress_data.next_progress = 0;
1791 progress_data.prev_wim_part = NULL;
1793 #ifdef ENABLE_MULTITHREADED_COMPRESSION
/* Heuristic: multithreaded compression is only worthwhile when there
 * is a nontrivial amount of data to compress (>= ~2 MB) and more than
 * one thread was requested (or auto-detection is allowed).  */
1794 if (total_compression_bytes >= 2000000 && num_threads != 1)
1795 ret = write_stream_list_parallel(stream_list,
1801 write_resource_flags,
1806 ret = write_stream_list_serial(stream_list,
1812 write_resource_flags,
1815 DEBUG("Successfully wrote stream list.");
1817 DEBUG("Failed to write stream list (ret=%d).", ret);
/* Hash table keyed on stream (uncompressed) size; used to determine
 * which streams have a unique size and therefore cannot be duplicates
 * of any other stream being considered.  */
1821 struct stream_size_table {
1822 struct hlist_head *array;
/* Initialize a stream size table with @capacity hash buckets.
 * Returns 0 on success or WIMLIB_ERR_NOMEM.  */
1828 init_stream_size_table(struct stream_size_table *tab, size_t capacity)
1830 tab->array = CALLOC(capacity, sizeof(tab->array[0]));
1832 return WIMLIB_ERR_NOMEM;
1833 tab->num_entries = 0;
1834 tab->capacity = capacity;
/* Free the memory used by the stream size table.  */
1839 destroy_stream_size_table(struct stream_size_table *tab)
/* Insert @lte into the stream size table, hashed on its uncompressed
 * size.  If another stream with the same size is already present, both
 * streams have 'unique_size' cleared; otherwise @lte is (provisionally)
 * marked as having a unique size.  */
1845 stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab)
1847 struct stream_size_table *tab = _tab;
1849 struct wim_lookup_table_entry *same_size_lte;
1850 struct hlist_node *tmp;
1852 pos = hash_u64(lte->size) % tab->capacity;
1853 lte->unique_size = 1;
1854 hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) {
1855 if (same_size_lte->size == lte->size) {
1856 lte->unique_size = 0;
1857 same_size_lte->unique_size = 0;
1862 hlist_add_head(&lte->hash_list_2, &tab->array[pos]);
/* Context used while building the list of streams to include in a
 * write: the accumulated stream list plus the size table used for
 * duplicate-size detection.  */
1867 struct find_streams_ctx {
1870 struct list_head stream_list;
1871 struct stream_size_table stream_size_tab;
/* Account for @nref additional references to @lte in the logical write.
 * On the first reference (out_refcnt was 0), the stream is inserted
 * into the size table and appended to the stream list.  */
1875 lte_reference_for_logical_write(struct wim_lookup_table_entry *lte,
1876 struct find_streams_ctx *ctx,
1879 if (lte->out_refcnt == 0) {
1880 stream_size_table_insert(lte, &ctx->stream_size_tab);
1881 list_add_tail(&lte->write_streams_list, &ctx->stream_list);
1883 lte->out_refcnt += nref;
/* for_lookup_table_entry() callback: reset out_refcnt, then reference
 * @lte for the logical write using its full reference count (treating
 * a zero refcnt as 1).  */
1887 do_lte_full_reference_for_logical_write(struct wim_lookup_table_entry *lte,
1890 struct find_streams_ctx *ctx = _ctx;
1891 lte->out_refcnt = 0;
1892 lte_reference_for_logical_write(lte, ctx,
1893 (lte->refcnt ? lte->refcnt : 1));
/* Reference, for the logical write, each stream of @inode (the unnamed
 * stream plus any alternate data streams), once per link to the inode.
 * Fails with WIMLIB_ERR_RESOURCE_NOT_FOUND if a stream with a nonzero
 * hash cannot be resolved in the lookup table.  */
1898 inode_find_streams_to_write(struct wim_inode *inode,
1899 struct wim_lookup_table *table,
1900 struct find_streams_ctx *ctx)
1902 struct wim_lookup_table_entry *lte;
1905 for (i = 0; i <= inode->i_num_ads; i++) {
1906 lte = inode_stream_lte(inode, i, table);
1908 lte_reference_for_logical_write(lte, ctx, inode->i_nlink);
1909 else if (!is_zero_hash(inode_stream_hash(inode, i)))
1910 return WIMLIB_ERR_RESOURCE_NOT_FOUND;
/* for_image() callback: walk the current image's inodes and reference
 * every stream they use for the logical write.  Unhashed streams have
 * their out_refcnt reset first.  */
1916 image_find_streams_to_write(WIMStruct *wim)
1918 struct find_streams_ctx *ctx;
1919 struct wim_image_metadata *imd;
1920 struct wim_inode *inode;
1921 struct wim_lookup_table_entry *lte;
1925 imd = wim_get_current_image_metadata(wim);
1927 image_for_each_unhashed_stream(lte, imd)
1928 lte->out_refcnt = 0;
1930 /* Go through this image's inodes to find any streams that have not been
1932 image_for_each_inode(inode, imd) {
1933 ret = inode_find_streams_to_write(inode, wim->lookup_table, ctx);
1941 * Build a list of streams (via `struct wim_lookup_table_entry's) included in
1942 * the "logical write" of the WIM, meaning all streams that are referenced at
1943 * least once by dentries in the image(s) being written. 'out_refcnt' on
1944 * each stream being included in the logical write is set to the number of
1945 * references from dentries in the image(s). Furthermore, 'unique_size' on each
1946 * stream being included in the logical write is set to indicate whether that
1947 * stream has a unique size relative to the streams being included in the
1948 * logical write. Still furthermore, 'part_number' on each stream being
1949 * included in the logical write is set to the part number given in the
1950 * in-memory header of @p wim.
1952 * This is considered a "logical write" because it does not take into account
1953 * filtering out streams already present in the WIM (in the case of an in place
1954 * overwrite) or present in other WIMs (in case of creating delta WIM).
1957 prepare_logical_stream_list(WIMStruct *wim, int image, bool streams_ok,
1958 struct find_streams_ctx *ctx)
/* @streams_ok: caller asserts that all streams are being written and
 * their reference counts are already correct, enabling the fast path
 * below (presumably set when writing all images -- TODO confirm against
 * callers).  */
1962 if (streams_ok && (image == WIMLIB_ALL_IMAGES ||
1963 (image == 1 && wim->hdr.image_count == 1)))
1965 /* Fast case: Assume that all streams are being written and
1966 * that the reference counts are correct. */
1967 struct wim_lookup_table_entry *lte;
1968 struct wim_image_metadata *imd;
1971 for_lookup_table_entry(wim->lookup_table,
1972 do_lte_full_reference_for_logical_write, ctx);
1973 for (i = 0; i < wim->hdr.image_count; i++) {
1974 imd = wim->image_metadata[i];
1975 image_for_each_unhashed_stream(lte, imd)
1976 do_lte_full_reference_for_logical_write(lte, ctx);
1979 /* Slow case: Walk through the images being written and
1980 * determine the streams referenced. */
1981 for_lookup_table_entry(wim->lookup_table, lte_zero_out_refcnt, NULL);
1983 ret = for_image(wim, image, image_find_streams_to_write);
/* for_lookup_table_entry() callback: decide whether @lte is "filtered"
 * out of the write -- because it already resides in the WIM being
 * overwritten, or in an external WIM when external WIMs are being
 * skipped -- and record the result in lte->filtered.  */
1992 process_filtered_stream(struct wim_lookup_table_entry *lte, void *_ctx)
1994 struct find_streams_ctx *ctx = _ctx;
1997 /* Calculate and set lte->filtered. */
1998 if (lte->resource_location == RESOURCE_IN_WIM) {
1999 if (lte->rspec->wim == ctx->wim &&
2000 (ctx->write_flags & WIMLIB_WRITE_FLAG_OVERWRITE))
2001 filtered |= FILTERED_SAME_WIM;
2002 if (lte->rspec->wim != ctx->wim &&
2003 (ctx->write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS))
2004 filtered |= FILTERED_EXTERNAL_WIM;
2006 lte->filtered = filtered;
2008 /* Filtered streams get inserted into the stream size table too, unless
2009 * they already were. This is because streams that are checksummed
2010 * on-the-fly during the write should not be written if they are
2011 * duplicates of filtered stream. */
2012 if (lte->filtered && lte->out_refcnt == 0)
2013 stream_size_table_insert(lte, &ctx->stream_size_tab);
/* for_lookup_table_entry() callback: clear the 'filtered' flag.  */
2018 mark_stream_not_filtered(struct wim_lookup_table_entry *lte, void *_ignore)
2024 /* Given the list of streams to include in a logical write of a WIM, handle
2025 * filtering out streams already present in the WIM or already present in
2026 * external WIMs, depending on the write flags provided. */
2028 handle_stream_filtering(struct find_streams_ctx *ctx)
2030 struct wim_lookup_table_entry *lte, *tmp;
/* No filtering flags set: simply mark every stream unfiltered and
 * keep the logical write list as-is.  */
2032 if (!(ctx->write_flags & (WIMLIB_WRITE_FLAG_OVERWRITE |
2033 WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS)))
2035 for_lookup_table_entry(ctx->wim->lookup_table,
2036 mark_stream_not_filtered, ctx);
2040 for_lookup_table_entry(ctx->wim->lookup_table,
2041 process_filtered_stream, ctx);
2043 /* Streams in logical write list that were filtered can be removed. */
2044 list_for_each_entry_safe(lte, tmp, &ctx->stream_list,
2047 list_del(&lte->write_streams_list);
2050 /* Prepares list of streams to write for the specified WIM image(s). This wraps
2051 * around prepare_logical_stream_list() to handle filtering out streams already
2052 * present in the WIM or already present in external WIMs, depending on the
2053 * write flags provided.
2055 * Note: some additional data is stored in each `struct wim_lookup_table_entry':
2057 * - 'out_refcnt' is set to the number of references found for the logical write.
2058 * This will be nonzero on all streams in the list returned by this function,
2059 * but will also be nonzero on streams not in the list that were included in
2060 * the logical write list, but filtered out from the returned list.
2061 * - 'filtered' is set to nonzero if the stream was filtered. Filtered streams
2062 * are not included in the list of streams returned by this function.
2063 * - 'unique_size' is set if the stream has a unique size among all streams in
2064 * the logical write plus any filtered streams in the entire WIM that could
2065 * potentially turn out to have the same checksum as a yet-to-be-checksummed
2066 * stream being written.
2069 prepare_stream_list(WIMStruct *wim, int image, int write_flags,
2070 struct list_head *stream_list)
2074 struct find_streams_ctx ctx;
2076 INIT_LIST_HEAD(&ctx.stream_list);
/* Size the stream-size table like the lookup table itself.  */
2077 ret = init_stream_size_table(&ctx.stream_size_tab,
2078 wim->lookup_table->capacity);
2081 ctx.write_flags = write_flags;
2084 streams_ok = ((write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK) != 0);
2086 ret = prepare_logical_stream_list(wim, image, streams_ok, &ctx);
2088 goto out_destroy_table;
/* Drop filtered streams, then hand the final list to the caller.  */
2090 handle_stream_filtering(&ctx);
2091 list_transfer(&ctx.stream_list, stream_list);
2094 destroy_stream_size_table(&ctx.stream_size_tab);
/* Write the streams for the specified image(s) to the output WIM.
 * If @stream_list_override is NULL, the stream list is computed from
 * the image(s) being written; otherwise the caller-provided list is
 * used with the streams' existing reference counts (currently only
 * from wimlib_split()).  */
2099 write_wim_streams(WIMStruct *wim, int image, int write_flags,
2100 unsigned num_threads,
2101 wimlib_progress_func_t progress_func,
2102 struct list_head *stream_list_override)
2105 struct list_head _stream_list;
2106 struct list_head *stream_list;
2107 struct wim_lookup_table_entry *lte;
2109 if (stream_list_override == NULL) {
2110 /* Normal case: prepare stream list from image(s) being written.
2112 stream_list = &_stream_list;
2113 ret = prepare_stream_list(wim, image, write_flags, stream_list);
2117 /* Currently only as a result of wimlib_split() being called:
2118 * use stream list already explicitly provided. Use existing
2119 * reference counts. */
2120 stream_list = stream_list_override;
2121 list_for_each_entry(lte, stream_list, write_streams_list)
2122 lte->out_refcnt = (lte->refcnt ? lte->refcnt : 1);
2125 return write_stream_list(stream_list,
2128 wim->out_compression_type,
2129 wim->out_chunk_size,
/* Write (or re-use) the metadata resource for each image being written.
 * Modified images get a freshly built metadata resource; unmodified
 * images have their existing resource re-used in place (when
 * overwriting) or copied to the output file.  */
2137 write_wim_metadata_resources(WIMStruct *wim, int image, int write_flags,
2138 wimlib_progress_func_t progress_func)
2143 int write_resource_flags;
2145 if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA) {
2146 DEBUG("Not writing any metadata resources.");
2150 write_resource_flags = write_flags_to_resource_flags(write_flags);
2152 DEBUG("Writing metadata resources (offset=%"PRIu64")",
2153 wim->out_fd.offset);
2156 progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN, NULL);
2158 if (image == WIMLIB_ALL_IMAGES) {
2160 end_image = wim->hdr.image_count;
2162 start_image = image;
2166 for (int i = start_image; i <= end_image; i++) {
2167 struct wim_image_metadata *imd;
2169 imd = wim->image_metadata[i - 1];
2170 /* Build a new metadata resource only if image was modified from
2171 * the original (or was newly added). Otherwise just copy the
2173 if (imd->modified) {
2174 DEBUG("Image %u was modified; building and writing new "
2175 "metadata resource", i);
2176 ret = write_metadata_resource(wim, i,
2177 write_resource_flags);
2178 } else if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
2179 DEBUG("Image %u was not modified; re-using existing "
2180 "metadata resource.", i);
2181 wim_res_spec_to_hdr(imd->metadata_lte->rspec,
2182 &imd->metadata_lte->out_reshdr);
2185 DEBUG("Image %u was not modified; copying existing "
2186 "metadata resource.", i);
2187 ret = write_wim_resource(imd->metadata_lte,
2189 wim->out_compression_type,
2190 wim->out_chunk_size,
2191 &imd->metadata_lte->out_reshdr,
2192 write_resource_flags,
2199 progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_END, NULL);
/* Open @path for writing the WIM (mode 0644, binary) and initialize
 * wim->out_fd with the resulting descriptor.  Returns WIMLIB_ERR_OPEN
 * on failure.  */
2204 open_wim_writable(WIMStruct *wim, const tchar *path, int open_flags)
2207 DEBUG("Opening \"%"TS"\" for writing.", path);
2209 raw_fd = topen(path, open_flags | O_BINARY, 0644);
2211 ERROR_WITH_ERRNO("Failed to open \"%"TS"\" for writing", path);
2212 return WIMLIB_ERR_OPEN;
2214 filedes_init(&wim->out_fd, raw_fd);
/* Close wim->out_fd, unless the caller supplied the file descriptor
 * (WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR), in which case it is left open.
 * A failed close() yields WIMLIB_ERR_WRITE.  */
2219 close_wim_writable(WIMStruct *wim, int write_flags)
2223 if (!(write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)) {
2224 DEBUG("Closing WIM file.");
2225 if (filedes_valid(&wim->out_fd))
2226 if (filedes_close(&wim->out_fd))
2227 ret = WIMLIB_ERR_WRITE;
2229 filedes_invalidate(&wim->out_fd);
2236 * Finish writing a WIM file: write the lookup table, xml data, and integrity
2237 * table, then overwrite the WIM header. By default, closes the WIM file
2238 * descriptor (@wim->out_fd) if successful.
2240 * write_flags is a bitwise OR of the following:
2242 * (public) WIMLIB_WRITE_FLAG_CHECK_INTEGRITY:
2243 * Include an integrity table.
2245 * (public) WIMLIB_WRITE_FLAG_FSYNC:
2246 * fsync() the output file before closing it.
2248 * (public) WIMLIB_WRITE_FLAG_PIPABLE:
2249 * Writing a pipable WIM, possibly to a pipe; include pipable WIM
2250 * stream headers before the lookup table and XML data, and also
2251 * write the WIM header at the end instead of seeking to the
2252 * beginning. Can't be combined with
2253 * WIMLIB_WRITE_FLAG_CHECK_INTEGRITY.
2255 * (private) WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE:
2256 * Don't write the lookup table.
2258 * (private) WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE:
2259 * When (if) writing the integrity table, re-use entries from the
2260 * existing integrity table, if possible.
2262 * (private) WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML:
2263 * After writing the XML data but before writing the integrity
2264 * table, write a temporary WIM header and flush the stream so that
2265 * the WIM is less likely to become corrupted upon abrupt program
2267 * (private) WIMLIB_WRITE_FLAG_HEADER_AT_END:
2268 * Instead of overwriting the WIM header at the beginning of the
2269 * file, simply append it to the end of the file. (Used when
2271 * (private) WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR:
2272 * Do not close the file descriptor @wim->out_fd on either success
2274 * (private) WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES:
2275 * Use the existing <TOTALBYTES> stored in the in-memory XML
2276 * information, rather than setting it to the offset of the XML
2277 * data being written.
/*
 * Write the final sections of a WIM: the lookup table, the XML data, the
 * optional integrity table, and finally the WIM header -- either back at the
 * beginning of the file or appended at the end of a pipable WIM, per
 * WIMLIB_WRITE_FLAG_HEADER_AT_END (see the flag documentation just above).
 * Returns 0 on success or a WIMLIB_ERR_* code.
 *
 * NOTE(review): this excerpt is elided; the declarations of `ret',
 * `hdr_offset', and `xml_totalbytes', several `if (ret) return ret;' checks,
 * and closing braces are not shown here -- verify against the full file.
 */
2280 finish_write(WIMStruct *wim, int image, int write_flags,
2281 wimlib_progress_func_t progress_func,
2282 struct list_head *stream_list_override)
2286 int write_resource_flags;
2287 off_t old_lookup_table_end;
2288 off_t new_lookup_table_end;
2291 DEBUG("image=%d, write_flags=%08x", image, write_flags);
/* Translate WIMLIB_WRITE_FLAG_* into the resource-level flag set. */
2293 write_resource_flags = write_flags_to_resource_flags(write_flags);
2295 /* In the WIM header, there is room for the resource entry for a
2296 * metadata resource labeled as the "boot metadata". This entry should
2297 * be zeroed out if there is no bootable image (boot_idx 0). Otherwise,
2298 * it should be a copy of the resource entry for the image that is
2299 * marked as bootable. This is not well documented... */
2300 if (wim->hdr.boot_idx == 0) {
2301 zero_reshdr(&wim->hdr.boot_metadata_reshdr);
2303 copy_reshdr(&wim->hdr.boot_metadata_reshdr,
2304 &wim->image_metadata[wim->hdr.boot_idx- 1
2305 ]->metadata_lte->out_reshdr);
2308 /* Write lookup table. (Save old position first.) */
2309 old_lookup_table_end = wim->hdr.lookup_table_reshdr.offset_in_wim +
2310 wim->hdr.lookup_table_reshdr.size_in_wim;
2311 if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
2312 ret = write_wim_lookup_table(wim, image, write_flags,
2313 &wim->hdr.lookup_table_reshdr,
2314 stream_list_override);
2319 /* Write XML data. */
/* <TOTALBYTES> is normally the current output offset (the size of
 * everything preceding the XML data), unless the caller asked to keep
 * the value already stored in the in-memory XML info. */
2320 xml_totalbytes = wim->out_fd.offset;
2321 if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES)
2322 xml_totalbytes = WIM_TOTALBYTES_USE_EXISTING;
2323 ret = write_wim_xml_data(wim, image, xml_totalbytes,
2324 &wim->hdr.xml_data_reshdr,
2325 write_resource_flags);
2329 /* Write integrity table (optional). */
2330 if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2331 if (write_flags & WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) {
/* Write a temporary "checkpoint" header -- with no integrity-table
 * reference and WRITE_IN_PROGRESS set -- so the file stays usable if
 * we die while the integrity table is being computed. */
2332 struct wim_header checkpoint_hdr;
2333 memcpy(&checkpoint_hdr, &wim->hdr, sizeof(struct wim_header));
2334 zero_reshdr(&checkpoint_hdr.integrity_table_reshdr);
2335 checkpoint_hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2336 ret = write_wim_header_at_offset(&checkpoint_hdr,
/* old_lookup_table_end == 0 signals that no entries of an existing
 * integrity table may be reused. */
2342 if (!(write_flags & WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE))
2343 old_lookup_table_end = 0;
2345 new_lookup_table_end = wim->hdr.lookup_table_reshdr.offset_in_wim +
2346 wim->hdr.lookup_table_reshdr.size_in_wim;
2348 ret = write_integrity_table(wim,
2349 new_lookup_table_end,
2350 old_lookup_table_end,
2355 /* No integrity table. */
2356 zero_reshdr(&wim->hdr.integrity_table_reshdr);
2359 /* Now that all information in the WIM header has been determined, the
2360 * preliminary header written earlier can be overwritten, the header of
2361 * the existing WIM file can be overwritten, or the final header can be
2362 * written to the end of the pipable WIM. */
2363 wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2365 if (write_flags & WIMLIB_WRITE_FLAG_HEADER_AT_END)
2366 hdr_offset = wim->out_fd.offset;
2367 DEBUG("Writing new header @ %"PRIu64".", hdr_offset);
2368 ret = write_wim_header_at_offset(&wim->hdr, &wim->out_fd, hdr_offset);
2372 /* Possibly sync file data to disk before closing. On POSIX systems, it
2373 * is necessary to do this before using rename() to overwrite an
2374 * existing file with a new file. Otherwise, data loss would occur if
2375 * the system is abruptly terminated when the metadata for the rename
2376 * operation has been written to disk, but the new file data has not.
2378 if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
2379 DEBUG("Syncing WIM file.");
2380 if (fsync(wim->out_fd.fd)) {
2381 ERROR_WITH_ERRNO("Error syncing data to WIM file");
2382 return WIMLIB_ERR_WRITE;
/* Skipped when WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR is set (the caller
 * owns the descriptor) -- see close_wim_writable(). */
2386 if (close_wim_writable(wim, write_flags)) {
2387 ERROR_WITH_ERRNO("Failed to close the output WIM file");
2388 return WIMLIB_ERR_WRITE;
2394 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
/*
 * Acquire an exclusive, non-blocking advisory lock on the WIM file with
 * flock(LOCK_EX | LOCK_NB) so two processes cannot modify it concurrently.
 * EWOULDBLOCK (another process holds the lock) is a hard error
 * (WIMLIB_ERR_ALREADY_LOCKED); any other flock() failure only warns.
 * No-op if @fd is -1 or the WIM is already marked locked.
 */
2396 lock_wim(WIMStruct *wim, int fd)
2399 if (fd != -1 && !wim->wim_locked) {
2400 ret = flock(fd, LOCK_EX | LOCK_NB);
2402 if (errno == EWOULDBLOCK) {
2403 ERROR("`%"TS"' is already being modified or has been "
2404 "mounted read-write\n"
2405 " by another process!", wim->filename);
2406 ret = WIMLIB_ERR_ALREADY_LOCKED;
2408 WARNING_WITH_ERRNO("Failed to lock `%"TS"'",
/* Remember the lock so later calls (checked above) are no-ops. */
2413 wim->wim_locked = 1;
2421 * write_pipable_wim():
2423 * Perform the intermediate stages of creating a "pipable" WIM (i.e. a WIM
2424 * capable of being applied from a pipe).
2426 * Pipable WIMs are a wimlib-specific modification of the WIM format such that
2427 * images can be applied from them sequentially when the file data is sent over
2428 * a pipe. In addition, a pipable WIM can be written sequentially to a pipe.
2429 * The modifications made to the WIM format for pipable WIMs are:
2431 * - Magic characters in header are "WLPWM\0\0\0" (wimlib pipable WIM) instead
2432 * of "MSWIM\0\0\0". This lets wimlib know that the WIM is pipable and also
2433 * stops other software from trying to read the file as a normal WIM.
2435 * - The header at the beginning of the file does not contain all the normal
2436 * information; in particular it will have all 0's for the lookup table and
2437 * XML data resource entries. This is because this information cannot be
2438 * determined until the lookup table and XML data have been written.
2439 * Consequently, wimlib will write the full header at the very end of the
2440 * file. The header at the end, however, is only used when reading the WIM
2441 * from a seekable file (not a pipe).
2443 * - An extra copy of the XML data is placed directly after the header. This
2444 * allows image names and sizes to be determined at an appropriate time when
2445 * reading the WIM from a pipe. This copy of the XML data is ignored if the
2446 * WIM is read from a seekable file (not a pipe).
2448 * - The format of resources, or streams, has been modified to allow them to be
2449 * used before the "lookup table" has been read. Each stream is prefixed with
2450 * a `struct pwm_stream_hdr' that is basically an abbreviated form of `struct
2451 * wim_lookup_table_entry_disk' that only contains the SHA1 message digest,
2452 * uncompressed stream size, and flags that indicate whether the stream is
2453 * compressed. The data of uncompressed streams then follows literally, while
2454 * the data of compressed streams follows in a modified format. Compressed
2455 * streams do not begin with a chunk table, since the chunk table cannot be
2456 * written until all chunks have been compressed. Instead, each compressed
2457 * chunk is prefixed by a `struct pwm_chunk_hdr' that gives its size.
2458 * Furthermore, the chunk table is written at the end of the resource instead
2459 * of the start. Note: chunk offsets are given in the chunk table as if the
2460 * `struct pwm_chunk_hdr's were not present; also, the chunk table is only
2461 * used if the WIM is being read from a seekable file (not a pipe).
2463 * - Metadata resources always come before other file resources (streams).
2464 * (This does not by itself constitute an incompatibility with normal WIMs,
2465 * since this is valid in normal WIMs.)
2467 * - At least up to the end of the file resources, all components must be packed
2468 * as tightly as possible; there cannot be any "holes" in the WIM. (This does
2469 not by itself constitute an incompatibility with normal WIMs, since this
2470 * is valid in normal WIMs.)
2472 * Note: the lookup table, XML data, and header at the end are not used when
2473 * applying from a pipe. They exist to support functionality such as image
2474 * application and export when the WIM is *not* read from a pipe.
2476 * Layout of pipable WIM:
2478 * ---------+----------+--------------------+----------------+--------------+-----------+--------+
2479 * | Header | XML data | Metadata resources | File resources | Lookup table | XML data | Header |
2480 * ---------+----------+--------------------+----------------+--------------+-----------+--------+
2482 * Layout of normal WIM:
2484 * +--------+-----------------------------+-------------------------+
2485 * | Header | File and metadata resources | Lookup table | XML data |
2486 * +--------+-----------------------------+-------------------------+
2488 * An optional integrity table can follow the final XML data in both normal and
2489 * pipable WIMs. However, due to implementation details, wimlib currently can
2490 * only include an integrity table in a pipable WIM when writing it to a
2491 * seekable file (not a pipe).
2493 * Do note that since pipable WIMs are not supported by Microsoft's software,
2494 * wimlib does not create them unless explicitly requested (with
2495 * WIMLIB_WRITE_FLAG_PIPABLE) and as stated above they use different magic
2496 * characters to identify the file.
/*
 * Write the pipe-ordered middle of a pipable WIM: the extra XML data copy
 * (directly after the header), then the metadata resources, then the file
 * streams -- see the long format description above. Returns 0 or a
 * WIMLIB_ERR_* code; the trailing lookup table, XML data, and final header
 * are written afterwards by finish_write().
 */
2499 write_pipable_wim(WIMStruct *wim, int image, int write_flags,
2500 unsigned num_threads, wimlib_progress_func_t progress_func,
2501 struct list_head *stream_list_override)
2504 struct wim_reshdr xml_reshdr;
2506 WARNING("Creating a pipable WIM, which will "
2508 " with Microsoft's software (wimgapi/imagex/Dism).");
2510 /* At this point, the header at the beginning of the file has already
2513 /* For efficiency, when wimlib adds an image to the WIM with
2514 * wimlib_add_image(), the SHA1 message digests of files is not
2515 * calculated; instead, they are calculated while the files are being
2516 * written. However, this does not work when writing a pipable WIM,
2517 * since when writing a stream to a pipable WIM, its SHA1 message digest
2518 * needs to be known before the stream data is written. Therefore,
2519 * before getting much farther, we need to pre-calculate the SHA1
2520 * message digests of all streams that will be written. */
2521 ret = wim_checksum_unhashed_streams(wim);
2525 /* Write extra copy of the XML data. */
2526 ret = write_wim_xml_data(wim, image, WIM_TOTALBYTES_OMIT,
2528 WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE);
2532 /* Write metadata resources for the image(s) being included in the
2534 ret = write_wim_metadata_resources(wim, image, write_flags,
2539 /* Write streams needed for the image(s) being included in the output
2540 * WIM, or streams needed for the split WIM part. */
2541 return write_wim_streams(wim, image, write_flags, num_threads,
2542 progress_func, stream_list_override);
2544 /* The lookup table, XML data, and header at end are handled by
2545 * finish_write(). */
2548 /* Write a standalone WIM or split WIM (SWM) part to a new file or to a file
/*
 * Write a standalone WIM or one part of a split WIM (SWM) to a new file or
 * to a file descriptor.
 *
 * @path_or_fd is a `const tchar *' path, or a `const int *' file descriptor
 * when WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR is set.  @image is a 1-based image
 * index or WIMLIB_ALL_IMAGES.  @part_number/@total_parts are 1/1 for a
 * standalone WIM.  The in-memory header (wim->hdr) is modified during the
 * write, then restored from @hdr_save before returning.
 *
 * NOTE(review): this excerpt is elided -- the declarations of `ret' and the
 * `guid' parameter, some braces, and several error checks are not shown.
 */
2551 write_wim_part(WIMStruct *wim,
2552 const void *path_or_fd,
2555 unsigned num_threads,
2556 wimlib_progress_func_t progress_func,
2557 unsigned part_number,
2558 unsigned total_parts,
2559 struct list_head *stream_list_override,
2563 struct wim_header hdr_save;
2564 struct list_head lt_stream_list_override;
/* Debug dump of all parameters and decoded write flags. */
2566 if (total_parts == 1)
2567 DEBUG("Writing standalone WIM.");
2569 DEBUG("Writing split WIM part %u/%u", part_number, total_parts);
2570 if (image == WIMLIB_ALL_IMAGES)
2571 DEBUG("Including all images.");
2573 DEBUG("Including image %d only.", image);
2574 if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)
2575 DEBUG("File descriptor: %d", *(const int*)path_or_fd);
2577 DEBUG("Path: \"%"TS"\"", (const tchar*)path_or_fd);
2578 DEBUG("Write flags: 0x%08x", write_flags);
2579 if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
2580 DEBUG("\tCHECK_INTEGRITY");
2581 if (write_flags & WIMLIB_WRITE_FLAG_REBUILD)
2583 if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
2584 DEBUG("\tRECOMPRESS");
2585 if (write_flags & WIMLIB_WRITE_FLAG_FSYNC)
2587 if (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE)
2589 if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
2590 DEBUG("\tIGNORE_READONLY_FLAG");
2591 if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2593 if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)
2594 DEBUG("\tFILE_DESCRIPTOR");
2595 if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)
2596 DEBUG("\tNO_METADATA");
2597 if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES)
2598 DEBUG("\tUSE_EXISTING_TOTALBYTES");
2599 if (num_threads == 0)
2600 DEBUG("Number of threads: autodetect");
2602 DEBUG("Number of threads: %u", num_threads);
2603 DEBUG("Progress function: %s", (progress_func ? "yes" : "no"));
2604 DEBUG("Stream list: %s", (stream_list_override ? "specified" : "autodetect"));
2605 DEBUG("GUID: %s", ((guid || wim->guid_set_explicitly) ?
2606 "specified" : "generate new"));
2608 /* Internally, this is always called with a valid part number and total
2610 wimlib_assert(total_parts >= 1);
2611 wimlib_assert(part_number >= 1 && part_number <= total_parts);
2613 /* A valid image (or all images) must be specified. */
2614 if (image != WIMLIB_ALL_IMAGES &&
2615 (image < 1 || image > wim->hdr.image_count))
2616 return WIMLIB_ERR_INVALID_IMAGE;
2618 /* If we need to write metadata resources, make sure the ::WIMStruct has
2619 * the needed information attached (e.g. is not a resource-only WIM,
2620 * such as a non-first part of a split WIM). */
2621 if (!wim_has_metadata(wim) &&
2622 !(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA))
2623 return WIMLIB_ERR_METADATA_NOT_FOUND;
2625 /* Check for contradictory flags. */
2626 if ((write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2627 WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2628 == (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2629 WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2630 return WIMLIB_ERR_INVALID_PARAM;
2632 if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2633 WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2634 == (WIMLIB_WRITE_FLAG_PIPABLE |
2635 WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2636 return WIMLIB_ERR_INVALID_PARAM;
2638 /* Save previous header, then start initializing the new one. */
2639 memcpy(&hdr_save, &wim->hdr, sizeof(struct wim_header));
2641 /* Set default integrity and pipable flags. */
/* When the caller expressed no preference, inherit pipability and
 * integrity-table presence from the source WIM. */
2642 if (!(write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2643 WIMLIB_WRITE_FLAG_NOT_PIPABLE)))
2644 if (wim_is_pipable(wim))
2645 write_flags |= WIMLIB_WRITE_FLAG_PIPABLE;
2647 if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2648 WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)))
2649 if (wim_has_integrity_table(wim))
2650 write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
2652 /* Set appropriate magic number. */
2653 if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2654 wim->hdr.magic = PWM_MAGIC;
2656 wim->hdr.magic = WIM_MAGIC;
2658 /* Clear header flags that will be set automatically. */
2659 wim->hdr.flags &= ~(WIM_HDR_FLAG_METADATA_ONLY |
2660 WIM_HDR_FLAG_RESOURCE_ONLY |
2661 WIM_HDR_FLAG_SPANNED |
2662 WIM_HDR_FLAG_WRITE_IN_PROGRESS);
2664 /* Set SPANNED header flag if writing part of a split WIM. */
2665 if (total_parts != 1)
2666 wim->hdr.flags |= WIM_HDR_FLAG_SPANNED;
2668 /* Set part number and total parts of split WIM. This will be 1 and 1
2669 * if the WIM is standalone. */
2670 wim->hdr.part_number = part_number;
2671 wim->hdr.total_parts = total_parts;
2673 /* Set compression type if different. */
2674 if (wim->compression_type != wim->out_compression_type) {
2675 ret = set_wim_hdr_cflags(wim->out_compression_type, &wim->hdr);
2676 wimlib_assert(ret == 0);
2679 /* Set chunk size if different. */
2680 wim->hdr.chunk_size = wim->out_chunk_size;
2682 /* Use GUID if specified; otherwise generate a new one. */
2684 memcpy(wim->hdr.guid, guid, WIMLIB_GUID_LEN);
2685 else if (!wim->guid_set_explicitly)
2686 randomize_byte_array(wim->hdr.guid, WIMLIB_GUID_LEN);
2688 /* Clear references to resources that have not been written yet. */
2689 zero_reshdr(&wim->hdr.lookup_table_reshdr);
2690 zero_reshdr(&wim->hdr.xml_data_reshdr);
2691 zero_reshdr(&wim->hdr.boot_metadata_reshdr);
2692 zero_reshdr(&wim->hdr.integrity_table_reshdr);
2694 /* Set image count and boot index correctly for single image writes. */
2695 if (image != WIMLIB_ALL_IMAGES) {
2696 wim->hdr.image_count = 1;
2697 if (wim->hdr.boot_idx == image)
2698 wim->hdr.boot_idx = 1;
2700 wim->hdr.boot_idx = 0;
2703 /* Split WIMs can't be bootable. */
2704 if (total_parts != 1)
2705 wim->hdr.boot_idx = 0;
2707 /* Initialize output file descriptor. */
2708 if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) {
2709 /* File descriptor was explicitly provided. Return error if
2710 * file descriptor is not seekable, unless writing a pipable WIM
2712 wim->out_fd.fd = *(const int*)path_or_fd;
2713 wim->out_fd.offset = 0;
2714 if (!filedes_is_seekable(&wim->out_fd)) {
2715 ret = WIMLIB_ERR_INVALID_PARAM;
2716 if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2717 goto out_restore_hdr;
2718 if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2719 ERROR("Can't include integrity check when "
2720 "writing pipable WIM to pipe!");
2721 goto out_restore_hdr;
2726 /* Filename of WIM to write was provided; open file descriptor
2728 ret = open_wim_writable(wim, (const tchar*)path_or_fd,
2729 O_TRUNC | O_CREAT | O_RDWR);
2731 goto out_restore_hdr;
2734 /* Write initial header. This is merely a "dummy" header since it
2735 * doesn't have all the information yet, so it will be overwritten later
2736 * (unless writing a pipable WIM). */
2737 if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2738 wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2739 ret = write_wim_header(&wim->hdr, &wim->out_fd);
2740 wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2742 goto out_restore_hdr;
/* NOTE(review): the `<_' and `<e->' tokens below appear to be
 * HTML-entity-mangled `&lt_stream_list_override' and `&lte->' from the
 * original source -- verify against upstream wimlib before building. */
2744 if (stream_list_override) {
2745 struct wim_lookup_table_entry *lte;
2746 INIT_LIST_HEAD(<_stream_list_override);
2747 list_for_each_entry(lte, stream_list_override,
2750 list_add_tail(<e->lookup_table_list,
2751 <_stream_list_override);
2755 /* Write metadata resources and streams. */
2756 if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE)) {
2757 /* Default case: create a normal (non-pipable) WIM. */
2758 ret = write_wim_streams(wim, image, write_flags, num_threads,
2759 progress_func, stream_list_override);
2761 goto out_restore_hdr;
2763 ret = write_wim_metadata_resources(wim, image, write_flags,
2766 goto out_restore_hdr;
2768 /* Non-default case: create pipable WIM. */
2769 ret = write_pipable_wim(wim, image, write_flags, num_threads,
2770 progress_func, stream_list_override);
2772 goto out_restore_hdr;
/* Pipable WIMs get their final header appended, not rewritten. */
2773 write_flags |= WIMLIB_WRITE_FLAG_HEADER_AT_END;
2776 if (stream_list_override)
2777 stream_list_override = <_stream_list_override;
2779 /* Write lookup table, XML data, and (optional) integrity table. */
2780 ret = finish_write(wim, image, write_flags, progress_func,
2781 stream_list_override);
/* Cleanup path: restore the saved in-memory header and close the
 * output (ignoring close errors on this path). */
2783 memcpy(&wim->hdr, &hdr_save, sizeof(struct wim_header));
2784 (void)close_wim_writable(wim, write_flags);
2785 DEBUG("ret=%d", ret);
2789 /* Write a standalone WIM to a file or file descriptor. */
/*
 * Convenience wrapper around write_wim_part(): write a standalone WIM
 * (part 1 of 1) with no stream-list override and no explicit GUID.
 */
2791 write_standalone_wim(WIMStruct *wim, const void *path_or_fd,
2792 int image, int write_flags, unsigned num_threads,
2793 wimlib_progress_func_t progress_func)
2795 return write_wim_part(wim, path_or_fd, image, write_flags,
2796 num_threads, progress_func, 1, 1, NULL, NULL);
2799 /* API function documented in wimlib.h */
/*
 * API: write the WIM to the file named @path.  Only flags within
 * WIMLIB_WRITE_MASK_PUBLIC are honored; others are silently cleared.
 */
2801 wimlib_write(WIMStruct *wim, const tchar *path,
2802 int image, int write_flags, unsigned num_threads,
2803 wimlib_progress_func_t progress_func)
/* NOTE(review): the condition guarding this early return (presumably a
 * NULL/empty @path check) is elided from this excerpt -- confirm. */
2806 return WIMLIB_ERR_INVALID_PARAM;
2808 write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
2810 return write_standalone_wim(wim, path, image, write_flags,
2811 num_threads, progress_func);
2814 /* API function documented in wimlib.h */
/*
 * API: write the WIM to an already-open file descriptor @fd.  Masks the
 * flags to the public set, then forces WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR so
 * write_wim_part() interprets its argument as `const int *'.
 */
2816 wimlib_write_to_fd(WIMStruct *wim, int fd,
2817 int image, int write_flags, unsigned num_threads,
2818 wimlib_progress_func_t progress_func)
/* NOTE(review): the condition guarding this early return is elided from
 * this excerpt -- confirm against the full file. */
2821 return WIMLIB_ERR_INVALID_PARAM;
2823 write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
2824 write_flags |= WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR;
2826 return write_standalone_wim(wim, &fd, image, write_flags,
2827 num_threads, progress_func);
/*
 * Return whether any image's in-memory metadata has been marked modified.
 * NOTE(review): the return statements are elided from this excerpt.
 */
2831 any_images_modified(WIMStruct *wim)
2833 for (int i = 0; i < wim->hdr.image_count; i++)
2834 if (wim->image_metadata[i]->modified)
/*
 * for_lookup_table_entry() callback: fail with WIMLIB_ERR_RESOURCE_ORDER if
 * @lte's resource lives in this WIM beyond the end offset smuggled in via
 * wim->private (set by check_resource_offsets()).
 */
2840 check_resource_offset(struct wim_lookup_table_entry *lte, void *_wim)
2842 const WIMStruct *wim = _wim;
2843 off_t end_offset = *(const off_t*)wim->private;
2845 if (lte->resource_location == RESOURCE_IN_WIM && lte->rspec->wim == wim &&
2846 lte->rspec->offset_in_wim + lte->rspec->size_in_wim > end_offset)
2847 return WIMLIB_ERR_RESOURCE_ORDER;
2851 /* Make sure no file or metadata resources are located after the XML data (or
2852 * integrity table if present)--- otherwise we can't safely overwrite the WIM in
2853 * place and we return WIMLIB_ERR_RESOURCE_ORDER. */
/*
 * Run check_resource_offset() over every stream in the lookup table and
 * over each image's metadata resource; @end_offset reaches the callback
 * through wim->private.  See the comment above for why this matters for
 * safe in-place overwrites.
 */
2855 check_resource_offsets(WIMStruct *wim, off_t end_offset)
2860 wim->private = &end_offset;
2861 ret = for_lookup_table_entry(wim->lookup_table, check_resource_offset, wim);
2865 for (i = 0; i < wim->hdr.image_count; i++) {
2866 ret = check_resource_offset(wim->image_metadata[i]->metadata_lte, wim);
2874 * Overwrite a WIM, possibly appending streams to it.
2876 * A WIM looks like (or is supposed to look like) the following:
2878 * Header (212 bytes)
2879 * Streams and metadata resources (variable size)
2880 * Lookup table (variable size)
2881 * XML data (variable size)
2882 * Integrity table (optional) (variable size)
2884 * If we are not adding any streams or metadata resources, the lookup table is
2885 * unchanged--- so we only need to overwrite the XML data, integrity table, and
2886 * header. This operation is potentially unsafe if the program is abruptly
2887 * terminated while the XML data or integrity table are being overwritten, but
2888 * before the new header has been written. To partially alleviate this problem,
2889 * a special flag (WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) is passed to
2890 * finish_write() to cause a temporary WIM header to be written after the XML
2891 * data has been written. This may prevent the WIM from becoming corrupted if
2892 * the program is terminated while the integrity table is being calculated (but
2893 * no guarantees, due to write re-ordering...).
2895 * If we are adding new streams or images (metadata resources), the lookup table
2896 * needs to be changed, and those streams need to be written. In this case, we
2897 * try to perform a safe update of the WIM file by writing the streams *after*
2898 * the end of the previous WIM, then writing the new lookup table, XML data, and
2899 * (optionally) integrity table following the new streams. This will produce a
2900 * layout like the following:
2902 * Header (212 bytes)
2903 * (OLD) Streams and metadata resources (variable size)
2904 * (OLD) Lookup table (variable size)
2905 * (OLD) XML data (variable size)
2906 * (OLD) Integrity table (optional) (variable size)
2907 * (NEW) Streams and metadata resources (variable size)
2908 * (NEW) Lookup table (variable size)
2909 * (NEW) XML data (variable size)
2910 * (NEW) Integrity table (optional) (variable size)
2912 * At all points, the WIM is valid as nothing points to the new data yet. Then,
2913 * the header is overwritten to point to the new lookup table, XML data, and
2914 * integrity table, to produce the following layout:
2916 * Header (212 bytes)
2917 * Streams and metadata resources (variable size)
2918 * Nothing (variable size)
2919 * More Streams and metadata resources (variable size)
2920 * Lookup table (variable size)
2921 * XML data (variable size)
2922 * Integrity table (optional) (variable size)
2924 * This method allows an image to be appended to a large WIM very quickly, and
2925 is crash-safe except in the case of write re-ordering, but the
2926 * disadvantage is that a small hole is left in the WIM where the old lookup
2927 * table, xml data, and integrity table were. (These usually only take up a
2928 * small amount of space compared to the streams, however.)
/*
 * Update the WIM file in place, per the append strategy described in the
 * long comment above: new streams/metadata are written after the old data,
 * then finish_write() emits the new lookup table, XML data, optional
 * integrity table, and header.  On error the goto-cleanup path truncates
 * the file back to @old_wim_end and restores both the on-disk header flags
 * and the in-memory header.
 *
 * NOTE(review): this excerpt is elided -- declarations of `ret' and
 * `old_wim_end', several `if (ret) goto ...' checks, and some braces are
 * not shown here.
 */
2931 overwrite_wim_inplace(WIMStruct *wim, int write_flags,
2932 unsigned num_threads,
2933 wimlib_progress_func_t progress_func)
2936 struct list_head stream_list;
2938 u64 old_lookup_table_end, old_xml_begin, old_xml_end;
2939 struct wim_header hdr_save;
2941 DEBUG("Overwriting `%"TS"' in-place", wim->filename);
2943 /* Set default integrity flag. */
2944 if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2945 WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)))
2946 if (wim_has_integrity_table(wim))
2947 write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
2949 /* Set additional flags for overwrite. */
2950 write_flags |= WIMLIB_WRITE_FLAG_OVERWRITE |
2951 WIMLIB_WRITE_FLAG_STREAMS_OK;
2953 /* Make sure that the integrity table (if present) is after the XML
2954 * data, and that there are no stream resources, metadata resources, or
2955 * lookup tables after the XML data. Otherwise, these data would be
2957 old_xml_begin = wim->hdr.xml_data_reshdr.offset_in_wim;
2958 old_xml_end = old_xml_begin + wim->hdr.xml_data_reshdr.size_in_wim;
2959 old_lookup_table_end = wim->hdr.lookup_table_reshdr.offset_in_wim +
2960 wim->hdr.lookup_table_reshdr.size_in_wim;
2961 if (wim->hdr.integrity_table_reshdr.offset_in_wim != 0 &&
2962 wim->hdr.integrity_table_reshdr.offset_in_wim < old_xml_end) {
2963 WARNING("Didn't expect the integrity table to be before the XML data");
2964 return WIMLIB_ERR_RESOURCE_ORDER;
2967 if (old_lookup_table_end > old_xml_begin) {
2968 WARNING("Didn't expect the lookup table to be after the XML data");
2969 return WIMLIB_ERR_RESOURCE_ORDER;
2972 /* Set @old_wim_end, which indicates the point beyond which we don't
2973 * allow any file and metadata resources to appear without returning
2974 * WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we would otherwise
2975 * overwrite these resources). */
2976 if (!wim->deletion_occurred && !any_images_modified(wim)) {
2977 /* If no images have been modified and no images have been
2978 * deleted, a new lookup table does not need to be written. We
2979 * shall write the new XML data and optional integrity table
2980 * immediately after the lookup table. Note that this may
2981 * overwrite an existing integrity table. */
2982 DEBUG("Skipping writing lookup table "
2983 "(no images modified or deleted)");
2984 old_wim_end = old_lookup_table_end;
2985 write_flags |= WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE |
2986 WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML;
2987 } else if (wim->hdr.integrity_table_reshdr.offset_in_wim != 0) {
2988 /* Old WIM has an integrity table; begin writing new streams
2990 old_wim_end = wim->hdr.integrity_table_reshdr.offset_in_wim +
2991 wim->hdr.integrity_table_reshdr.size_in_wim;
2993 /* No existing integrity table; begin writing new streams after
2994 * the old XML data. */
2995 old_wim_end = old_xml_end;
/* Refuse the in-place path if any resource lies past @old_wim_end;
 * the caller falls back to a full rebuild on this error. */
2998 ret = check_resource_offsets(wim, old_wim_end);
3002 ret = prepare_stream_list(wim, WIMLIB_ALL_IMAGES, write_flags,
3007 ret = open_wim_writable(wim, wim->filename, O_RDWR);
3011 ret = lock_wim(wim, wim->out_fd.fd);
3015 /* Save original header so it can be restored in case of error */
3016 memcpy(&hdr_save, &wim->hdr, sizeof(struct wim_header));
3018 /* Set WIM_HDR_FLAG_WRITE_IN_PROGRESS flag in header. */
3019 wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
3020 ret = write_wim_header_flags(wim->hdr.flags, &wim->out_fd);
3022 ERROR_WITH_ERRNO("Error updating WIM header flags");
3023 goto out_restore_memory_hdr;
3026 if (filedes_seek(&wim->out_fd, old_wim_end) == -1) {
3027 ERROR_WITH_ERRNO("Can't seek to end of WIM");
3028 ret = WIMLIB_ERR_WRITE;
3029 goto out_restore_physical_hdr;
3032 ret = write_stream_list(&stream_list,
3035 wim->compression_type,
3044 ret = write_wim_metadata_resources(wim, WIMLIB_ALL_IMAGES,
3045 write_flags, progress_func);
3049 write_flags |= WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE;
3050 ret = finish_write(wim, WIMLIB_ALL_IMAGES, write_flags,
3051 progress_func, NULL);
3055 goto out_unlock_wim;
/* Error path: if new streams were appended, shrink the file back so
 * the old WIM layout remains intact. */
3058 if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
3059 WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)",
3060 wim->filename, old_wim_end);
3061 /* Return value of ftruncate() is ignored because this is
3062 * already an error path. */
3063 (void)ftruncate(wim->out_fd.fd, old_wim_end);
3065 out_restore_physical_hdr:
3066 (void)write_wim_header_flags(hdr_save.flags, &wim->out_fd);
3067 out_restore_memory_hdr:
3068 memcpy(&wim->hdr, &hdr_save, sizeof(struct wim_header));
3070 (void)close_wim_writable(wim, write_flags);
3072 wim->wim_locked = 0;
/*
 * Rebuild the WIM into a randomly-named temporary file next to the original
 * (name = original filename + 9 random alphanumeric characters), forcing
 * WIMLIB_WRITE_FLAG_FSYNC so the subsequent rename over the original is
 * crash-safe, then report WIMLIB_PROGRESS_MSG_RENAME.
 */
3077 overwrite_wim_via_tmpfile(WIMStruct *wim, int write_flags,
3078 unsigned num_threads,
3079 wimlib_progress_func_t progress_func)
3081 size_t wim_name_len;
3084 DEBUG("Overwriting `%"TS"' via a temporary file", wim->filename);
3086 /* Write the WIM to a temporary file in the same directory as the
3088 wim_name_len = tstrlen(wim->filename);
/* VLA sized from the filename: room for 9 random chars + NUL. */
3089 tchar tmpfile[wim_name_len + 10];
3090 tmemcpy(tmpfile, wim->filename, wim_name_len);
3091 randomize_char_array_with_alnum(tmpfile + wim_name_len, 9);
3092 tmpfile[wim_name_len + 9] = T('\0');
3094 ret = wimlib_write(wim, tmpfile, WIMLIB_ALL_IMAGES,
3095 write_flags | WIMLIB_WRITE_FLAG_FSYNC,
3096 num_threads, progress_func);
3104 /* Rename the new WIM file to the original WIM file. Note: on Windows
3105 * this actually calls win32_rename_replacement(), not _wrename(), so
3106 * that removing the existing destination file can be handled. */
3107 DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, wim->filename);
3108 ret = trename(tmpfile, wim->filename);
3110 ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'",
3111 tmpfile, wim->filename);
3118 return WIMLIB_ERR_RENAME;
3121 if (progress_func) {
3122 union wimlib_progress_info progress;
3123 progress.rename.from = tmpfile;
3124 progress.rename.to = wim->filename;
3125 progress_func(WIMLIB_PROGRESS_MSG_RENAME, &progress);
3130 /* API function documented in wimlib.h */
/*
 * API: overwrite the WIM file this WIMStruct was read from.  Prefers the
 * fast in-place append (overwrite_wim_inplace()) when it is safe: no hard
 * deletions (or SOFT_DELETE requested), neither REBUILD nor PIPABLE
 * requested, the WIM is not already pipable, and the output compression
 * type and chunk size match the originals.  If the in-place attempt fails
 * with WIMLIB_ERR_RESOURCE_ORDER, falls back to a full rebuild through a
 * temporary file.
 */
3132 wimlib_overwrite(WIMStruct *wim, int write_flags,
3133 unsigned num_threads,
3134 wimlib_progress_func_t progress_func)
3139 write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
/* Overwriting requires a filename, not a raw file descriptor. */
3141 if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)
3142 return WIMLIB_ERR_INVALID_PARAM;
3145 return WIMLIB_ERR_NO_FILENAME;
/* Temporarily drop the READONLY header flag (if requested) so
 * can_modify_wim() doesn't reject the operation, then restore it. */
3147 orig_hdr_flags = wim->hdr.flags;
3148 if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
3149 wim->hdr.flags &= ~WIM_HDR_FLAG_READONLY;
3150 ret = can_modify_wim(wim);
3151 wim->hdr.flags = orig_hdr_flags;
3155 if ((!wim->deletion_occurred || (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
3156 && !(write_flags & (WIMLIB_WRITE_FLAG_REBUILD |
3157 WIMLIB_WRITE_FLAG_PIPABLE))
3158 && !(wim_is_pipable(wim))
3159 && wim->compression_type == wim->out_compression_type
3160 && wim->chunk_size == wim->out_chunk_size)
3162 ret = overwrite_wim_inplace(wim, write_flags, num_threads,
3164 if (ret != WIMLIB_ERR_RESOURCE_ORDER)
3166 WARNING("Falling back to re-building entire WIM");
3168 return overwrite_wim_via_tmpfile(wim, write_flags, num_threads,