4 * Support for writing WIM files; write a WIM file, overwrite a WIM file, write
5 * compressed file resources, etc.
9 * Copyright (C) 2012, 2013 Eric Biggers
11 * This file is part of wimlib, a library for working with WIM files.
13 * wimlib is free software; you can redistribute it and/or modify it under the
14 * terms of the GNU General Public License as published by the Free
15 * Software Foundation; either version 3 of the License, or (at your option)
18 * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
19 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
20 * A PARTICULAR PURPOSE. See the GNU General Public License for more
23 * You should have received a copy of the GNU General Public License
24 * along with wimlib; if not, see http://www.gnu.org/licenses/.
29 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
30 /* On BSD, this should be included before "list.h" so that "list.h" can
31 * overwrite the LIST_HEAD macro. */
32 # include <sys/file.h>
40 #include "wimlib_internal.h"
41 #include "buffer_io.h"
43 #include "lookup_table.h"
46 #ifdef ENABLE_MULTITHREADED_COMPRESSION
55 # include <ntfs-3g/attrib.h>
56 # include <ntfs-3g/inode.h>
57 # include <ntfs-3g/dir.h>
68 #if defined(__WIN32__) && !defined(INVALID_HANDLE_VALUE)
69 # define INVALID_HANDLE_VALUE ((HANDLE)(-1))
72 /* Chunk table that's located at the beginning of each compressed resource in
73 * the WIM. (This is not the on-disk format; the on-disk format just has an
74 * array of offsets.) */
/* NOTE(review): the `struct chunk_table {' opener and several members used
 * elsewhere in this file (file_offset, num_chunks, table_disk_size,
 * cur_offset, cur_offset_p, and the trailing offsets[] array) are elided
 * from this view -- confirm against the full source. */
78 u64 original_resource_size;	/* uncompressed size of the resource */
79 u64 bytes_per_chunk_entry;	/* 8 for resources >= 4 GiB, else 4 (set in begin_wim_resource_chunk_tab()) */
87 * Allocates and initializes a chunk table, and reserves space for it in the
/* NOTE(review): the remainder of this doc comment, the return type, the
 * `out_fp'/`file_offset' parameters, `int ret;', the error-path labels, and
 * the final `return 0;' are elided from this view. */
91 begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte,
94 struct chunk_table **chunk_tab_ret)
96 u64 size = wim_resource_size(lte);
/* Number of WIM_CHUNK_SIZE chunks, rounding up for a partial final chunk. */
97 u64 num_chunks = (size + WIM_CHUNK_SIZE - 1) / WIM_CHUNK_SIZE;
98 size_t alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64);
99 struct chunk_table *chunk_tab = CALLOC(1, alloc_size);
103 ERROR("Failed to allocate chunk table for %"PRIu64" byte "
105 ret = WIMLIB_ERR_NOMEM;
108 chunk_tab->file_offset = file_offset;
109 chunk_tab->num_chunks = num_chunks;
110 chunk_tab->original_resource_size = size;
/* Resources >= 4 GiB need 8-byte chunk-table entries; smaller ones use 4. */
111 chunk_tab->bytes_per_chunk_entry = (size >= (1ULL << 32)) ? 8 : 4;
112 chunk_tab->table_disk_size = chunk_tab->bytes_per_chunk_entry *
114 chunk_tab->cur_offset = 0;
115 chunk_tab->cur_offset_p = chunk_tab->offsets;
/* Reserve space for the chunk table in the output file; the real offsets
 * are written later by finish_wim_resource_chunk_tab(). */
117 if (fwrite(chunk_tab, 1, chunk_tab->table_disk_size, out_fp) !=
118 chunk_tab->table_disk_size) {
119 ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
122 ret = WIMLIB_ERR_WRITE;
127 *chunk_tab_ret = chunk_tab;
133 * compress_func_t- Pointer to a function to compresses a chunk
134 * of a WIM resource. This may be either
135 * wimlib_xpress_compress() (xpress-compress.c) or
136 * wimlib_lzx_compress() (lzx-compress.c).
138 * @chunk: Uncompressed data of the chunk.
139 * @chunk_size: Size of the uncompressed chunk, in bytes.
140 * @out: Pointer to output buffer of size at least (@chunk_size - 1) bytes.
142 * Returns the size of the compressed data written to @out in bytes, or 0 if the
143 * data could not be compressed to (@chunk_size - 1) bytes or fewer.
145 * As a special requirement, the compression code is optimized for the WIM
146 * format and therefore requires (@chunk_size <= 32768).
148 * As another special requirement, the compression code will read up to 8 bytes
149 * off the end of the @chunk array for performance reasons. The values of these
150 * bytes will not affect the output of the compression, but the calling code
151 * must make sure that the buffer holding the uncompressed chunk is actually at
152 * least (@chunk_size + 8) bytes, or at least that these extra bytes are in
153 * mapped memory that will not cause a memory access violation if accessed.
/* NOTE(review): the continuation line of this parameter list (the `void *out'
 * output-buffer parameter described above) is elided from this view. */
155 typedef unsigned (*compress_func_t)(const void *chunk, unsigned chunk_size,
/* Maps a WIMLIB_COMPRESSION_TYPE_* constant to its compression function:
 * LZX gets wimlib_lzx_compress(); anything else falls through to
 * wimlib_xpress_compress(). (Return type and braces elided from this view.) */
159 get_compress_func(int out_ctype)
161 if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX)
162 return wimlib_lzx_compress;
164 return wimlib_xpress_compress;
168 * Writes a chunk of a WIM resource to an output file.
170 * @chunk: Uncompressed data of the chunk.
171 * @chunk_size: Size of the chunk (<= WIM_CHUNK_SIZE)
172 * @out_fp: FILE * to write the chunk to.
173 * @compress: Compression function to use (NULL if writing uncompressed
175 * @chunk_tab: Pointer to chunk table being created. It is updated with the
176 * offset of the chunk we write.
178 * Returns 0 on success; nonzero on failure.
/* NOTE(review): the return type, `const u8 *out_chunk;' declaration, several
 * braces/else branches, and the final `return 0;' are elided from this view. */
181 write_wim_resource_chunk(const void *chunk, unsigned chunk_size,
182 FILE *out_fp, compress_func_t compress,
183 struct chunk_table *chunk_tab)
186 unsigned out_chunk_size;
/* Scratch buffer for the compressed form; compress() returning 0 means the
 * chunk did not shrink, so it is stored uncompressed instead. */
188 u8 *compressed_chunk = alloca(chunk_size);
190 out_chunk_size = compress(chunk, chunk_size, compressed_chunk);
191 if (out_chunk_size) {
192 /* Write compressed */
193 out_chunk = compressed_chunk;
195 /* Write uncompressed */
197 out_chunk_size = chunk_size;
/* Record this chunk's starting offset in the in-memory chunk table. */
199 *chunk_tab->cur_offset_p++ = chunk_tab->cur_offset;
200 chunk_tab->cur_offset += out_chunk_size;
202 /* Write uncompressed */
204 out_chunk_size = chunk_size;
206 if (fwrite(out_chunk, 1, out_chunk_size, out_fp) != out_chunk_size) {
207 ERROR_WITH_ERRNO("Failed to write WIM resource chunk");
208 return WIMLIB_ERR_WRITE;
214 * Finishes a WIM chunk table and writes it to the output file at the correct
217 * The final size of the full compressed resource is returned in the
218 * @compressed_size_p.
/* NOTE(review): the return type, some braces, and the trailing `return 0;'
 * are elided from this view. */
221 finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab,
222 FILE *out_fp, u64 *compressed_size_p)
224 size_t bytes_written;
/* Seek back to where space for the table was reserved by
 * begin_wim_resource_chunk_tab(). */
225 if (fseeko(out_fp, chunk_tab->file_offset, SEEK_SET) != 0) {
226 ERROR_WITH_ERRNO("Failed to seek to byte %"PRIu64" of output "
227 "WIM file", chunk_tab->file_offset);
228 return WIMLIB_ERR_WRITE;
/* Byte-swap the offsets in place. The in-place u64 -> u32 narrowing is
 * safe in ascending order: the u32 slot at index i overlaps only u64
 * element i/2, which has already been read. */
231 if (chunk_tab->bytes_per_chunk_entry == 8) {
232 array_cpu_to_le64(chunk_tab->offsets, chunk_tab->num_chunks);
234 for (u64 i = 0; i < chunk_tab->num_chunks; i++)
235 ((u32*)chunk_tab->offsets)[i] =
236 cpu_to_le32(chunk_tab->offsets[i]);
/* The write starts bytes_per_chunk_entry into the array, skipping the
 * first entry (which is always 0 -- cur_offset starts at 0); presumably
 * table_disk_size accounts for this (its computation is elided above). */
238 bytes_written = fwrite((u8*)chunk_tab->offsets +
239 chunk_tab->bytes_per_chunk_entry,
240 1, chunk_tab->table_disk_size, out_fp);
241 if (bytes_written != chunk_tab->table_disk_size) {
242 ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
244 return WIMLIB_ERR_WRITE;
/* Return to the end of the file so the caller can keep appending data. */
246 if (fseeko(out_fp, 0, SEEK_END) != 0) {
247 ERROR_WITH_ERRNO("Failed to seek to end of output WIM file");
248 return WIMLIB_ERR_WRITE;
250 *compressed_size_p = chunk_tab->cur_offset + chunk_tab->table_disk_size;
/* Per-resource state threaded through write_resource_cb().
 * NOTE(review): additional members referenced below (out_fp, doing_sha,
 * sha_ctx) are elided from this view. */
254 struct write_resource_ctx {
255 compress_func_t compress;	/* NULL when writing uncompressed/raw */
256 struct chunk_table *chunk_tab;	/* NULL when writing uncompressed/raw */
/* read_resource_prefix() callback: optionally folds the chunk into the
 * running SHA1, then either writes it through the chunk-table path
 * (write_wim_resource_chunk) or raw with fwrite().
 * NOTE(review): the return type, the branch conditions, and the final
 * `return 0;' are elided from this view. */
263 write_resource_cb(const void *chunk, size_t chunk_size, void *_ctx)
265 struct write_resource_ctx *ctx = _ctx;
268 sha1_update(&ctx->sha_ctx, chunk, chunk_size);
271 return write_wim_resource_chunk(chunk, chunk_size,
272 ctx->out_fp, ctx->compress,
275 if (fwrite(chunk, 1, chunk_size, ctx->out_fp) != chunk_size) {
276 ERROR_WITH_ERRNO("Error writing to output WIM");
277 return WIMLIB_ERR_WRITE;
285 * Write a resource to an output WIM.
287 * @lte: Lookup table entry for the resource, which could be in another WIM,
288 * in an external file, or in another location.
290 * @out_fp: FILE * opened to the output WIM.
292 * @out_ctype: One of the WIMLIB_COMPRESSION_TYPE_* constants to indicate
293 * which compression algorithm to use.
295 * @out_res_entry: On success, this is filled in with the offset, flags,
296 * compressed size, and uncompressed size of the resource
299 * @flags: WIMLIB_RESOURCE_FLAG_RECOMPRESS to force data to be recompressed
300 * even if it could otherwise be copied directly from the input.
302 * Additional notes: The SHA1 message digest of the uncompressed data is
303 * calculated (except when doing a raw copy --- see below). If the @unhashed
304 * flag is set on the lookup table entry, this message digest is simply copied
305 * to it; otherwise, the message digest is compared with the existing one, and
306 * the function will fail if they do not match.
/* NOTE(review): the return type, several local declarations (ret, offset,
 * new_size), the `try_write_again:' label targeted below, various braces,
 * and parts of several statements are elided from this view. */
309 write_wim_resource(struct wim_lookup_table_entry *lte,
310 FILE *out_fp, int out_ctype,
311 struct resource_entry *out_res_entry,
314 struct write_resource_ctx write_ctx;
319 flags &= ~WIMLIB_RESOURCE_FLAG_RECOMPRESS;
321 if (wim_resource_size(lte) == 0) {
322 /* Empty resource; nothing needs to be done, so just return
327 /* Get current position in output WIM */
328 offset = ftello(out_fp);
330 ERROR_WITH_ERRNO("Can't get position in output WIM");
331 return WIMLIB_ERR_WRITE;
334 /* If we are not forcing the data to be recompressed, and the input
335 * resource is located in a WIM with the same compression type as that
336 * desired other than no compression, we can simply copy the compressed
337 * data without recompressing it. This also means we must skip
338 * calculating the SHA1, as we never will see the uncompressed data. */
339 if (!(flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) &&
340 lte->resource_location == RESOURCE_IN_WIM &&
341 out_ctype != WIMLIB_COMPRESSION_TYPE_NONE &&
342 wimlib_get_compression_type(lte->wim) == out_ctype)
344 flags |= WIMLIB_RESOURCE_FLAG_RAW;
345 write_ctx.doing_sha = false;
347 write_ctx.doing_sha = true;
348 sha1_init(&write_ctx.sha_ctx);
351 /* Initialize the chunk table and set the compression function if
352 * compressing the resource. */
353 if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
354 (flags & WIMLIB_RESOURCE_FLAG_RAW)) {
355 write_ctx.compress = NULL;
356 write_ctx.chunk_tab = NULL;
358 write_ctx.compress = get_compress_func(out_ctype);
359 ret = begin_wim_resource_chunk_tab(lte, out_fp,
361 &write_ctx.chunk_tab);
366 /* Write the entire resource by reading the entire resource and feeding
367 * the data through the write_resource_cb function. */
368 write_ctx.out_fp = out_fp;
370 ret = read_resource_prefix(lte, wim_resource_size(lte),
371 write_resource_cb, &write_ctx, flags);
373 /* Verify SHA1 message digest of the resource, or set the hash for the
375 if (write_ctx.doing_sha) {
376 u8 md[SHA1_HASH_SIZE];
377 sha1_final(md, &write_ctx.sha_ctx);
379 copy_hash(lte->hash, md);
380 } else if (!hashes_equal(md, lte->hash)) {
381 ERROR("WIM resource has incorrect hash!");
382 if (lte_filename_valid(lte)) {
383 ERROR("We were reading it from \"%"TS"\"; maybe "
384 "it changed while we were reading it.",
387 ret = WIMLIB_ERR_INVALID_RESOURCE_HASH;
388 goto out_free_chunk_tab;
/* Fill in the output resource entry; `offset' was captured before any data
 * was written. */
392 out_res_entry->flags = lte->resource_entry.flags;
393 out_res_entry->original_size = wim_resource_size(lte);
394 out_res_entry->offset = offset;
395 if (flags & WIMLIB_RESOURCE_FLAG_RAW) {
396 /* Doing a raw write: The new compressed size is the same as
397 * the compressed size in the other WIM. */
398 new_size = lte->resource_entry.size;
399 } else if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE) {
400 /* Using WIMLIB_COMPRESSION_TYPE_NONE: The new compressed size
401 * is the original size. */
402 new_size = lte->resource_entry.original_size;
403 out_res_entry->flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
405 /* Using a different compression type: Call
406 * finish_wim_resource_chunk_tab() and it will provide the new
407 * compressed size. */
408 ret = finish_wim_resource_chunk_tab(write_ctx.chunk_tab, out_fp,
411 goto out_free_chunk_tab;
412 if (new_size >= wim_resource_size(lte)) {
413 /* Oops! We compressed the resource to larger than the original
414 * size. Write the resource uncompressed instead. */
415 if (fseeko(out_fp, offset, SEEK_SET) ||
417 ftruncate(fileno(out_fp),
418 offset + wim_resource_size(lte)))
420 ERROR_WITH_ERRNO("Failed to flush and/or truncate "
422 ret = WIMLIB_ERR_WRITE;
423 goto out_free_chunk_tab;
425 DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
426 "writing uncompressed instead",
427 wim_resource_size(lte), new_size);
/* Retry the whole write with compression disabled; the `try_write_again:'
 * label this jumps to is elided from this view. */
428 write_ctx.compress = NULL;
429 write_ctx.doing_sha = false;
430 out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
431 goto try_write_again;
433 out_res_entry->flags |= WIM_RESHDR_FLAG_COMPRESSED;
435 out_res_entry->size = new_size;
438 FREE(write_ctx.chunk_tab);
442 #ifdef ENABLE_MULTITHREADED_COMPRESSION
444 /* Blocking shared queue (solves the producer-consumer problem) */
/* NOTE(review): the struct opener and the circular-buffer members used by
 * shared_queue_put()/get() below (array, size, front, back) are elided from
 * this view. */
445 struct shared_queue {
449 unsigned filled_slots;	/* number of occupied slots in the ring buffer */
451 pthread_mutex_t lock;	/* guards all queue state */
452 pthread_cond_t msg_avail_cond;	/* signaled when a slot becomes filled */
453 pthread_cond_t space_avail_cond;	/* signaled when a slot becomes free */
/* Allocates the queue's ring buffer and initializes its mutex and condition
 * variables. Returns WIMLIB_ERR_NOMEM on allocation failure.
 * NOTE(review): the return type, the NULL check on q->array, initialization
 * of the remaining members, and the success return are elided from this view. */
457 shared_queue_init(struct shared_queue *q, unsigned size)
459 wimlib_assert(size != 0);
460 q->array = CALLOC(sizeof(q->array[0]), size);
462 return WIMLIB_ERR_NOMEM;
467 pthread_mutex_init(&q->lock, NULL);
468 pthread_cond_init(&q->msg_avail_cond, NULL);
469 pthread_cond_init(&q->space_avail_cond, NULL);
/* Frees the queue's synchronization objects (and, presumably, its ring
 * buffer on an elided line). Counterpart of shared_queue_init(). */
474 shared_queue_destroy(struct shared_queue *q)
477 pthread_mutex_destroy(&q->lock);
478 pthread_cond_destroy(&q->msg_avail_cond);
479 pthread_cond_destroy(&q->space_avail_cond);
/* Producer side: blocks until a slot is free, enqueues @obj at the back of
 * the ring buffer, then wakes waiting consumers.
 * NOTE(review): the filled_slots increment is elided from this view. */
483 shared_queue_put(struct shared_queue *q, void *obj)
485 pthread_mutex_lock(&q->lock);
486 while (q->filled_slots == q->size)
487 pthread_cond_wait(&q->space_avail_cond, &q->lock);
489 q->back = (q->back + 1) % q->size;
490 q->array[q->back] = obj;
493 pthread_cond_broadcast(&q->msg_avail_cond);
494 pthread_mutex_unlock(&q->lock);
/* Consumer side: blocks until an item is available, dequeues it from the
 * front of the ring buffer, wakes waiting producers, and returns it.
 * NOTE(review): the return type, `void *obj;' declaration, filled_slots
 * decrement, and `return obj;' are elided from this view. */
498 shared_queue_get(struct shared_queue *q)
502 pthread_mutex_lock(&q->lock);
503 while (q->filled_slots == 0)
504 pthread_cond_wait(&q->msg_avail_cond, &q->lock);
506 obj = q->array[q->front];
507 q->array[q->front] = NULL;
508 q->front = (q->front + 1) % q->size;
511 pthread_cond_broadcast(&q->space_avail_cond);
512 pthread_mutex_unlock(&q->lock);
/* Arguments handed to each compressor thread (see compressor_thread_proc()). */
516 struct compressor_thread_params {
517 struct shared_queue *res_to_compress_queue;	/* work in: messages to compress */
518 struct shared_queue *compressed_res_queue;	/* work out: compressed messages */
519 compress_func_t compress;	/* LZX or XPRESS compressor */
/* Maximum number of chunks carried by one compression work unit. */
522 #define MAX_CHUNKS_PER_MSG 2
/* NOTE(review): the `struct message {' opener and several members referenced
 * elsewhere (num_chunks, begin_chunk, complete) are elided from this view. */
525 struct wim_lookup_table_entry *lte;	/* stream these chunks belong to */
526 u8 *uncompressed_chunks[MAX_CHUNKS_PER_MSG];	/* input buffers (chunk + 8 slack bytes) */
527 u8 *out_compressed_chunks[MAX_CHUNKS_PER_MSG];	/* per chunk: points at compressed or uncompressed buffer */
528 u8 *compressed_chunks[MAX_CHUNKS_PER_MSG];	/* output buffers for the compressor */
529 unsigned uncompressed_chunk_sizes[MAX_CHUNKS_PER_MSG];
530 unsigned compressed_chunk_sizes[MAX_CHUNKS_PER_MSG];	/* size actually to be written per chunk */
532 struct list_head list;	/* links into available_msgs or an lte's msg_list */
/* Compresses every chunk in @msg. For each chunk, if compress() shrinks it
 * (nonzero return), the compressed buffer is selected for output; otherwise
 * the original uncompressed buffer is used.
 * NOTE(review): the return type, the if/else braces, and the loop closer are
 * elided from this view. */
538 compress_chunks(struct message *msg, compress_func_t compress)
540 for (unsigned i = 0; i < msg->num_chunks; i++) {
541 DEBUG2("compress chunk %u of %u", i, msg->num_chunks);
542 unsigned len = compress(msg->uncompressed_chunks[i],
543 msg->uncompressed_chunk_sizes[i],
544 msg->compressed_chunks[i]);
546 /* To be written compressed */
547 msg->out_compressed_chunks[i] = msg->compressed_chunks[i];
548 msg->compressed_chunk_sizes[i] = len;
550 /* To be written uncompressed */
551 msg->out_compressed_chunks[i] = msg->uncompressed_chunks[i];
552 msg->compressed_chunk_sizes[i] = msg->uncompressed_chunk_sizes[i];
558 /* Compressor thread routine. This is a lot simpler than the main thread
559 * routine: just repeatedly get a group of chunks from the
560 * res_to_compress_queue, compress them, and put them in the
561 * compressed_res_queue. A NULL pointer indicates that the thread should stop.
/* NOTE(review): the `void *' return type, `struct message *msg;' declaration,
 * and trailing `return NULL;' are elided from this view. */
564 compressor_thread_proc(void *arg)
566 struct compressor_thread_params *params = arg;
567 struct shared_queue *res_to_compress_queue = params->res_to_compress_queue;
568 struct shared_queue *compressed_res_queue = params->compressed_res_queue;
569 compress_func_t compress = params->compress;
572 DEBUG("Compressor thread ready");
/* NULL is the sentinel pushed by the coordinator to request shutdown. */
573 while ((msg = shared_queue_get(res_to_compress_queue)) != NULL) {
574 compress_chunks(msg, compress);
575 shared_queue_put(compressed_res_queue, msg);
577 DEBUG("Compressor thread terminating");
580 #endif /* ENABLE_MULTITHREADED_COMPRESSION */
/* Accounts for one completed stream of @size_added bytes in @progress and
 * invokes @progress_func when the next reporting threshold is crossed.
 * The write_streams._private field appears to hold the completed-bytes
 * threshold at which to fire the next callback (advanced in ~1% steps of
 * total_bytes, and set to ~0 once the total is reached to suppress further
 * callbacks) -- TODO confirm against the full source; the size_added
 * parameter line and some braces are elided from this view. */
583 do_write_streams_progress(union wimlib_progress_info *progress,
584 wimlib_progress_func_t progress_func,
587 progress->write_streams.completed_bytes += size_added;
588 progress->write_streams.completed_streams++;
590 progress->write_streams.completed_bytes >= progress->write_streams._private)
592 progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
594 if (progress->write_streams._private == progress->write_streams.total_bytes) {
595 progress->write_streams._private = ~0;
597 progress->write_streams._private =
598 min(progress->write_streams.total_bytes,
599 progress->write_streams.completed_bytes +
600 progress->write_streams.total_bytes / 100);
/* read_resource_prefix() callback used by sha1_resource(): folds one chunk
 * into the SHA1 context passed as @ctx. (Return type/braces elided.) */
606 sha1_chunk(const void *buf, size_t len, void *ctx)
608 sha1_update(ctx, buf, len);
/* Computes the SHA1 message digest of the full stream @lte and stores it in
 * lte->hash. NOTE(review): the return type, SHA_CTX declaration/init, the
 * error check on `ret', and the return statement are elided from this view. */
613 sha1_resource(struct wim_lookup_table_entry *lte)
619 ret = read_resource_prefix(lte, wim_resource_size(lte),
620 sha1_chunk, &sha_ctx, 0);
622 sha1_final(lte->hash, &sha_ctx);
/* NOTE(review): the enum opener and its remaining enumerators are elided
 * from this view. */
628 STREAMS_NOT_MERGED = 1,
/* Writes every stream on @stream_list to the output WIM, de-duplicating
 * unhashed streams against @lookup_table as it goes, and reporting progress
 * per stream. NOTE(review): the return type, the out_fp/out_ctype parameter
 * line, `int ret;', many braces/else lines, error checks after calls, the
 * `skip_to_progress:' label, and the final return are elided from this view.
 * Also, `<e->' below appears to be a mangled `&lte->' -- confirm against the
 * full source. */
632 do_write_stream_list(struct list_head *stream_list,
633 struct wim_lookup_table *lookup_table,
636 wimlib_progress_func_t progress_func,
637 union wimlib_progress_info *progress,
638 int write_resource_flags)
641 struct wim_lookup_table_entry *lte;
643 /* For each stream in @stream_list ... */
644 while (!list_empty(stream_list)) {
645 lte = container_of(stream_list->next,
646 struct wim_lookup_table_entry,
648 list_del(<e->write_streams_list);
649 if (lte->unhashed && !lte->unique_size) {
651 /* Unhashed stream that shares a size with some other
652 * stream in the WIM we are writing. The stream must be
653 * checksummed to know if we need to write it or not. */
654 struct wim_lookup_table_entry *duplicate_lte;
655 struct wim_lookup_table_entry **back_ptr;
657 /* back_ptr must be saved because it's in union with the
658 * SHA1 message digest and will no longer be valid once
659 * the SHA1 has been calculated. */
660 back_ptr = lte->back_ptr;
662 /* Checksum the stream */
663 ret = sha1_resource(lte);
667 /* Look for a duplicate stream */
668 duplicate_lte = __lookup_resource(lookup_table, lte->hash);
670 /* We have a duplicate stream. Transfer the
671 * reference counts from this stream to the
672 * duplicate, update the reference to this
673 * stream (in an inode or ads_entry) to point to
674 * the duplicate, then free this stream. */
675 wimlib_assert(!(duplicate_lte->unhashed));
676 bool is_new_stream = (duplicate_lte->out_refcnt == 0);
677 duplicate_lte->refcnt += lte->refcnt;
678 duplicate_lte->out_refcnt += lte->refcnt;
679 *back_ptr = duplicate_lte;
680 list_del(<e->unhashed_list);
681 free_lookup_table_entry(lte);
685 /* The duplicate stream is one we
686 * weren't already planning to write.
687 * But, now we must write it.
689 * XXX: Currently, the copy of the
690 * stream in the WIM is always chosen
691 * for writing, rather than the extra
692 * copy we just read (which may be in an
693 * external file). This may not always
696 /* We have already written, or are going
697 * to write, the duplicate stream. So
698 * just skip to the next stream. */
699 DEBUG("Discarding duplicate stream of length %"PRIu64,
700 wim_resource_size(lte));
701 goto skip_to_progress;
705 /* No duplicate stream, so we need to insert
706 * this stream into the lookup table and treat
707 * it as a hashed stream. */
708 list_del(<e->unhashed_list);
709 lookup_table_insert(lookup_table, lte);
710 lte->out_refcnt = lte->refcnt;
715 /* Here, @lte either a hashed stream or an unhashed stream with
716 * a unique size. In either case we know that the stream has to
717 * be written. In either case the SHA1 message digest will be
718 * calculated over the stream while writing it; however, in the
719 * former case this is done merely to check the data, while in
720 * the latter case this is done because we do not have the SHA1
721 * message digest yet. */
723 wimlib_assert(lte->out_refcnt != 0);
725 ret = write_wim_resource(lte,
728 <e->output_resource_entry,
729 write_resource_flags);
733 list_del(<e->unhashed_list);
734 lookup_table_insert(lookup_table, lte);
738 do_write_streams_progress(progress,
740 wim_resource_size(lte));
/* Single-threaded stream writer: translates the public RECOMPRESS write flag
 * into the internal resource flag, announces the (single-threaded) progress
 * message, and delegates to do_write_stream_list().
 * NOTE(review): the return type, the out_fp/out_ctype/write_flags parameter
 * lines, the `if (progress_func)' guard, and the lookup_table argument in the
 * final call are elided from this view. */
746 write_stream_list_serial(struct list_head *stream_list,
747 struct wim_lookup_table *lookup_table,
751 wimlib_progress_func_t progress_func,
752 union wimlib_progress_info *progress)
754 int write_resource_flags = 0;
755 if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
756 write_resource_flags |= WIMLIB_RESOURCE_FLAG_RECOMPRESS;
758 progress->write_streams.num_threads = 1;
760 progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
761 return do_write_stream_list(stream_list,
764 out_ctype, progress_func,
765 progress, write_resource_flags);
768 #ifdef ENABLE_MULTITHREADED_COMPRESSION
/* Writes the already-compressed chunks of @msg to @out_fp in order and
 * records each chunk's offset in @chunk_tab (same bookkeeping as
 * write_wim_resource_chunk()). NOTE(review): the return type, the fwrite
 * result comparison (`!= chunk_csize', split across elided lines), and the
 * final `return 0;' are elided from this view. */
770 write_wim_chunks(struct message *msg, FILE *out_fp,
771 struct chunk_table *chunk_tab)
773 for (unsigned i = 0; i < msg->num_chunks; i++) {
774 unsigned chunk_csize = msg->compressed_chunk_sizes[i];
776 DEBUG2("Write wim chunk %u of %u (csize = %u)",
777 i, msg->num_chunks, chunk_csize);
779 if (fwrite(msg->out_compressed_chunks[i], 1, chunk_csize, out_fp)
782 ERROR_WITH_ERRNO("Failed to write WIM chunk");
783 return WIMLIB_ERR_WRITE;
786 *chunk_tab->cur_offset_p++ = chunk_tab->cur_offset;
787 chunk_tab->cur_offset += chunk_csize;
793 * This function is executed by the main thread when the resources are being
794 * compressed in parallel. The main thread is in change of all reading of the
795 * uncompressed data and writing of the compressed data. The compressor threads
796 * *only* do compression from/to in-memory buffers.
798 * Each unit of work given to a compressor thread is up to MAX_CHUNKS_PER_MSG
799 * chunks of compressed data to compress, represented in a `struct message'.
800 * Each message is passed from the main thread to a worker thread through the
801 * res_to_compress_queue, and it is passed back through the
802 * compressed_res_queue.
/* NOTE(review): this function is heavily elided in this view -- the return
 * type, several parameter lines (out_fp, out_ctype, write_flags,
 * num_messages, lookup_table), many braces/else branches, error checks,
 * `next_chunk' declaration, WITH_NTFS_3G conditionals, goto labels, and the
 * final return are all missing. Comments added below only describe what the
 * visible lines establish. */
805 main_writer_thread_proc(struct list_head *stream_list,
808 struct shared_queue *res_to_compress_queue,
809 struct shared_queue *compressed_res_queue,
812 wimlib_progress_func_t progress_func,
813 union wimlib_progress_info *progress)
816 struct chunk_table *cur_chunk_tab = NULL;
817 struct message *msgs = CALLOC(num_messages, sizeof(struct message));
818 struct wim_lookup_table_entry *next_lte = NULL;
820 // Initially, all the messages are available to use.
821 LIST_HEAD(available_msgs);
824 ret = WIMLIB_ERR_NOMEM;
828 for (size_t i = 0; i < num_messages; i++)
829 list_add(&msgs[i].list, &available_msgs);
831 // outstanding_resources is the list of resources that currently have
832 // had chunks sent off for compression.
834 // The first stream in outstanding_resources is the stream that is
835 // currently being written (cur_lte).
837 // The last stream in outstanding_resources is the stream that is
838 // currently being read and chunks fed to the compressor threads
841 // Depending on the number of threads and the sizes of the resource,
842 // the outstanding streams list may contain streams between cur_lte and
843 // next_lte that have all their chunks compressed or being compressed,
844 // but haven't been written yet.
846 LIST_HEAD(outstanding_resources);
847 struct list_head *next_resource = stream_list->next;
849 u64 next_num_chunks = 0;
851 // As in write_wim_resource(), each resource we read is checksummed.
852 SHA_CTX next_sha_ctx;
853 u8 next_hash[SHA1_HASH_SIZE];
855 // Resources that don't need any chunks compressed are added to this
856 // list and written directly by the main thread.
857 LIST_HEAD(my_resources);
859 struct wim_lookup_table_entry *cur_lte = NULL;
863 ntfs_inode *ni = NULL;
866 DEBUG("Initializing buffers for uncompressed "
867 "and compressed data (%zu bytes needed)",
868 num_messages * MAX_CHUNKS_PER_MSG * WIM_CHUNK_SIZE * 2);
870 // Pre-allocate all the buffers that will be needed to do the chunk
872 for (size_t i = 0; i < num_messages; i++) {
873 for (size_t j = 0; j < MAX_CHUNKS_PER_MSG; j++) {
874 msgs[i].compressed_chunks[j] = MALLOC(WIM_CHUNK_SIZE);
876 // The extra 8 bytes is because longest_match() in
877 // lz77.c may read a little bit off the end of the
878 // uncompressed data. It doesn't need to be
879 // initialized--- we really just need to avoid accessing
881 msgs[i].uncompressed_chunks[j] = MALLOC(WIM_CHUNK_SIZE + 8);
882 if (msgs[i].compressed_chunks[j] == NULL ||
883 msgs[i].uncompressed_chunks[j] == NULL)
885 ret = WIMLIB_ERR_NOMEM;
891 // This loop is executed until all resources have been written, except
892 // possibly a few that have been added to the @my_resources list for
895 // Send chunks to the compressor threads until either (a) there
896 // are no more messages available since they were all sent off,
897 // or (b) there are no more resources that need to be
899 while (!list_empty(&available_msgs)) {
900 if (next_chunk == next_num_chunks) {
901 // If next_chunk == next_num_chunks, there are
902 // no more chunks to write in the current
903 // stream. So, check the SHA1 message digest of
904 // the stream that was just finished (unless
905 // next_lte == NULL, which is the case the very
906 // first time this loop is entered, and also
907 // near the very end of the compression when
908 // there are no more streams.) Then, advance to
909 // the next stream (if there is one).
910 if (next_lte != NULL) {
912 end_wim_resource_read(next_lte, ni);
915 end_wim_resource_read(next_lte);
917 DEBUG2("Finalize SHA1 md (next_num_chunks=%zu)",
919 sha1_final(next_hash, &next_sha_ctx);
920 if (!hashes_equal(next_lte->hash, next_hash)) {
921 ERROR("WIM resource has incorrect hash!");
922 if (next_lte->resource_location ==
923 RESOURCE_IN_FILE_ON_DISK)
925 ERROR("We were reading it from `%"TS"'; "
926 "maybe it changed while we were "
928 next_lte->file_on_disk);
930 ret = WIMLIB_ERR_INVALID_RESOURCE_HASH;
935 // Advance to the next resource.
937 // If the next resource needs no compression, just write
938 // it with this thread (not now though--- we could be in
939 // the middle of writing another resource.) Keep doing
940 // this until we either get to the end of the resources
941 // list, or we get to a resource that needs compression.
943 if (next_resource == stream_list) {
944 // No more resources to send for
949 next_lte = container_of(next_resource,
950 struct wim_lookup_table_entry,
952 next_resource = next_resource->next;
953 if ((!(write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
954 && wim_resource_compression_type(next_lte) == out_ctype)
955 || wim_resource_size(next_lte) == 0)
957 list_add_tail(&next_lte->write_streams_list,
960 list_add_tail(&next_lte->write_streams_list,
961 &outstanding_resources);
963 next_num_chunks = wim_resource_chunks(next_lte);
964 sha1_init(&next_sha_ctx);
965 INIT_LIST_HEAD(&next_lte->msg_list);
967 ret = prepare_resource_for_read(next_lte, &ni);
969 ret = prepare_resource_for_read(next_lte);
974 if (cur_lte == NULL) {
975 // Set cur_lte for the
984 if (next_lte == NULL) {
985 // No more resources to send for compression
989 // Get a message from the available messages
991 msg = container_of(available_msgs.next,
995 // ... and delete it from the available messages
997 list_del(&msg->list);
999 // Initialize the message with the chunks to
1001 msg->num_chunks = min(next_num_chunks - next_chunk,
1002 MAX_CHUNKS_PER_MSG);
1003 msg->lte = next_lte;
1004 msg->complete = false;
1005 msg->begin_chunk = next_chunk;
1007 unsigned size = WIM_CHUNK_SIZE;
1008 for (unsigned i = 0; i < msg->num_chunks; i++) {
1010 // Read chunk @next_chunk of the stream into the
1011 // message so that a compressor thread can
1014 if (next_chunk == next_num_chunks - 1) {
1015 size = MODULO_NONZERO(wim_resource_size(next_lte),
1019 DEBUG2("Read resource (size=%u, offset=%zu)",
1020 size, next_chunk * WIM_CHUNK_SIZE);
1022 msg->uncompressed_chunk_sizes[i] = size;
1024 ret = read_wim_resource(next_lte,
1025 msg->uncompressed_chunks[i],
1027 next_chunk * WIM_CHUNK_SIZE,
1031 sha1_update(&next_sha_ctx,
1032 msg->uncompressed_chunks[i], size);
1036 // Send the compression request
1037 list_add_tail(&msg->list, &next_lte->msg_list);
1038 shared_queue_put(res_to_compress_queue, msg);
1039 DEBUG2("Compression request sent");
1042 // If there are no outstanding resources, there are no more
1043 // resources that need to be written.
1044 if (list_empty(&outstanding_resources)) {
1049 // Get the next message from the queue and process it.
1050 // The message will contain 1 or more data chunks that have been
1052 msg = shared_queue_get(compressed_res_queue);
1053 msg->complete = true;
1055 // Is this the next chunk in the current resource? If it's not
1056 // (i.e., an earlier chunk in a same or different resource
1057 // hasn't been compressed yet), do nothing, and keep this
1058 // message around until all earlier chunks are received.
1060 // Otherwise, write all the chunks we can.
1061 while (cur_lte != NULL &&
1062 !list_empty(&cur_lte->msg_list) &&
1063 (msg = container_of(cur_lte->msg_list.next,
1067 DEBUG2("Complete msg (begin_chunk=%"PRIu64")", msg->begin_chunk);
1068 if (msg->begin_chunk == 0) {
1069 DEBUG2("Begin chunk tab");
1071 // This is the first set of chunks. Leave space
1072 // for the chunk table in the output file.
1073 off_t cur_offset = ftello(out_fp);
1074 if (cur_offset == -1) {
1075 ret = WIMLIB_ERR_WRITE;
1078 ret = begin_wim_resource_chunk_tab(cur_lte,
1086 // Write the compressed chunks from the message.
1087 ret = write_wim_chunks(msg, out_fp, cur_chunk_tab);
1091 list_del(&msg->list);
1093 // This message is available to use for different chunks
1095 list_add(&msg->list, &available_msgs);
1097 // Was this the last chunk of the stream? If so, finish
1099 if (list_empty(&cur_lte->msg_list) &&
1100 msg->begin_chunk + msg->num_chunks == cur_chunk_tab->num_chunks)
1102 DEBUG2("Finish wim chunk tab");
1104 ret = finish_wim_resource_chunk_tab(cur_chunk_tab,
1110 if (res_csize >= wim_resource_size(cur_lte)) {
1111 /* Oops! We compressed the resource to
1112 * larger than the original size. Write
1113 * the resource uncompressed instead. */
1114 ret = write_uncompressed_resource_and_truncate(
1117 cur_chunk_tab->file_offset,
1118 &cur_lte->output_resource_entry);
1122 cur_lte->output_resource_entry.size =
1125 cur_lte->output_resource_entry.original_size =
1126 cur_lte->resource_entry.original_size;
1128 cur_lte->output_resource_entry.offset =
1129 cur_chunk_tab->file_offset;
1131 cur_lte->output_resource_entry.flags =
1132 cur_lte->resource_entry.flags |
1133 WIM_RESHDR_FLAG_COMPRESSED;
1136 do_write_streams_progress(progress, progress_func,
1137 wim_resource_size(cur_lte));
1139 FREE(cur_chunk_tab);
1140 cur_chunk_tab = NULL;
/* Advance cur_lte to the next outstanding resource (or stop if this
 * was the last one). */
1142 struct list_head *next = cur_lte->write_streams_list.next;
1143 list_del(&cur_lte->write_streams_list);
1145 if (next == &outstanding_resources)
1148 cur_lte = container_of(cur_lte->write_streams_list.next,
1149 struct wim_lookup_table_entry,
1150 write_streams_list);
1152 // Since we just finished writing a stream,
1153 // write any streams that have been added to the
1154 // my_resources list for direct writing by the
1155 // main thread (e.g. resources that don't need
1156 // to be compressed because the desired
1157 // compression type is the same as the previous
1158 // compression type).
1159 ret = do_write_stream_list(&my_resources,
/* Error/cleanup paths below (labels elided from this view). */
1172 if (ret == WIMLIB_ERR_NOMEM) {
1173 ERROR("Could not allocate enough memory for "
1174 "multi-threaded compression");
1179 end_wim_resource_read(next_lte, ni);
1181 end_wim_resource_read(next_lte);
1186 ret = do_write_stream_list(&my_resources, out_fp,
1187 out_ctype, progress_func,
/* Drain the compressed queue until every message has been returned, so no
 * compressor thread is left holding a buffer we are about to free. */
1191 size_t num_available_msgs = 0;
1192 struct list_head *cur;
1194 list_for_each(cur, &available_msgs) {
1195 num_available_msgs++;
1198 while (num_available_msgs < num_messages) {
1199 shared_queue_get(compressed_res_queue);
1200 num_available_msgs++;
1206 for (size_t i = 0; i < num_messages; i++) {
1207 for (size_t j = 0; j < MAX_CHUNKS_PER_MSG; j++) {
1208 FREE(msgs[i].compressed_chunks[j]);
1209 FREE(msgs[i].uncompressed_chunks[j]);
1215 FREE(cur_chunk_tab);
/* Returns the default compressor thread count: the number of online
 * processors, via the platform-specific API. (Return type and the
 * surrounding #ifdef __WIN32__ lines are elided from this view.) */
1220 get_default_num_threads()
1223 return win32_get_number_of_processors();
1225 return sysconf(_SC_NPROCESSORS_ONLN);
/*
 * Writes the streams in @stream_list using multi-threaded compression:
 * spawns @num_threads compressor threads fed through a pair of shared
 * queues, while the calling thread acts as the writer
 * (main_writer_thread_proc()).  On any setup failure it falls back to
 * write_stream_list_serial() (see the fall-through at the end of this
 * function).
 * NOTE(review): sampled listing — interior lines (braces, some error
 * checks) are missing between the visible statements.
 */
1230 write_stream_list_parallel(struct list_head *stream_list,
1231 struct wim_lookup_table *lookup_table,
1235 unsigned num_threads,
1236 wimlib_progress_func_t progress_func,
1237 union wimlib_progress_info *progress)
1240 struct shared_queue res_to_compress_queue;
1241 struct shared_queue compressed_res_queue;
1242 pthread_t *compressor_threads = NULL;
/* num_threads == 0 means "auto-detect"; clamp nonsense results to 1. */
1244 if (num_threads == 0) {
1245 long nthreads = get_default_num_threads();
1246 if (nthreads < 1 || nthreads > UINT_MAX) {
1247 WARNING("Could not determine number of processors! Assuming 1");
1250 num_threads = nthreads;
1254 progress->write_streams.num_threads = num_threads;
1255 wimlib_assert(stream_list->next != stream_list);
/* Queue depth scales with thread count so compressors stay busy. */
1257 static const double MESSAGES_PER_THREAD = 2.0;
1258 size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD);
1260 DEBUG("Initializing shared queues (queue_size=%zu)", queue_size);
1262 ret = shared_queue_init(&res_to_compress_queue, queue_size);
1266 ret = shared_queue_init(&compressed_res_queue, queue_size);
1268 goto out_destroy_res_to_compress_queue;
1270 struct compressor_thread_params params;
1271 params.res_to_compress_queue = &res_to_compress_queue;
1272 params.compressed_res_queue = &compressed_res_queue;
1273 params.compress = get_compress_func(out_ctype);
1275 compressor_threads = MALLOC(num_threads * sizeof(pthread_t));
1276 if (!compressor_threads) {
1277 ret = WIMLIB_ERR_NOMEM;
1278 goto out_destroy_compressed_res_queue;
1281 for (unsigned i = 0; i < num_threads; i++) {
1282 DEBUG("pthread_create thread %u", i);
1283 ret = pthread_create(&compressor_threads[i], NULL,
1284 compressor_thread_proc, &params);
/* write_stream_list_parallel, continued: run the writer loop in the
 * calling thread, then shut down and join the compressor pool.
 * Cleanup uses the goto-label chain below; on a non-OOM failure the
 * function falls back to single-threaded compression. */
1287 ERROR_WITH_ERRNO("Failed to create compressor "
1295 progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
1297 ret = main_writer_thread_proc(stream_list,
1300 &res_to_compress_queue,
1301 &compressed_res_queue,
/* One NULL message per thread tells each compressor to terminate. */
1307 for (unsigned i = 0; i < num_threads; i++)
1308 shared_queue_put(&res_to_compress_queue, NULL);
1310 for (unsigned i = 0; i < num_threads; i++) {
1311 if (pthread_join(compressor_threads[i], NULL)) {
1312 WARNING_WITH_ERRNO("Failed to join compressor "
1316 FREE(compressor_threads);
1317 out_destroy_compressed_res_queue:
1318 shared_queue_destroy(&compressed_res_queue);
1319 out_destroy_res_to_compress_queue:
1320 shared_queue_destroy(&res_to_compress_queue);
/* ret >= 0 here means thread/queue setup failed (not OOM, not a write
 * error), so a serial retry may still succeed. */
1321 if (ret >= 0 && ret != WIMLIB_ERR_NOMEM)
1324 WARNING("Falling back to single-threaded compression");
1325 return write_stream_list_serial(stream_list,
1337 * Write a list of streams to a WIM (@out_fp) using the compression type
1338 * @out_ctype and up to @num_threads compressor threads.
/*
 * Top-level stream writer: tallies totals for progress reporting, then
 * dispatches to the parallel path (when multithreaded compression is
 * compiled in, real compression work >= ~1 MB is pending, and
 * num_threads != 1) or the serial path otherwise.
 * NOTE(review): sampled listing — interior lines are missing.
 */
1341 write_stream_list(struct list_head *stream_list,
1342 struct wim_lookup_table *lookup_table,
1343 FILE *out_fp, int out_ctype, int write_flags,
1344 unsigned num_threads, wimlib_progress_func_t progress_func)
1346 struct wim_lookup_table_entry *lte;
1347 size_t num_streams = 0;
1348 u64 total_bytes = 0;
1349 u64 total_compression_bytes = 0;
1350 union wimlib_progress_info progress;
/* Nothing to do for an empty list. */
1353 if (list_empty(stream_list))
1356 list_for_each_entry(lte, stream_list, write_streams_list) {
1358 total_bytes += wim_resource_size(lte);
/* A stream only needs (re)compression if the output is compressed AND
 * either its current compression type differs or RECOMPRESS was asked. */
1359 if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
1360 && (wim_resource_compression_type(lte) != out_ctype ||
1361 (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)))
1363 total_compression_bytes += wim_resource_size(lte);
1366 progress.write_streams.total_bytes = total_bytes;
1367 progress.write_streams.total_streams = num_streams;
1368 progress.write_streams.completed_bytes = 0;
1369 progress.write_streams.completed_streams = 0;
1370 progress.write_streams.num_threads = num_threads;
1371 progress.write_streams.compression_type = out_ctype;
1372 progress.write_streams._private = 0;
1374 #ifdef ENABLE_MULTITHREADED_COMPRESSION
/* Parallelism only pays off with enough compression work queued. */
1375 if (total_compression_bytes >= 1000000 && num_threads != 1)
1376 ret = write_stream_list_parallel(stream_list,
1386 ret = write_stream_list_serial(stream_list,
/* Hash table keyed on stream size, used to detect streams whose
 * (uncompressed) size is unique — such streams cannot be duplicates and
 * must be written.  NOTE(review): the num_entries/capacity members and
 * closing brace are not visible in this sampled listing. */
1396 struct stream_size_table {
1397 struct hlist_head *array;
/* Initializes @tab with @capacity zeroed buckets.  Returns
 * WIMLIB_ERR_NOMEM if the bucket array cannot be allocated (success-path
 * return is outside the visible lines). */
1403 init_stream_size_table(struct stream_size_table *tab, size_t capacity)
1405 tab->array = CALLOC(capacity, sizeof(tab->array[0]));
1407 return WIMLIB_ERR_NOMEM;
1408 tab->num_entries = 0;
1409 tab->capacity = capacity;
/* Frees the resources of @tab (body not visible in this sampled listing;
 * presumably frees tab->array — TODO confirm against full source). */
1414 destroy_stream_size_table(struct stream_size_table *tab)
/*
 * Inserts @lte into the stream-size hash table @_tab (void* so it can be
 * used as a for_lookup_table_entry() callback).  A stream starts out
 * flagged unique_size = 1; if another stream with the same resource size
 * is already present in the bucket, both are marked non-unique.
 */
1420 stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab)
1422 struct stream_size_table *tab = _tab;
1424 struct wim_lookup_table_entry *hashed_lte;
1425 struct hlist_node *tmp;
1427 pos = hash_u64(wim_resource_size(lte)) % tab->capacity;
1428 lte->unique_size = 1;
1429 hlist_for_each_entry(hashed_lte, tmp, &tab->array[pos], hash_list_2) {
1430 if (wim_resource_size(hashed_lte) == wim_resource_size(lte)) {
1431 lte->unique_size = 0;
1432 hashed_lte->unique_size = 0;
1437 hlist_add_head(&lte->hash_list_2, &tab->array[pos]);
/* Callback context for lte_overwrite_prepare(): accumulates the streams
 * to write and their size table.  NOTE(review): the wim/end_offset
 * members referenced by the callback are in lines not visible here. */
1443 struct lte_overwrite_prepare_args {
1446 struct list_head stream_list;
1447 struct stream_size_table stream_size_tab;
/*
 * Per-stream callback for an in-place overwrite: rejects any resource of
 * this WIM located after the XML data (that region will be overwritten),
 * otherwise queues the stream for writing.
 */
1451 lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *arg)
1453 struct lte_overwrite_prepare_args *args = arg;
1455 if (lte->resource_location == RESOURCE_IN_WIM &&
1456 lte->wim == args->wim)
1458 /* We can't do an in place overwrite on the WIM if there are
1459 * streams after the XML data. */
1460 if (lte->resource_entry.offset +
1461 lte->resource_entry.size > args->end_offset)
1463 #ifdef ENABLE_ERROR_MESSAGES
1464 ERROR("The following resource is after the XML data:");
1465 print_lookup_table_entry(lte, stderr);
1467 return WIMLIB_ERR_RESOURCE_ORDER;
/* Metadata resources must not reach this point. */
1470 wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA));
1471 list_add_tail(&lte->write_streams_list, &args->stream_list);
/* lte_overwrite_prepare, continued: all references count toward this
 * write, and the stream participates in unique-size detection. */
1473 lte->out_refcnt = lte->refcnt;
1474 stream_size_table_insert(lte, &args->stream_size_tab);
/* Callback: for streams already stored in @_wim, the output resource
 * entry is just a copy of the existing one (in-place overwrite keeps
 * old resources where they are). */
1479 lte_set_output_res_entry(struct wim_lookup_table_entry *lte, void *_wim)
1481 if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == _wim) {
1482 copy_resource_entry(&lte->output_resource_entry,
1483 &lte->resource_entry);
1488 /* Given a WIM that we are going to overwrite in place with zero or more
1489 * additional streams added, construct the list of new unique streams
1490 * ('struct wim_lookup_table_entry's) that must be written, plus any unhashed
1491 * streams that need to be added but may be identical to other hashed or
1492 * unhashed streams. These unhashed streams are checksummed while the streams
1493 * are being written. To aid this process, the member @unique_size is set to 1
1494 * on streams that have a unique size and therefore must be written.
1496 * The out_refcnt member of each 'struct wim_lookup_table_entry' is set to
1497 * indicate the number of times the stream is referenced in only the streams
1498 * that are being written; this may still be adjusted later when unhashed
1499 * streams are being resolved.
/*
 * Builds @stream_list for an in-place overwrite of @wim: runs
 * lte_overwrite_prepare() over every unhashed stream of every image and
 * over the lookup table, then fills in the output resource entries for
 * resources that stay in place.  @end_offset is where the overwrite
 * region begins; any resource beyond it aborts with
 * WIMLIB_ERR_RESOURCE_ORDER (propagated from the callback).
 * NOTE(review): sampled listing — interior lines are missing.
 */
1502 prepare_streams_for_overwrite(WIMStruct *wim, off_t end_offset,
1503 struct list_head *stream_list)
1506 struct lte_overwrite_prepare_args args;
1509 args.end_offset = end_offset;
1510 ret = init_stream_size_table(&args.stream_size_tab,
1511 wim->lookup_table->capacity);
1515 INIT_LIST_HEAD(&args.stream_list);
/* Unhashed streams are attached per-image, not in the lookup table, so
 * they must be visited separately. */
1516 for (int i = 0; i < wim->hdr.image_count; i++) {
1517 struct wim_image_metadata *imd;
1518 struct wim_lookup_table_entry *lte;
1520 imd = wim->image_metadata[i];
1521 image_for_each_unhashed_stream(lte, imd) {
1522 ret = lte_overwrite_prepare(lte, &args);
1524 goto out_destroy_stream_size_table;
1527 ret = for_lookup_table_entry(wim->lookup_table,
1528 lte_overwrite_prepare, &args);
1530 goto out_destroy_stream_size_table;
/* Metadata resources and existing streams keep their on-disk entries. */
1532 for (int i = 0; i < wim->hdr.image_count; i++)
1533 lte_set_output_res_entry(wim->image_metadata[i]->metadata_lte,
1535 for_lookup_table_entry(wim->lookup_table, lte_set_output_res_entry, wim);
1536 INIT_LIST_HEAD(stream_list);
1537 list_splice(&args.stream_list, stream_list);
1538 out_destroy_stream_size_table:
1539 destroy_stream_size_table(&args.stream_size_tab);
/* Context shared across images while collecting the streams to write
 * for a normal (non-overwrite) write. */
1544 struct find_streams_ctx {
1545 struct list_head stream_list;
1546 struct stream_size_table stream_size_tab;
/*
 * Collects the streams of @inode (the default unnamed stream at index 0
 * plus i_num_ads alternate data streams) into @stream_list.  A stream
 * is queued only the first time it is seen (out_refcnt == 0).
 */
1550 inode_find_streams_to_write(struct wim_inode *inode,
1551 struct wim_lookup_table *table,
1552 struct list_head *stream_list,
1553 struct stream_size_table *tab)
1555 struct wim_lookup_table_entry *lte;
1556 for (unsigned i = 0; i <= inode->i_num_ads; i++) {
1557 lte = inode_stream_lte(inode, i, table);
1559 if (lte->out_refcnt == 0) {
1561 stream_size_table_insert(lte, tab);
1562 list_add_tail(&lte->write_streams_list, stream_list);
/* Each hard link of the inode counts as one reference to the stream. */
1564 lte->out_refcnt += inode->i_nlink;
/*
 * Per-image callback (for_image()): resets out_refcnt on the image's
 * unhashed streams, then walks every inode of the currently-selected
 * image and gathers its streams into the shared find_streams_ctx
 * (retrieved via w->private — assignment not visible in this sampled
 * listing; see prepare_stream_list()).
 */
1570 image_find_streams_to_write(WIMStruct *w)
1572 struct wim_image_metadata *imd;
1573 struct find_streams_ctx *ctx;
1574 struct wim_inode *inode;
1575 struct wim_lookup_table_entry *lte;
1578 imd = wim_get_current_image_metadata(w);
/* Unhashed streams are not in the lookup table, so their out_refcnt
 * was not reset by lte_zero_out_refcnt(); do it here. */
1580 image_for_each_unhashed_stream(lte, imd) {
1581 lte->out_refcnt = 0;
1582 wimlib_assert(lte->unhashed);
1583 wimlib_assert(lte->back_ptr != NULL);
1586 /* Go through this image's inodes to find any streams that have not been
1588 image_for_each_inode(inode, imd) {
1589 inode_find_streams_to_write(inode, w->lookup_table,
1591 &ctx->stream_size_tab);
1596 /* Given a WIM from which one or all of the images is being written, build
1597 * the list of unique streams ('struct wim_lookup_table_entry's) that must be
1598 * written, plus any unhashed streams that need to be written but may be
1599 * identical to other hashed or unhashed streams being written. These unhashed
1600 * streams are checksummed while the streams are being written. To aid this
1601 * process, the member @unique_size is set to 1 on streams that have a unique
1602 * size and therefore must be written.
1604 * The out_refcnt member of each 'struct wim_lookup_table_entry' is set to
1605 * indicate the number of times the stream is referenced in only the streams
1606 * that are being written; this may still be adjusted later when unhashed
1607 * streams are being resolved.
/*
 * Builds @stream_list of unique streams to write for @image (or all
 * images): zeroes reference counts, seeds the stream-size table from the
 * lookup table, then runs image_find_streams_to_write() per image with
 * the context passed through wim->private.
 */
1610 prepare_stream_list(WIMStruct *wim, int image, struct list_head *stream_list)
1613 struct find_streams_ctx ctx;
1615 for_lookup_table_entry(wim->lookup_table, lte_zero_out_refcnt, NULL);
1616 ret = init_stream_size_table(&ctx.stream_size_tab,
1617 wim->lookup_table->capacity);
1620 for_lookup_table_entry(wim->lookup_table, stream_size_table_insert,
1621 &ctx.stream_size_tab);
1622 INIT_LIST_HEAD(&ctx.stream_list);
/* Pass the context to the per-image callback. */
1623 wim->private = &ctx;
1624 ret = for_image(wim, image, image_find_streams_to_write);
1625 destroy_stream_size_table(&ctx.stream_size_tab);
1627 INIT_LIST_HEAD(stream_list);
1628 list_splice(&ctx.stream_list, stream_list);
1633 /* Writes the streams for the specified @image in @wim to @wim->out_fp.
/* Thin wrapper: prepares the stream list then hands off to
 * write_stream_list() with the WIM's compression type. */
1636 write_wim_streams(WIMStruct *wim, int image, int write_flags,
1637 unsigned num_threads,
1638 wimlib_progress_func_t progress_func)
1641 struct list_head stream_list;
1643 ret = prepare_stream_list(wim, image, &stream_list);
1646 return write_stream_list(&stream_list,
1649 wimlib_get_compression_type(wim),
1656 * Finish writing a WIM file: write the lookup table, xml data, and integrity
1657 * table (optional), then overwrite the WIM header.
1659 * write_flags is a bitwise OR of the following:
1661 * (public) WIMLIB_WRITE_FLAG_CHECK_INTEGRITY:
1662 * Include an integrity table.
1664 * (public) WIMLIB_WRITE_FLAG_SHOW_PROGRESS:
1665 * Show progress information when (if) writing the integrity table.
1667 * (private) WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE:
1668 * Don't write the lookup table.
1670 * (private) WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE:
1671 * When (if) writing the integrity table, re-use entries from the
1672 * existing integrity table, if possible.
1674 * (private) WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML:
1675 * After writing the XML data but before writing the integrity
1676 * table, write a temporary WIM header and flush the stream so that
1677 * the WIM is less likely to become corrupted upon abrupt program
1680 * (private) WIMLIB_WRITE_FLAG_FSYNC:
1681 * fsync() the output file before closing it.
/*
 * Finishes a WIM write: lookup table, XML data, optional checkpoint
 * header + integrity table, boot-metadata fixup, final header, optional
 * fsync, then fclose.  See the flag documentation in the comment block
 * above this function.
 * NOTE(review): sampled listing — interior lines (closing braces, some
 * error checks and gotos) are missing between the visible statements.
 */
1685 finish_write(WIMStruct *w, int image, int write_flags,
1686 wimlib_progress_func_t progress_func)
1689 struct wim_header hdr;
1690 FILE *out = w->out_fp;
1692 /* @hdr will be the header for the new WIM. First copy all the data
1693 * from the header in the WIMStruct; then set all the fields that may
1694 * have changed, including the resource entries, boot index, and image
1696 memcpy(&hdr, &w->hdr, sizeof(struct wim_header));
1698 if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
1699 ret = write_lookup_table(w, image, &hdr.lookup_table_res_entry);
1704 ret = write_xml_data(w->wim_info, image, out,
1705 (write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE) ?
1706 wim_info_get_total_bytes(w->wim_info) : 0,
1707 &hdr.xml_res_entry);
1711 if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
/* Checkpoint: commit a header with a zeroed integrity entry before the
 * (slow) integrity computation, so a crash mid-way leaves a valid WIM. */
1712 if (write_flags & WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) {
1713 struct wim_header checkpoint_hdr;
1714 memcpy(&checkpoint_hdr, &hdr, sizeof(struct wim_header));
1715 memset(&checkpoint_hdr.integrity, 0, sizeof(struct resource_entry));
1716 if (fseeko(out, 0, SEEK_SET) != 0) {
1717 ERROR_WITH_ERRNO("Failed to seek to beginning "
1718 "of WIM being written");
1719 ret = WIMLIB_ERR_WRITE;
1722 ret = write_header(&checkpoint_hdr, out);
1726 if (fflush(out) != 0) {
1727 ERROR_WITH_ERRNO("Can't write data to WIM");
1728 ret = WIMLIB_ERR_WRITE;
1732 if (fseeko(out, 0, SEEK_END) != 0) {
1733 ERROR_WITH_ERRNO("Failed to seek to end "
1734 "of WIM being written");
1735 ret = WIMLIB_ERR_WRITE;
/* old_lookup_table_end != 0 lets write_integrity_table() reuse chunks of
 * the existing integrity table for the unchanged prefix of the file. */
1740 off_t old_lookup_table_end;
1741 off_t new_lookup_table_end;
1742 if (write_flags & WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE) {
1743 old_lookup_table_end = w->hdr.lookup_table_res_entry.offset +
1744 w->hdr.lookup_table_res_entry.size;
1746 old_lookup_table_end = 0;
1748 new_lookup_table_end = hdr.lookup_table_res_entry.offset +
1749 hdr.lookup_table_res_entry.size;
1751 ret = write_integrity_table(out,
1753 new_lookup_table_end,
1754 old_lookup_table_end,
/* No integrity table requested: make sure the header says so. */
1759 memset(&hdr.integrity, 0, sizeof(struct resource_entry));
1763 * In the WIM header, there is room for the resource entry for a
1764 * metadata resource labeled as the "boot metadata". This entry should
1765 * be zeroed out if there is no bootable image (boot_idx 0). Otherwise,
1766 * it should be a copy of the resource entry for the image that is
1767 * marked as bootable. This is not well documented...
1770 /* Set image count and boot index correctly for single image writes */
1771 if (image != WIMLIB_ALL_IMAGES) {
1772 hdr.image_count = 1;
1773 if (hdr.boot_idx == image)
1779 if (hdr.boot_idx == 0) {
1780 memset(&hdr.boot_metadata_res_entry, 0,
1781 sizeof(struct resource_entry));
1783 memcpy(&hdr.boot_metadata_res_entry,
1785 hdr.boot_idx - 1]->metadata_lte->output_resource_entry,
1786 sizeof(struct resource_entry));
1789 if (fseeko(out, 0, SEEK_SET) != 0) {
1790 ERROR_WITH_ERRNO("Failed to seek to beginning of WIM "
1792 ret = WIMLIB_ERR_WRITE;
1796 ret = write_header(&hdr, out);
1800 if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
1801 if (fflush(out) != 0
1802 || fsync(fileno(out)) != 0)
1804 ERROR_WITH_ERRNO("Error flushing data to WIM file");
1805 ret = WIMLIB_ERR_WRITE;
/* fclose() also flushes; its failure means data may not have hit disk. */
1809 if (fclose(out) != 0) {
1810 ERROR_WITH_ERRNO("Failed to close the WIM file");
1812 ret = WIMLIB_ERR_WRITE;
1818 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
/*
 * Takes a non-blocking exclusive flock() on the WIM file to guard against
 * concurrent modification.  EWOULDBLOCK (another process holds the lock)
 * is a hard error; any other flock() failure only warns, since flock()
 * support varies by filesystem.  Compiled only where flock() exists.
 */
1820 lock_wim(WIMStruct *w, FILE *fp)
1823 if (fp && !w->wim_locked) {
1824 ret = flock(fileno(fp), LOCK_EX | LOCK_NB);
1826 if (errno == EWOULDBLOCK) {
1827 ERROR("`%"TS"' is already being modified or has been "
1828 "mounted read-write\n"
1829 " by another process!", w->filename);
1830 ret = WIMLIB_ERR_ALREADY_LOCKED;
1832 WARNING_WITH_ERRNO("Failed to lock `%"TS"'",
/*
 * Opens @path for writing into w->out_fp.  @trunc selects truncate vs.
 * append/update mode and @also_readable adds read access (the mode-string
 * selection lines are not visible in this sampled listing — TODO confirm
 * against full source).  Returns WIMLIB_ERR_OPEN on failure.
 */
1845 open_wim_writable(WIMStruct *w, const tchar *path,
1846 bool trunc, bool also_readable)
1857 wimlib_assert(w->out_fp == NULL);
1858 w->out_fp = tfopen(path, mode);
1862 ERROR_WITH_ERRNO("Failed to open `%"TS"' for writing", path);
1863 return WIMLIB_ERR_OPEN;
/* Closes w->out_fp if open; a close failure only warns because the
 * meaningful write errors were already checked in finish_write(). */
1869 close_wim_writable(WIMStruct *w)
1872 if (fclose(w->out_fp) != 0) {
1873 WARNING_WITH_ERRNO("Failed to close output WIM");
1879 /* Open file stream and write dummy header for WIM. */
/* The placeholder header reserves the file's first bytes; finish_write()
 * seeks back and overwrites it with the real header. */
1881 begin_write(WIMStruct *w, const tchar *path, int write_flags)
1884 ret = open_wim_writable(w, path, true,
1885 (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) != 0);
1888 /* Write dummy header. It will be overwritten later. */
1889 return write_header(&w->hdr, w->out_fp);
1892 /* Writes a stand-alone WIM to a file. */
/*
 * Public API: validates arguments (image index in range, not a split
 * WIM), then begin_write() -> stream data -> per-image metadata
 * resources -> finish_write().  Masks write_flags to the public subset
 * so callers cannot set private flags.
 * NOTE(review): sampled listing — interior error-path lines are missing.
 */
1894 wimlib_write(WIMStruct *w, const tchar *path,
1895 int image, int write_flags, unsigned num_threads,
1896 wimlib_progress_func_t progress_func)
1901 return WIMLIB_ERR_INVALID_PARAM;
1903 write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
1905 if (image != WIMLIB_ALL_IMAGES &&
1906 (image < 1 || image > w->hdr.image_count))
1907 return WIMLIB_ERR_INVALID_IMAGE;
1909 if (w->hdr.total_parts != 1) {
1910 ERROR("Cannot call wimlib_write() on part of a split WIM");
1911 return WIMLIB_ERR_SPLIT_UNSUPPORTED;
1914 ret = begin_write(w, path, write_flags);
1918 ret = write_wim_streams(w, image, write_flags, num_threads,
1924 progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN, NULL);
1926 ret = for_image(w, image, write_metadata_resource);
1931 progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_END, NULL);
1933 ret = finish_write(w, image, write_flags, progress_func);
1935 close_wim_writable(w);
1936 DEBUG("wimlib_write(path=%"TS") = %d", path, ret);
/* Returns whether any image in @w has its 'modified' flag set (the
 * return statements are outside the visible lines of this listing). */
1941 any_images_modified(WIMStruct *w)
1943 for (int i = 0; i < w->hdr.image_count; i++)
1944 if (w->image_metadata[i]->modified)
1950 * Overwrite a WIM, possibly appending streams to it.
1952 * A WIM looks like (or is supposed to look like) the following:
1954 * Header (212 bytes)
1955 * Streams and metadata resources (variable size)
1956 * Lookup table (variable size)
1957 * XML data (variable size)
1958 * Integrity table (optional) (variable size)
1960 * If we are not adding any streams or metadata resources, the lookup table is
1961 * unchanged--- so we only need to overwrite the XML data, integrity table, and
1962 * header. This operation is potentially unsafe if the program is abruptly
1963 * terminated while the XML data or integrity table are being overwritten, but
1964 * before the new header has been written. To partially alleviate this problem,
1965 * a special flag (WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) is passed to
1966 * finish_write() to cause a temporary WIM header to be written after the XML
1967 * data has been written. This may prevent the WIM from becoming corrupted if
1968 * the program is terminated while the integrity table is being calculated (but
1969 * no guarantees, due to write re-ordering...).
1971 * If we are adding new streams or images (metadata resources), the lookup table
1972 * needs to be changed, and those streams need to be written. In this case, we
1973 * try to perform a safe update of the WIM file by writing the streams *after*
1974 * the end of the previous WIM, then writing the new lookup table, XML data, and
1975 * (optionally) integrity table following the new streams. This will produce a
1976 * layout like the following:
1978 * Header (212 bytes)
1979 * (OLD) Streams and metadata resources (variable size)
1980 * (OLD) Lookup table (variable size)
1981 * (OLD) XML data (variable size)
1982 * (OLD) Integrity table (optional) (variable size)
1983 * (NEW) Streams and metadata resources (variable size)
1984 * (NEW) Lookup table (variable size)
1985 * (NEW) XML data (variable size)
1986 * (NEW) Integrity table (optional) (variable size)
1988 * At all points, the WIM is valid as nothing points to the new data yet. Then,
1989 * the header is overwritten to point to the new lookup table, XML data, and
1990 * integrity table, to produce the following layout:
1992 * Header (212 bytes)
1993 * Streams and metadata resources (variable size)
1994 * Nothing (variable size)
1995 * More Streams and metadata resources (variable size)
1996 * Lookup table (variable size)
1997 * XML data (variable size)
1998 * Integrity table (optional) (variable size)
2000 * This method allows an image to be appended to a large WIM very quickly, and
2001 * is crash-safe except in the case of write re-ordering, but the
2002 * disadvantage is that a small hole is left in the WIM where the old lookup
2003 * table, xml data, and integrity table were. (These usually only take up a
2004 * small amount of space compared to the streams, however.)
/*
 * In-place overwrite (see the layout discussion in the comment block
 * above): verifies resource ordering, computes old_wim_end (where new
 * data may start), appends new streams and modified metadata, then
 * rewrites lookup table / XML / integrity table / header.  On failure
 * the file is truncated back to its original size.
 * NOTE(review): sampled listing — interior lines are missing.
 */
2007 overwrite_wim_inplace(WIMStruct *w, int write_flags,
2008 unsigned num_threads,
2009 wimlib_progress_func_t progress_func)
2012 struct list_head stream_list;
2015 DEBUG("Overwriting `%"TS"' in-place", w->filename);
2017 /* Make sure that the integrity table (if present) is after the XML
2018 * data, and that there are no stream resources, metadata resources, or
2019 * lookup tables after the XML data. Otherwise, these data would be
2021 if (w->hdr.integrity.offset != 0 &&
2022 w->hdr.integrity.offset < w->hdr.xml_res_entry.offset) {
2023 ERROR("Didn't expect the integrity table to be before the XML data");
2024 return WIMLIB_ERR_RESOURCE_ORDER;
2027 if (w->hdr.lookup_table_res_entry.offset > w->hdr.xml_res_entry.offset) {
2028 ERROR("Didn't expect the lookup table to be after the XML data");
2029 return WIMLIB_ERR_RESOURCE_ORDER;
/* New data goes after everything currently in the file. */
2033 if (w->hdr.integrity.offset)
2034 old_wim_end = w->hdr.integrity.offset + w->hdr.integrity.size;
2036 old_wim_end = w->hdr.xml_res_entry.offset + w->hdr.xml_res_entry.size;
2038 if (!w->deletion_occurred && !any_images_modified(w)) {
2039 /* If no images have been modified and no images have been
2040 * deleted, a new lookup table does not need to be written. */
2041 old_wim_end = w->hdr.lookup_table_res_entry.offset +
2042 w->hdr.lookup_table_res_entry.size;
2043 write_flags |= WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE |
2044 WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML;
2046 ret = prepare_streams_for_overwrite(w, old_wim_end, &stream_list);
/* Open without truncation — the existing data must be preserved. */
2050 ret = open_wim_writable(w, w->filename, false,
2051 (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) != 0);
2055 ret = lock_wim(w, w->out_fp);
2062 if (fseeko(w->out_fp, old_wim_end, SEEK_SET) != 0) {
2063 ERROR_WITH_ERRNO("Can't seek to end of WIM");
2067 return WIMLIB_ERR_WRITE;
2070 DEBUG("Writing newly added streams (offset = %"PRIu64")",
2072 ret = write_stream_list(&stream_list,
2075 wimlib_get_compression_type(w),
/* Re-write metadata resources only for images that changed. */
2082 for (int i = 0; i < w->hdr.image_count; i++) {
2083 if (w->image_metadata[i]->modified) {
2084 select_wim_image(w, i + 1);
2085 ret = write_metadata_resource(w);
2090 write_flags |= WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE;
2091 ret = finish_write(w, WIMLIB_ALL_IMAGES, write_flags,
2094 close_wim_writable(w);
/* A failed append may have left partial data past the old end; restore
 * the original file size so the WIM stays consistent. */
2095 if (ret != 0 && !(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
2096 WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)",
2097 w->filename, old_wim_end);
2098 /* Return value of truncate() is ignored because this is already
2100 (void)ttruncate(w->filename, old_wim_end);
/*
 * Safe overwrite: writes the entire WIM (with fsync) to a randomly-named
 * temporary file in the same directory, renames it over the original,
 * then re-opens the result read-only.  On failure the temporary file is
 * unlinked.  NOTE(review): sampled listing — interior lines are missing.
 */
2107 overwrite_wim_via_tmpfile(WIMStruct *w, int write_flags,
2108 unsigned num_threads,
2109 wimlib_progress_func_t progress_func)
2111 size_t wim_name_len;
2114 DEBUG("Overwriting `%"TS"' via a temporary file", w->filename);
2116 /* Write the WIM to a temporary file in the same directory as the
/* Temp name = original name + 9 random alphanumerics + NUL. */
2118 wim_name_len = tstrlen(w->filename);
2119 tchar tmpfile[wim_name_len + 10];
2120 tmemcpy(tmpfile, w->filename, wim_name_len);
2121 randomize_char_array_with_alnum(tmpfile + wim_name_len, 9);
2122 tmpfile[wim_name_len + 9] = T('\0');
/* FSYNC forces the data to disk before the rename commits it. */
2124 ret = wimlib_write(w, tmpfile, WIMLIB_ALL_IMAGES,
2125 write_flags | WIMLIB_WRITE_FLAG_FSYNC,
2126 num_threads, progress_func);
2128 ERROR("Failed to write the WIM file `%"TS"'", tmpfile);
2132 DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, w->filename);
2135 /* Windows won't let you delete open files unless FILE_SHARE_DELETE was
2136 * specified to CreateFile(). The WIM was opened with fopen(), which
2137 * didn't provide this flag to CreateFile, so the handle must be closed
2138 * before executing the rename(). */
2139 if (w->fp != NULL) {
2145 /* Rename the new file to the old file. */
2146 if (trename(tmpfile, w->filename) != 0) {
2147 ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'",
2148 tmpfile, w->filename);
2149 ret = WIMLIB_ERR_RENAME;
2153 if (progress_func) {
2154 union wimlib_progress_info progress;
2155 progress.rename.from = tmpfile;
2156 progress.rename.to = w->filename;
2157 progress_func(WIMLIB_PROGRESS_MSG_RENAME, &progress);
2160 /* Close the original WIM file that was opened for reading. */
2161 if (w->fp != NULL) {
2166 /* Re-open the WIM read-only. */
2167 w->fp = tfopen(w->filename, T("rb"));
2168 if (w->fp == NULL) {
2169 ret = WIMLIB_ERR_REOPEN;
2170 WARNING_WITH_ERRNO("Failed to re-open `%"TS"' read-only",
2177 /* Remove temporary file. */
2178 if (tunlink(tmpfile) != 0)
2179 WARNING_WITH_ERRNO("Failed to remove `%"TS"'", tmpfile);
2184 * Writes a WIM file to the original file that it was read from, overwriting it.
/*
 * Public API: chooses between the fast in-place append
 * (overwrite_wim_inplace()) and the safe full rebuild through a temp
 * file.  In-place is attempted unless a hard deletion occurred or
 * WIMLIB_WRITE_FLAG_REBUILD was requested; WIMLIB_ERR_RESOURCE_ORDER
 * from the in-place path triggers a fallback to the rebuild.
 */
2187 wimlib_overwrite(WIMStruct *w, int write_flags,
2188 unsigned num_threads,
2189 wimlib_progress_func_t progress_func)
2191 write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
2194 return WIMLIB_ERR_NO_FILENAME;
2196 if (w->hdr.total_parts != 1) {
2197 ERROR("Cannot modify a split WIM");
2198 return WIMLIB_ERR_SPLIT_UNSUPPORTED;
2201 if ((!w->deletion_occurred || (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
2202 && !(write_flags & WIMLIB_WRITE_FLAG_REBUILD))
2205 ret = overwrite_wim_inplace(w, write_flags, num_threads,
2207 if (ret == WIMLIB_ERR_RESOURCE_ORDER)
2208 WARNING("Falling back to re-building entire WIM");
2212 return overwrite_wim_via_tmpfile(w, write_flags, num_threads,