4 * Support for writing WIM files; write a WIM file, overwrite a WIM file, write
5 * compressed file resources, etc.
9 * Copyright (C) 2010 Carl Thijssen
10 * Copyright (C) 2012 Eric Biggers
12 * This file is part of wimlib, a library for working with WIM files.
14 * wimlib is free software; you can redistribute it and/or modify it under the
15 * terms of the GNU General Public License as published by the Free
16 * Software Foundation; either version 3 of the License, or (at your option)
19 * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
20 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
21 * A PARTICULAR PURPOSE. See the GNU General Public License for more
24 * You should have received a copy of the GNU General Public License
25 * along with wimlib; if not, see http://www.gnu.org/licenses/.
30 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
31 /* On BSD, this should be included before "list.h" so that "list.h" can
32 * overwrite the LIST_HEAD macro. */
37 #include "wimlib_internal.h"
40 #include "lookup_table.h"
45 #ifdef ENABLE_MULTITHREADED_COMPRESSION
54 #include <ntfs-3g/attrib.h>
55 #include <ntfs-3g/inode.h>
56 #include <ntfs-3g/dir.h>
/* Flush buffered stdio data to the output WIM file.  Returns 0 on
 * success or WIMLIB_ERR_WRITE on failure.  (The fflush() call itself is
 * in a portion of the file elided from this excerpt.) */
66 static int do_fflush(FILE *fp)
70 ERROR_WITH_ERRNO("Failed to flush data to output WIM file");
71 return WIMLIB_ERR_WRITE;
/* Flush @fp, then truncate the underlying file to exactly @size bytes.
 * Used to discard bytes written past a known-good offset (see the
 * over-sized-compressed-resource fallback in write_wim_resource()).
 * Returns 0 on success or WIMLIB_ERR_WRITE on failure. */
76 static int fflush_and_ftruncate(FILE *fp, off_t size)
83 ret = ftruncate(fileno(fp), size);
85 ERROR_WITH_ERRNO("Failed to truncate output WIM file to "
86 "%"PRIu64" bytes", size);
87 return WIMLIB_ERR_WRITE;
92 /* Chunk table that's located at the beginning of each compressed resource in
93 * the WIM. (This is not the on-disk format; the on-disk format just has an
94 * array of offsets.) */
/* Uncompressed size of the resource this chunk table describes. */
98 u64 original_resource_size;
/* 8 if the uncompressed resource size is >= 4 GiB, otherwise 4
 * (set in begin_wim_resource_chunk_tab()). */
99 u64 bytes_per_chunk_entry;
107 * Allocates and initializes a chunk table, and reserves space for it in the
/*
 * Allocates and initializes an in-memory chunk table for the resource
 * @lte, and reserves room for the on-disk chunk table by writing
 * table_disk_size placeholder bytes at the current position of the
 * output file (@file_offset).  The real chunk offsets are written over
 * the placeholder later by finish_wim_resource_chunk_tab().
 *
 * On success, returns the new table through @chunk_tab_ret.  Error
 * paths (allocation failure, short write) are partially elided from
 * this excerpt but set ret to WIMLIB_ERR_NOMEM / WIMLIB_ERR_WRITE.
 */
111 begin_wim_resource_chunk_tab(const struct lookup_table_entry *lte,
114 struct chunk_table **chunk_tab_ret)
116 u64 size = wim_resource_size(lte);
/* Number of WIM_CHUNK_SIZE chunks, rounding the last partial chunk up. */
117 u64 num_chunks = (size + WIM_CHUNK_SIZE - 1) / WIM_CHUNK_SIZE;
118 size_t alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64);
119 struct chunk_table *chunk_tab = CALLOC(1, alloc_size);
123 ERROR("Failed to allocate chunk table for %"PRIu64" byte "
125 ret = WIMLIB_ERR_NOMEM;
128 chunk_tab->file_offset = file_offset;
129 chunk_tab->num_chunks = num_chunks;
130 chunk_tab->original_resource_size = size;
/* On-disk entries are 8 bytes only when offsets may not fit in 32 bits. */
131 chunk_tab->bytes_per_chunk_entry = (size >= (1ULL << 32)) ? 8 : 4;
132 chunk_tab->table_disk_size = chunk_tab->bytes_per_chunk_entry *
134 chunk_tab->cur_offset = 0;
135 chunk_tab->cur_offset_p = chunk_tab->offsets;
/* Write placeholder bytes where the chunk table will later go. */
137 if (fwrite(chunk_tab, 1, chunk_tab->table_disk_size, out_fp) !=
138 chunk_tab->table_disk_size) {
139 ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
141 ret = WIMLIB_ERR_WRITE;
147 *chunk_tab_ret = chunk_tab;
152 * Pointer to function to compresses a chunk of a WIM resource.
154 * @chunk: Uncompressed data of the chunk.
155 * @chunk_size: Size of the uncompressed chunk in bytes.
156 * @compressed_chunk: Pointer to output buffer of size at least
157 * (@chunk_size - 1) bytes.
158 * @compressed_chunk_len_ret: Pointer to an unsigned int into which the size
159 * of the compressed chunk will be
162 * Returns zero if compression succeeded, and nonzero if the chunk could not be
163 * compressed to any smaller than @chunk_size. This function cannot fail for
166 typedef int (*compress_func_t)(const void *, unsigned, void *, unsigned *);
/* Returns the compression function for the compression type @out_ctype
 * (LZX or XPRESS).  The LZX return statement is elided from this
 * excerpt; the XPRESS case falls through to xpress_compress. */
168 compress_func_t get_compress_func(int out_ctype)
170 if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX)
173 return xpress_compress;
177 * Writes a chunk of a WIM resource to an output file.
179 * @chunk: Uncompressed data of the chunk.
180 * @chunk_size: Size of the chunk (<= WIM_CHUNK_SIZE)
181 * @out_fp:	FILE * to write the chunk to.
182 * @out_ctype: Compression type to use when writing the chunk (ignored if no
183 * chunk table provided)
184 * @chunk_tab: Pointer to chunk table being created. It is updated with the
185 * offset of the chunk we write.
187 * Returns 0 on success; nonzero on failure.
/* Writes one chunk of a WIM resource to @out_fp, compressed with
 * @compress if a @chunk_tab is provided and compression saves space;
 * otherwise the chunk is written uncompressed.  The chunk's offset is
 * recorded in @chunk_tab.  Returns 0 on success or WIMLIB_ERR_WRITE. */
189 static int write_wim_resource_chunk(const u8 chunk[], unsigned chunk_size,
190 FILE *out_fp, compress_func_t compress,
191 struct chunk_table *chunk_tab)
194 unsigned out_chunk_size;
/* Scratch buffer for the compressed data; chunk_size <= WIM_CHUNK_SIZE,
 * so this stack allocation is bounded. */
196 u8 *compressed_chunk = alloca(chunk_size);
199 ret = compress(chunk, chunk_size, compressed_chunk,
202 out_chunk = compressed_chunk;
/* compress() returned nonzero: the chunk did not shrink, so fall back
 * to writing it uncompressed (elided control flow between these
 * assignments handles the two cases). */
205 out_chunk_size = chunk_size;
/* Record this chunk's offset within the compressed resource data. */
207 *chunk_tab->cur_offset_p++ = chunk_tab->cur_offset;
208 chunk_tab->cur_offset += out_chunk_size;
211 out_chunk_size = chunk_size;
213 if (fwrite(out_chunk, 1, out_chunk_size, out_fp) != out_chunk_size) {
214 ERROR_WITH_ERRNO("Failed to write WIM resource chunk");
215 return WIMLIB_ERR_WRITE;
221 * Finishes a WIM chunk table and writes it to the output file at the correct
224 * The final size of the full compressed resource is returned in the
225 * @compressed_size_p.
/*
 * Seeks back to the placeholder written by begin_wim_resource_chunk_tab(),
 * byte-swaps the collected chunk offsets to little-endian, writes the
 * on-disk chunk table, then seeks back to the end of the file.  The
 * total compressed resource size (chunk table + chunk data) is returned
 * through @compressed_size_p.
 */
228 finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab,
229 FILE *out_fp, u64 *compressed_size_p)
231 size_t bytes_written;
232 if (fseeko(out_fp, chunk_tab->file_offset, SEEK_SET) != 0) {
233 ERROR_WITH_ERRNO("Failed to seek to byte %"PRIu64" of output "
234 "WIM file", chunk_tab->file_offset);
235 return WIMLIB_ERR_WRITE;
238 if (chunk_tab->bytes_per_chunk_entry == 8) {
239 array_cpu_to_le64(chunk_tab->offsets, chunk_tab->num_chunks);
/* 4-byte entries: narrow each u64 offset to a le32 in place.  This is
 * safe left-to-right because the u32 write at index i never lands past
 * the u64 read at index i. */
241 for (u64 i = 0; i < chunk_tab->num_chunks; i++)
242 ((u32*)chunk_tab->offsets)[i] =
243 cpu_to_le32(chunk_tab->offsets[i]);
/* NOTE(review): the write starts one entry past offsets[], which looks
 * like it skips the first chunk's offset (implicitly 0 on disk) --
 * consistent with table_disk_size presumably being (num_chunks - 1)
 * entries; the elided line 133 would confirm. */
245 bytes_written = fwrite((u8*)chunk_tab->offsets +
246 chunk_tab->bytes_per_chunk_entry,
247 1, chunk_tab->table_disk_size, out_fp);
248 if (bytes_written != chunk_tab->table_disk_size) {
249 ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
251 return WIMLIB_ERR_WRITE;
253 if (fseeko(out_fp, 0, SEEK_END) != 0) {
254 ERROR_WITH_ERRNO("Failed to seek to end of output WIM file");
255 return WIMLIB_ERR_WRITE;
257 *compressed_size_p = chunk_tab->cur_offset + chunk_tab->table_disk_size;
261 /* Prepare for multiple reads to a resource by caching a FILE * or NTFS
262 * attribute pointer in the lookup table entry. */
/*
 * Caches an open handle on @lte so that repeated reads of the resource
 * do not reopen it each time: a FILE * for a resource in an external
 * file, or an ntfs_inode/ntfs_attr pair for a resource in an NTFS
 * volume (the @ni_ret out-parameter exists only in the WITH_NTFS_3G
 * build; the #ifdef lines are elided from this excerpt).
 *
 * Returns 0 on success, WIMLIB_ERR_OPEN or WIMLIB_ERR_NTFS_3G on
 * failure.
 */
263 static int prepare_resource_for_read(struct lookup_table_entry *lte
266 , ntfs_inode **ni_ret
270 if (lte->resource_location == RESOURCE_IN_FILE_ON_DISK
271 && !lte->file_on_disk_fp)
273 wimlib_assert(lte->file_on_disk);
274 lte->file_on_disk_fp = fopen(lte->file_on_disk, "rb");
275 if (!lte->file_on_disk_fp) {
276 ERROR_WITH_ERRNO("Failed to open the file `%s' for "
277 "reading", lte->file_on_disk);
278 return WIMLIB_ERR_OPEN;
282 else if (lte->resource_location == RESOURCE_IN_NTFS_VOLUME
285 struct ntfs_location *loc = lte->ntfs_loc;
288 ni = ntfs_pathname_to_inode(*loc->ntfs_vol_p, NULL, loc->path_utf8);
290 ERROR_WITH_ERRNO("Failed to open inode `%s' in NTFS "
291 "volume", loc->path_utf8);
292 return WIMLIB_ERR_NTFS_3G;
/* Open either the reparse-point attribute or the (possibly named)
 * data attribute, depending on how the stream was captured. */
294 lte->attr = ntfs_attr_open(ni,
295 loc->is_reparse_point ? AT_REPARSE_POINT : AT_DATA,
296 (ntfschar*)loc->stream_name_utf16,
297 loc->stream_name_utf16_num_chars);
299 ERROR_WITH_ERRNO("Failed to open attribute of `%s' in "
300 "NTFS volume", loc->path_utf8);
/* Close the inode we just opened so it is not leaked on error. */
301 ntfs_inode_close(ni);
302 return WIMLIB_ERR_NTFS_3G;
310 /* Undo prepare_resource_for_read() by closing the cached FILE * or NTFS
/* Undo prepare_resource_for_read(): close and clear the cached FILE *,
 * or close the cached NTFS attribute and (in the WITH_NTFS_3G build)
 * the inode passed by the caller. */
312 static void end_wim_resource_read(struct lookup_table_entry *lte
318 if (lte->resource_location == RESOURCE_IN_FILE_ON_DISK
319 && lte->file_on_disk_fp) {
320 fclose(lte->file_on_disk_fp);
321 lte->file_on_disk_fp = NULL;
324 else if (lte->resource_location == RESOURCE_IN_NTFS_VOLUME) {
326 ntfs_attr_close(lte->attr);
330 ntfs_inode_close(ni);
336 * Writes a WIM resource to a FILE * opened for writing. The resource may be
337 * written uncompressed or compressed depending on the @out_ctype parameter.
339 * If by chance the resource compresses to more than the original size (this may
340 * happen with random data or files that are pre-compressed), the resource is
341 * instead written uncompressed (and this is reflected in the @out_res_entry by
342 * removing the WIM_RESHDR_FLAG_COMPRESSED flag).
344 * @lte: The lookup table entry for the WIM resource.
345 * @out_fp: The FILE * to write the resource to.
346 * @out_ctype: The compression type of the resource to write. Note: if this is
347 * the same as the compression type of the WIM resource we
348 * need to read, we simply copy the data (i.e. we do not
349 * uncompress it, then compress it again).
350 * @out_res_entry: If non-NULL, a resource entry that is filled in with the
351 * offset, original size, compressed size, and compression flag
352 * of the output resource.
354 * Returns 0 on success; nonzero on failure.
/*
 * Writes the resource @lte to @out_fp with compression type @out_ctype,
 * filling @out_res_entry (if non-NULL) with the resulting offset,
 * sizes, and flags.  See the comment block above for the full contract.
 * Significant portions of this function (declarations, early returns,
 * error-path gotos, #ifdef WITH_NTFS_3G alternation) are elided from
 * this excerpt, so the added comments below describe only what is
 * visible.
 */
356 int write_wim_resource(struct lookup_table_entry *lte,
357 FILE *out_fp, int out_ctype,
358 struct resource_entry *out_res_entry,
363 u64 old_compressed_size;
364 u64 new_compressed_size;
367 struct chunk_table *chunk_tab = NULL;
370 compress_func_t compress = NULL;
372 ntfs_inode *ni = NULL;
377 /* Original size of the resource */
378 original_size = wim_resource_size(lte);
380 /* Compressed size of the resource (as it exists now) */
381 old_compressed_size = wim_resource_compressed_size(lte);
383 /* Current offset in output file */
384 file_offset = ftello(out_fp);
385 if (file_offset == -1) {
386 ERROR_WITH_ERRNO("Failed to get offset in output "
388 return WIMLIB_ERR_WRITE;
391 /* Are the compression types the same? If so, do a raw copy (copy
392 * without decompressing and recompressing the data). */
393 raw = (wim_resource_compression_type(lte) == out_ctype
394 && out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
395 && !(flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS));
398 flags |= WIMLIB_RESOURCE_FLAG_RAW;
399 bytes_remaining = old_compressed_size;
401 flags &= ~WIMLIB_RESOURCE_FLAG_RAW;
402 bytes_remaining = original_size;
405 /* Empty resource; nothing needs to be done, so just return success. */
406 if (bytes_remaining == 0)
409 /* Buffer for reading chunks for the resource */
410 u8 buf[min(WIM_CHUNK_SIZE, bytes_remaining)];
412 /* If we are writing a compressed resource and not doing a raw copy, we
413 * need to initialize the chunk table */
414 if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE && !raw) {
415 ret = begin_wim_resource_chunk_tab(lte, out_fp, file_offset,
421 /* If the WIM resource is in an external file, open a FILE * to it so we
422 * don't have to open a temporary one in read_wim_resource() for each
425 ret = prepare_resource_for_read(lte, &ni);
427 ret = prepare_resource_for_read(lte);
432 /* If we aren't doing a raw copy, we will compute the SHA1 message
433 * digest of the resource as we read it, and verify it's the same as the
434 * hash given in the lookup table entry once we've finished reading the
439 compress = get_compress_func(out_ctype);
443 /* While there are still bytes remaining in the WIM resource, read a
444 * chunk of the resource, update SHA1, then write that chunk using the
445 * desired compression type. */
447 u64 to_read = min(bytes_remaining, WIM_CHUNK_SIZE);
448 ret = read_wim_resource(lte, buf, to_read, offset, flags);
452 sha1_update(&ctx, buf, to_read);
453 ret = write_wim_resource_chunk(buf, to_read, out_fp,
454 compress, chunk_tab);
457 bytes_remaining -= to_read;
459 } while (bytes_remaining);
461 /* Raw copy: The new compressed size is the same as the old compressed
464 * Using WIMLIB_COMPRESSION_TYPE_NONE: The new compressed size is the
467 * Using a different compression type: Call
468 * finish_wim_resource_chunk_tab() and it will provide the new
472 new_compressed_size = old_compressed_size;
474 if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
475 new_compressed_size = original_size;
477 ret = finish_wim_resource_chunk_tab(chunk_tab, out_fp,
478 &new_compressed_size);
484 /* Verify SHA1 message digest of the resource, unless we are doing a raw
485 * write (in which case we never even saw the uncompressed data). Or,
486 * if the hash we had before is all 0's, just re-set it to be the new
489 u8 md[SHA1_HASH_SIZE];
490 sha1_final(md, &ctx);
491 if (is_zero_hash(lte->hash)) {
492 copy_hash(lte->hash, md);
493 } else if (!hashes_equal(md, lte->hash)) {
494 ERROR("WIM resource has incorrect hash!");
495 if (lte->resource_location == RESOURCE_IN_FILE_ON_DISK) {
496 ERROR("We were reading it from `%s'; maybe it changed "
497 "while we were reading it.",
500 ret = WIMLIB_ERR_INVALID_RESOURCE_HASH;
505 if (!raw && new_compressed_size >= original_size &&
506 out_ctype != WIMLIB_COMPRESSION_TYPE_NONE)
508 /* Oops! We compressed the resource to larger than the original
509 * size. Write the resource uncompressed instead. */
510 if (fseeko(out_fp, file_offset, SEEK_SET) != 0) {
511 ERROR_WITH_ERRNO("Failed to seek to byte %"PRIu64" "
512 "of output WIM file", file_offset);
513 ret = WIMLIB_ERR_WRITE;
/* Recursive call with CTYPE_NONE; recursion depth is bounded at 1
 * because the uncompressed path cannot re-enter this branch. */
516 ret = write_wim_resource(lte, out_fp, WIMLIB_COMPRESSION_TYPE_NONE,
517 out_res_entry, flags);
/* Trim the over-long compressed bytes left past the rewritten data. */
521 ret = fflush_and_ftruncate(out_fp, file_offset + out_res_entry->size);
526 out_res_entry->size = new_compressed_size;
527 out_res_entry->original_size = original_size;
528 out_res_entry->offset = file_offset;
529 out_res_entry->flags = lte->resource_entry.flags
530 & ~WIM_RESHDR_FLAG_COMPRESSED;
531 if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE)
532 out_res_entry->flags |= WIM_RESHDR_FLAG_COMPRESSED;
538 end_wim_resource_read(lte, ni);
540 end_wim_resource_read(lte);
547 #ifdef ENABLE_MULTITHREADED_COMPRESSION
/* A fixed-capacity blocking FIFO of void* messages shared between the
 * main writer thread and the compressor threads.  (The array, size,
 * front and back members are elided from this excerpt.) */
548 struct shared_queue {
552 unsigned filled_slots;
554 pthread_mutex_t lock;
/* Signaled when a message is added; waited on by consumers. */
555 pthread_cond_t msg_avail_cond;
/* Signaled when a slot frees up; waited on by producers. */
556 pthread_cond_t space_avail_cond;
/* Initialize @q with capacity @size.  Returns 0 on success or
 * WIMLIB_ERR_NOMEM if the slot array cannot be allocated. */
559 static int shared_queue_init(struct shared_queue *q, unsigned size)
561 q->array = CALLOC(sizeof(q->array[0]), size);
563 return WIMLIB_ERR_NOMEM;
568 pthread_mutex_init(&q->lock, NULL);
569 pthread_cond_init(&q->msg_avail_cond, NULL);
570 pthread_cond_init(&q->space_avail_cond, NULL);
/* Release the mutex and condition variables of @q.  (Freeing of
 * q->array is elided from this excerpt.) */
574 static void shared_queue_destroy(struct shared_queue *q)
577 pthread_mutex_destroy(&q->lock);
578 pthread_cond_destroy(&q->msg_avail_cond);
579 pthread_cond_destroy(&q->space_avail_cond);
/* Enqueue @obj onto @q, blocking while the queue is full.  A NULL @obj
 * is used elsewhere in this file as a shutdown sentinel for the
 * compressor threads. */
582 static void shared_queue_put(struct shared_queue *q, void *obj)
584 pthread_mutex_lock(&q->lock);
/* Loop (not if) to guard against spurious condition-variable wakeups. */
585 while (q->filled_slots == q->size)
586 pthread_cond_wait(&q->space_avail_cond, &q->lock);
588 q->back = (q->back + 1) % q->size;
589 q->array[q->back] = obj;
592 pthread_cond_broadcast(&q->msg_avail_cond);
593 pthread_mutex_unlock(&q->lock);
/* Dequeue and return the oldest message from @q, blocking while the
 * queue is empty. */
596 static void *shared_queue_get(struct shared_queue *q)
600 pthread_mutex_lock(&q->lock);
601 while (q->filled_slots == 0)
602 pthread_cond_wait(&q->msg_avail_cond, &q->lock);
604 obj = q->array[q->front];
/* Clear the slot to avoid a stale pointer lingering in the array. */
605 q->array[q->front] = NULL;
606 q->front = (q->front + 1) % q->size;
609 pthread_cond_broadcast(&q->space_avail_cond);
610 pthread_mutex_unlock(&q->lock);
/* Per-thread arguments for compressor_thread_proc(): the two shared
 * queues and the compression function to apply. */
614 struct compressor_thread_params {
615 struct shared_queue *res_to_compress_queue;
616 struct shared_queue *compressed_res_queue;
617 compress_func_t compress;
620 #define MAX_CHUNKS_PER_MSG 2
/* The stream these chunks belong to. */
623 struct lookup_table_entry *lte;
624 u8 *uncompressed_chunks[MAX_CHUNKS_PER_MSG];
/* For each chunk, points at either compressed_chunks[i] or
 * uncompressed_chunks[i], whichever is smaller (see compress_chunks()). */
625 u8 *out_compressed_chunks[MAX_CHUNKS_PER_MSG];
626 u8 *compressed_chunks[MAX_CHUNKS_PER_MSG];
627 unsigned uncompressed_chunk_sizes[MAX_CHUNKS_PER_MSG];
628 unsigned compressed_chunk_sizes[MAX_CHUNKS_PER_MSG];
/* Links the message into available_msgs or an lte's msg_list. */
630 struct list_head list;
/* Compress each chunk in @msg with @compress.  If a chunk does not
 * shrink (compress() returns nonzero), the uncompressed data is used
 * as the output instead, so out_compressed_chunks[i] always points at
 * the smaller representation. */
635 static void compress_chunks(struct message *msg, compress_func_t compress)
637 for (unsigned i = 0; i < msg->num_chunks; i++) {
638 DEBUG2("compress chunk %u of %u", i, msg->num_chunks);
639 int ret = compress(msg->uncompressed_chunks[i],
640 msg->uncompressed_chunk_sizes[i],
641 msg->compressed_chunks[i],
642 &msg->compressed_chunk_sizes[i]);
644 msg->out_compressed_chunks[i] = msg->compressed_chunks[i];
646 msg->out_compressed_chunks[i] = msg->uncompressed_chunks[i];
647 msg->compressed_chunk_sizes[i] = msg->uncompressed_chunk_sizes[i];
/* Worker thread entry point: repeatedly take a message from the
 * to-compress queue, compress its chunks, and put it on the compressed
 * queue.  A NULL message is the shutdown sentinel (enqueued by
 * write_stream_list_parallel()). */
652 static void *compressor_thread_proc(void *arg)
654 struct compressor_thread_params *params = arg;
655 struct shared_queue *res_to_compress_queue = params->res_to_compress_queue;
656 struct shared_queue *compressed_res_queue = params->compressed_res_queue;
657 compress_func_t compress = params->compress;
660 DEBUG("Compressor thread ready");
661 while ((msg = shared_queue_get(res_to_compress_queue)) != NULL) {
662 compress_chunks(msg, compress);
663 shared_queue_put(compressed_res_queue, msg);
665 DEBUG("Compressor thread terminating");
/* Write every stream on @my_resources with write_wim_resource(),
 * removing each from the list and updating the byte/stream progress
 * counters (reporting through @progress_func when set).  Returns 0 on
 * success or the first write_wim_resource() error. */
670 static int do_write_stream_list(struct list_head *my_resources,
673 wimlib_progress_func_t progress_func,
674 union wimlib_progress_info *progress,
675 int write_resource_flags)
678 struct lookup_table_entry *lte, *tmp;
/* _safe variant because each entry is unlinked inside the loop. */
680 list_for_each_entry_safe(lte, tmp, my_resources, staging_list) {
681 ret = write_wim_resource(lte,
684 &lte->output_resource_entry,
685 write_resource_flags);
688 list_del(&lte->staging_list);
689 progress->write_streams.completed_bytes +=
690 wim_resource_size(lte);
691 progress->write_streams.completed_streams++;
693 progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
/* Single-threaded stream writer: translate WIMLIB_WRITE_FLAG_RECOMPRESS
 * into the per-resource flag, report initial progress, and delegate to
 * do_write_stream_list(). */
700 static int write_stream_list_serial(struct list_head *stream_list,
704 wimlib_progress_func_t progress_func,
705 union wimlib_progress_info *progress)
707 int write_resource_flags;
709 if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
710 write_resource_flags = WIMLIB_RESOURCE_FLAG_RECOMPRESS;
712 write_resource_flags = 0;
713 progress->write_streams.num_threads = 1;
715 progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
716 return do_write_stream_list(stream_list, out_fp,
717 out_ctype, progress_func,
718 progress, write_resource_flags);
721 #ifdef ENABLE_MULTITHREADED_COMPRESSION
/* Write the (already-compressed) chunks of @msg to @out_fp, recording
 * each chunk's offset in @chunk_tab.  Returns 0 on success or
 * WIMLIB_ERR_WRITE. */
722 static int write_wim_chunks(struct message *msg, FILE *out_fp,
723 struct chunk_table *chunk_tab)
725 for (unsigned i = 0; i < msg->num_chunks; i++) {
726 unsigned chunk_csize = msg->compressed_chunk_sizes[i];
728 DEBUG2("Write wim chunk %u of %u (csize = %u)",
729 i, msg->num_chunks, chunk_csize);
731 if (fwrite(msg->out_compressed_chunks[i], 1, chunk_csize, out_fp)
734 ERROR_WITH_ERRNO("Failed to write WIM chunk");
735 return WIMLIB_ERR_WRITE;
738 *chunk_tab->cur_offset_p++ = chunk_tab->cur_offset;
739 chunk_tab->cur_offset += chunk_csize;
745 * This function is executed by the main thread when the resources are being
746 * compressed in parallel. The main thread is in charge of all reading of the
747 * uncompressed data and writing of the compressed data. The compressor threads
748 * *only* do compression from/to in-memory buffers.
750 * Each unit of work given to a compressor thread is up to MAX_CHUNKS_PER_MSG
751 * chunks of data to compress, represented in a `struct message'.
752 * Each message is passed from the main thread to a worker thread through the
753 * res_to_compress_queue, and it is passed back through the
754 * compressed_res_queue.
/*
 * Main-thread half of the parallel stream writer (see the comment block
 * above).  Reads uncompressed chunks, feeds them to the compressor
 * threads via @res_to_compress_queue, receives finished messages on
 * @compressed_res_queue, and writes chunks to @out_fp in order.  Large
 * portions (declarations, error-path gotos, several #ifdef WITH_NTFS_3G
 * alternations) are elided from this excerpt.
 */
756 static int main_writer_thread_proc(struct list_head *stream_list,
759 struct shared_queue *res_to_compress_queue,
760 struct shared_queue *compressed_res_queue,
763 wimlib_progress_func_t progress_func,
764 union wimlib_progress_info *progress)
768 struct message msgs[queue_size];
771 // Initially, all the messages are available to use.
772 LIST_HEAD(available_msgs);
773 for (size_t i = 0; i < ARRAY_LEN(msgs); i++)
774 list_add(&msgs[i].list, &available_msgs);
776 // outstanding_resources is the list of resources that currently have
777 // had chunks sent off for compression.
779 // The first stream in outstanding_resources is the stream that is
780 // currently being written (cur_lte).
782 // The last stream in outstanding_resources is the stream that is
783 // currently being read and chunks fed to the compressor threads
786 // Depending on the number of threads and the sizes of the resource,
787 // the outstanding streams list may contain streams between cur_lte and
788 // next_lte that have all their chunks compressed or being compressed,
789 // but haven't been written yet.
791 LIST_HEAD(outstanding_resources);
792 struct list_head *next_resource = stream_list->next;
793 struct lookup_table_entry *next_lte = container_of(next_resource,
794 struct lookup_table_entry,
796 next_resource = next_resource->next;
798 u64 next_num_chunks = wim_resource_chunks(next_lte);
799 INIT_LIST_HEAD(&next_lte->msg_list);
800 list_add_tail(&next_lte->staging_list, &outstanding_resources);
802 // As in write_wim_resource(), each resource we read is checksummed.
803 SHA_CTX next_sha_ctx;
804 sha1_init(&next_sha_ctx);
805 u8 next_hash[SHA1_HASH_SIZE];
807 // Resources that don't need any chunks compressed are added to this
808 // list and written directly by the main thread.
809 LIST_HEAD(my_resources);
811 struct lookup_table_entry *cur_lte = next_lte;
812 struct chunk_table *cur_chunk_tab = NULL;
816 ntfs_inode *ni = NULL;
820 ret = prepare_resource_for_read(next_lte, &ni);
822 ret = prepare_resource_for_read(next_lte);
827 DEBUG("Initializing buffers for uncompressed "
828 "and compressed data (%zu bytes needed)",
829 queue_size * MAX_CHUNKS_PER_MSG * WIM_CHUNK_SIZE * 2);
831 // Pre-allocate all the buffers that will be needed to do the chunk
833 for (size_t i = 0; i < ARRAY_LEN(msgs); i++) {
834 for (size_t j = 0; j < MAX_CHUNKS_PER_MSG; j++) {
835 msgs[i].compressed_chunks[j] = MALLOC(WIM_CHUNK_SIZE);
836 msgs[i].uncompressed_chunks[j] = MALLOC(WIM_CHUNK_SIZE);
837 if (msgs[i].compressed_chunks[j] == NULL ||
838 msgs[i].uncompressed_chunks[j] == NULL)
840 ERROR("Could not allocate enough memory for "
841 "multi-threaded compression");
842 ret = WIMLIB_ERR_NOMEM;
848 // This loop is executed until all resources have been written, except
849 // possibly a few that have been added to the @my_resources list for
852 // Send chunks to the compressor threads until either (a) there
853 // are no more messages available since they were all sent off,
854 // or (b) there are no more resources that need to be
856 while (!list_empty(&available_msgs) && next_lte != NULL) {
858 // Get a message from the available messages
860 msg = container_of(available_msgs.next,
864 // ... and delete it from the available messages
866 list_del(&msg->list);
868 // Initialize the message with the chunks to
870 msg->num_chunks = min(next_num_chunks - next_chunk,
873 msg->complete = false;
874 msg->begin_chunk = next_chunk;
876 unsigned size = WIM_CHUNK_SIZE;
877 for (unsigned i = 0; i < msg->num_chunks; i++) {
879 // Read chunk @next_chunk of the stream into the
880 // message so that a compressor thread can
883 if (next_chunk == next_num_chunks - 1 &&
884 wim_resource_size(next_lte) % WIM_CHUNK_SIZE != 0)
886 size = wim_resource_size(next_lte) % WIM_CHUNK_SIZE;
890 DEBUG2("Read resource (size=%u, offset=%zu)",
891 size, next_chunk * WIM_CHUNK_SIZE);
893 msg->uncompressed_chunk_sizes[i] = size;
895 ret = read_wim_resource(next_lte,
896 msg->uncompressed_chunks[i],
898 next_chunk * WIM_CHUNK_SIZE,
902 sha1_update(&next_sha_ctx,
903 msg->uncompressed_chunks[i], size);
907 // Send the compression request
908 list_add_tail(&msg->list, &next_lte->msg_list);
909 shared_queue_put(res_to_compress_queue, msg);
910 DEBUG2("Compression request sent");
912 if (next_chunk != next_num_chunks)
913 // More chunks to send for this resource
916 // Done sending compression requests for a resource!
917 // Check the SHA1 message digest.
918 DEBUG2("Finalize SHA1 md (next_num_chunks=%zu)", next_num_chunks);
919 sha1_final(next_hash, &next_sha_ctx);
920 if (!hashes_equal(next_lte->hash, next_hash)) {
921 ERROR("WIM resource has incorrect hash!");
922 if (next_lte->resource_location == RESOURCE_IN_FILE_ON_DISK) {
923 ERROR("We were reading it from `%s'; maybe it changed "
924 "while we were reading it.",
925 next_lte->file_on_disk);
927 ret = WIMLIB_ERR_INVALID_RESOURCE_HASH;
931 // Advance to the next resource.
933 // If the next resource needs no compression, just write
934 // it with this thread (not now though--- we could be in
935 // the middle of writing another resource.) Keep doing
936 // this until we either get to the end of the resources
937 // list, or we get to a resource that needs compression.
940 if (next_resource == stream_list) {
945 end_wim_resource_read(next_lte, ni);
948 end_wim_resource_read(next_lte);
951 next_lte = container_of(next_resource,
952 struct lookup_table_entry,
954 next_resource = next_resource->next;
// Already-compressed (same ctype, no RECOMPRESS) or empty streams
// skip the compressor threads and go on my_resources instead.
955 if ((!(write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
956 && next_lte->resource_location == RESOURCE_IN_WIM
957 && wimlib_get_compression_type(next_lte->wim) == out_ctype)
958 || wim_resource_size(next_lte) == 0)
960 list_add_tail(&next_lte->staging_list,
963 list_add_tail(&next_lte->staging_list,
964 &outstanding_resources);
966 next_num_chunks = wim_resource_chunks(next_lte);
967 sha1_init(&next_sha_ctx);
968 INIT_LIST_HEAD(&next_lte->msg_list);
970 ret = prepare_resource_for_read(next_lte, &ni);
972 ret = prepare_resource_for_read(next_lte);
976 DEBUG2("Updated next_lte");
982 // If there are no outstanding resources, there are no more
983 // resources that need to be written.
984 if (list_empty(&outstanding_resources)) {
985 DEBUG("No outstanding resources! Done");
990 // Get the next message from the queue and process it.
991 // The message will contain 1 or more data chunks that have been
993 DEBUG2("Waiting for message");
994 msg = shared_queue_get(compressed_res_queue);
995 msg->complete = true;
997 DEBUG2("Received msg (begin_chunk=%"PRIu64")", msg->begin_chunk);
999 list_for_each_entry(msg, &cur_lte->msg_list, list) {
1000 DEBUG2("complete=%d", msg->complete);
1003 // Is this the next chunk in the current resource? If it's not
1004 // (i.e., an earlier chunk in a same or different resource
1005 // hasn't been compressed yet), do nothing, and keep this
1006 // message around until all earlier chunks are received.
1008 // Otherwise, write all the chunks we can.
1009 while (!list_empty(&cur_lte->msg_list)
1010 && (msg = container_of(cur_lte->msg_list.next,
1014 DEBUG2("Complete msg (begin_chunk=%"PRIu64")", msg->begin_chunk);
1015 if (msg->begin_chunk == 0) {
1016 DEBUG2("Begin chunk tab");
1018 // This is the first set of chunks. Leave space
1019 // for the chunk table in the output file.
1020 off_t cur_offset = ftello(out_fp);
1021 if (cur_offset == -1) {
1022 ret = WIMLIB_ERR_WRITE;
1025 ret = begin_wim_resource_chunk_tab(cur_lte,
1033 // Write the compressed chunks from the message.
1034 ret = write_wim_chunks(msg, out_fp, cur_chunk_tab);
1038 list_del(&msg->list);
1040 // This message is available to use for different chunks
1042 list_add(&msg->list, &available_msgs);
1044 // Was this the last chunk of the stream? If so,
1046 if (list_empty(&cur_lte->msg_list) &&
1047 msg->begin_chunk + msg->num_chunks == cur_chunk_tab->num_chunks)
1049 DEBUG2("Finish wim chunk tab");
1051 ret = finish_wim_resource_chunk_tab(cur_chunk_tab,
1057 progress->write_streams.completed_bytes +=
1058 wim_resource_size(cur_lte);
1059 progress->write_streams.completed_streams++;
1061 if (progress_func) {
1062 progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
1066 cur_lte->output_resource_entry.size =
1069 cur_lte->output_resource_entry.original_size =
1070 cur_lte->resource_entry.original_size;
1072 cur_lte->output_resource_entry.offset =
1073 cur_chunk_tab->file_offset;
1075 cur_lte->output_resource_entry.flags =
1076 cur_lte->resource_entry.flags |
1077 WIM_RESHDR_FLAG_COMPRESSED;
1079 FREE(cur_chunk_tab);
1080 cur_chunk_tab = NULL;
1082 struct list_head *next = cur_lte->staging_list.next;
1083 list_del(&cur_lte->staging_list);
1085 if (next == &outstanding_resources) {
1086 DEBUG("No more outstanding resources");
// NOTE(review): this reads cur_lte->staging_list.next after the
// list_del() above instead of using the saved `next` pointer --
// verify against the unelided source whether list_del() here
// poisons the node's links (if so, `next` should be used).
1090 cur_lte = container_of(cur_lte->staging_list.next,
1091 struct lookup_table_entry,
1095 // Since we just finished writing a stream,
1096 // write any streams that have been added to the
1097 // my_resources list for direct writing by the
1098 // main thread (e.g. resources that don't need
1099 // to be compressed because the desired
1100 // compression type is the same as the previous
1101 // compression type).
1102 ret = do_write_stream_list(&my_resources,
1116 end_wim_resource_read(cur_lte, ni);
1118 end_wim_resource_read(cur_lte);
1121 ret = do_write_stream_list(&my_resources, out_fp,
1122 out_ctype, progress_func,
// Drain the compressed queue so every message is accounted for before
// the message buffers are freed below.
1125 size_t num_available_msgs = 0;
1126 struct list_head *cur;
1128 list_for_each(cur, &available_msgs) {
1129 num_available_msgs++;
1132 while (num_available_msgs < ARRAY_LEN(msgs)) {
1133 shared_queue_get(compressed_res_queue);
1134 num_available_msgs++;
1138 for (size_t i = 0; i < ARRAY_LEN(msgs); i++) {
1139 for (size_t j = 0; j < MAX_CHUNKS_PER_MSG; j++) {
1140 FREE(msgs[i].compressed_chunks[j]);
1141 FREE(msgs[i].uncompressed_chunks[j]);
// FREE(NULL) is assumed safe here, but the guard keeps the original
// code's explicit check.
1145 if (cur_chunk_tab != NULL)
1146 FREE(cur_chunk_tab);
/*
 * Multithreaded stream writer: spin up @num_threads compressor threads
 * (autodetected from the CPU count when 0), run the main writer thread,
 * then shut the workers down with NULL sentinel messages and join them.
 * On certain failures (the elided condition near the end) it falls back
 * to write_stream_list_serial().
 */
1151 static int write_stream_list_parallel(struct list_head *stream_list,
1155 unsigned num_threads,
1156 wimlib_progress_func_t progress_func,
1157 union wimlib_progress_info *progress)
1160 struct shared_queue res_to_compress_queue;
1161 struct shared_queue compressed_res_queue;
1162 pthread_t *compressor_threads = NULL;
1164 if (num_threads == 0) {
1165 long nthreads = sysconf(_SC_NPROCESSORS_ONLN);
1167 WARNING("Could not determine number of processors! Assuming 1");
1170 num_threads = nthreads;
1174 progress->write_streams.num_threads = num_threads;
1175 wimlib_assert(stream_list->next != stream_list);
1177 static const double MESSAGES_PER_THREAD = 2.0;
1178 size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD);
1180 DEBUG("Initializing shared queues (queue_size=%zu)", queue_size);
1182 ret = shared_queue_init(&res_to_compress_queue, queue_size);
1186 ret = shared_queue_init(&compressed_res_queue, queue_size);
1188 goto out_destroy_res_to_compress_queue;
1190 struct compressor_thread_params params;
1191 params.res_to_compress_queue = &res_to_compress_queue;
1192 params.compressed_res_queue = &compressed_res_queue;
1193 params.compress = get_compress_func(out_ctype);
1195 compressor_threads = MALLOC(num_threads * sizeof(pthread_t));
1197 for (unsigned i = 0; i < num_threads; i++) {
1198 DEBUG("pthread_create thread %u", i);
1199 ret = pthread_create(&compressor_threads[i], NULL,
/* NOTE(review): "¶ms" below is a mis-encoded "&params" (HTML
 * entity mangling in this excerpt); the unelided source passes the
 * address of the params struct. */
1200 compressor_thread_proc, ¶ms);
1203 ERROR_WITH_ERRNO("Failed to create compressor "
1211 progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
1213 ret = main_writer_thread_proc(stream_list,
1216 &res_to_compress_queue,
1217 &compressed_res_queue,
/* One NULL sentinel per worker tells compressor_thread_proc() to exit. */
1223 for (unsigned i = 0; i < num_threads; i++)
1224 shared_queue_put(&res_to_compress_queue, NULL);
1226 for (unsigned i = 0; i < num_threads; i++) {
1227 if (pthread_join(compressor_threads[i], NULL)) {
1228 WARNING("Failed to join compressor thread %u: %s",
1229 i, strerror(errno));
1232 FREE(compressor_threads);
1233 shared_queue_destroy(&compressed_res_queue);
1234 out_destroy_res_to_compress_queue:
1235 shared_queue_destroy(&res_to_compress_queue);
1236 if (ret >= 0 && ret != WIMLIB_ERR_NOMEM)
1239 WARNING("Falling back to single-threaded compression");
1240 return write_stream_list_serial(stream_list,
1251 * Write a list of streams to a WIM (@out_fp) using the compression type
1252 * @out_ctype and up to @num_threads compressor threads.
/* Top-level dispatcher: total up stream counts/bytes for progress
 * reporting, decide whether any stream actually needs (re)compression,
 * and pick the parallel writer (when compiled in, compression is
 * needed, total size >= ~1 MB, and num_threads != 1) or the serial
 * writer otherwise. */
1254 static int write_stream_list(struct list_head *stream_list, FILE *out_fp,
1255 int out_ctype, int write_flags,
1256 unsigned num_threads,
1257 wimlib_progress_func_t progress_func)
1259 struct lookup_table_entry *lte;
1260 size_t num_streams = 0;
1261 u64 total_bytes = 0;
1262 bool compression_needed = false;
1263 union wimlib_progress_info progress;
1266 list_for_each_entry(lte, stream_list, staging_list) {
1268 total_bytes += wim_resource_size(lte);
/* Compression is needed only for a non-empty stream that is not
 * already stored in a WIM with the desired compression type (or when
 * a REBUILD forces recompression). */
1269 if (!compression_needed
1271 (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
1272 && (lte->resource_location != RESOURCE_IN_WIM
1273 || wimlib_get_compression_type(lte->wim) != out_ctype
1274 || (write_flags & WIMLIB_WRITE_FLAG_REBUILD)))
1275 && wim_resource_size(lte) != 0)
1276 compression_needed = true;
1278 progress.write_streams.total_bytes = total_bytes;
1279 progress.write_streams.total_streams = num_streams;
1280 progress.write_streams.completed_bytes = 0;
1281 progress.write_streams.completed_streams = 0;
1282 progress.write_streams.num_threads = num_threads;
1283 progress.write_streams.compression_type = out_ctype;
1285 if (num_streams == 0) {
1290 #ifdef ENABLE_MULTITHREADED_COMPRESSION
1291 if (compression_needed && total_bytes >= 1000000 && num_threads != 1) {
1292 ret = write_stream_list_parallel(stream_list,
1303 ret = write_stream_list_serial(stream_list,
1315 static int dentry_find_streams_to_write(struct dentry *dentry,
1319 struct list_head *stream_list = w->private;
1320 struct lookup_table_entry *lte;
1321 for (unsigned i = 0; i <= dentry->d_inode->num_ads; i++) {
1322 lte = inode_stream_lte(dentry->d_inode, i, w->lookup_table);
1323 if (lte && ++lte->out_refcnt == 1)
1324 list_add_tail(<e->staging_list, stream_list);
1329 static int find_streams_to_write(WIMStruct *w)
1331 return for_dentry_in_tree(wim_root_dentry(w),
1332 dentry_find_streams_to_write, w);
1335 static int write_wim_streams(WIMStruct *w, int image, int write_flags,
1336 unsigned num_threads,
1337 wimlib_progress_func_t progress_func)
1340 for_lookup_table_entry(w->lookup_table, lte_zero_out_refcnt, NULL);
1341 LIST_HEAD(stream_list);
1342 w->private = &stream_list;
1343 for_image(w, image, find_streams_to_write);
1344 return write_stream_list(&stream_list, w->out_fp,
1345 wimlib_get_compression_type(w), write_flags,
1346 num_threads, progress_func);
1350 * Finish writing a WIM file: write the lookup table, xml data, and integrity
1351 * table (optional), then overwrite the WIM header.
1353 * write_flags is a bitwise OR of the following:
1355 * (public) WIMLIB_WRITE_FLAG_CHECK_INTEGRITY:
1356 * Include an integrity table.
1358 * (public) WIMLIB_WRITE_FLAG_SHOW_PROGRESS:
1359 * Show progress information when (if) writing the integrity table.
1361 * (private) WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE:
1362 * Don't write the lookup table.
1364 * (private) WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE:
1365 * When (if) writing the integrity table, re-use entries from the
1366 * existing integrity table, if possible.
1368 * (private) WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML:
1369 * After writing the XML data but before writing the integrity
1370 * table, write a temporary WIM header and flush the stream so that
1371 * the WIM is less likely to become corrupted upon abrupt program
1374 * (private) WIMLIB_WRITE_FLAG_FSYNC:
1375 * fsync() the output file before closing it.
1378 int finish_write(WIMStruct *w, int image, int write_flags,
1379 wimlib_progress_func_t progress_func)
1382 struct wim_header hdr;
1383 FILE *out = w->out_fp;
1385 /* @hdr will be the header for the new WIM. First copy all the data
1386 * from the header in the WIMStruct; then set all the fields that may
1387 * have changed, including the resource entries, boot index, and image
1389 memcpy(&hdr, &w->hdr, sizeof(struct wim_header));
1391 if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
1392 ret = write_lookup_table(w->lookup_table, out, &hdr.lookup_table_res_entry);
1397 ret = write_xml_data(w->wim_info, image, out,
1398 (write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE) ?
1399 wim_info_get_total_bytes(w->wim_info) : 0,
1400 &hdr.xml_res_entry);
1404 if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
1405 if (write_flags & WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) {
1406 struct wim_header checkpoint_hdr;
1407 memcpy(&checkpoint_hdr, &hdr, sizeof(struct wim_header));
1408 memset(&checkpoint_hdr.integrity, 0, sizeof(struct resource_entry));
1409 if (fseeko(out, 0, SEEK_SET) != 0) {
1410 ret = WIMLIB_ERR_WRITE;
1413 ret = write_header(&checkpoint_hdr, out);
1417 if (fflush(out) != 0) {
1418 ERROR_WITH_ERRNO("Can't write data to WIM");
1419 ret = WIMLIB_ERR_WRITE;
1423 if (fseeko(out, 0, SEEK_END) != 0) {
1424 ret = WIMLIB_ERR_WRITE;
1429 off_t old_lookup_table_end;
1430 off_t new_lookup_table_end;
1431 if (write_flags & WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE) {
1432 old_lookup_table_end = w->hdr.lookup_table_res_entry.offset +
1433 w->hdr.lookup_table_res_entry.size;
1435 old_lookup_table_end = 0;
1437 new_lookup_table_end = hdr.lookup_table_res_entry.offset +
1438 hdr.lookup_table_res_entry.size;
1440 ret = write_integrity_table(out,
1442 new_lookup_table_end,
1443 old_lookup_table_end,
1448 memset(&hdr.integrity, 0, sizeof(struct resource_entry));
1452 * In the WIM header, there is room for the resource entry for a
1453 * metadata resource labeled as the "boot metadata". This entry should
1454 * be zeroed out if there is no bootable image (boot_idx 0). Otherwise,
1455 * it should be a copy of the resource entry for the image that is
1456 * marked as bootable. This is not well documented...
1458 if (hdr.boot_idx == 0 || !w->image_metadata
1459 || (image != WIMLIB_ALL_IMAGES && image != hdr.boot_idx)) {
1460 memset(&hdr.boot_metadata_res_entry, 0,
1461 sizeof(struct resource_entry));
1463 memcpy(&hdr.boot_metadata_res_entry,
1465 hdr.boot_idx - 1].metadata_lte->output_resource_entry,
1466 sizeof(struct resource_entry));
1469 /* Set image count and boot index correctly for single image writes */
1470 if (image != WIMLIB_ALL_IMAGES) {
1471 hdr.image_count = 1;
1472 if (hdr.boot_idx == image)
1478 if (fseeko(out, 0, SEEK_SET) != 0) {
1479 ret = WIMLIB_ERR_WRITE;
1483 ret = write_header(&hdr, out);
1487 if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
1488 if (fflush(out) != 0
1489 || fsync(fileno(out)) != 0)
1491 ERROR_WITH_ERRNO("Error flushing data to WIM file");
1492 ret = WIMLIB_ERR_WRITE;
1496 if (fclose(out) != 0) {
1497 ERROR_WITH_ERRNO("Failed to close the WIM file");
1499 ret = WIMLIB_ERR_WRITE;
#if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
/* Take an exclusive, non-blocking advisory lock on the open WIM file so two
 * processes cannot modify it concurrently.  Returns 0 if the lock was taken
 * (or could not be checked), or WIMLIB_ERR_ALREADY_LOCKED if another process
 * holds the lock. */
int lock_wim(FILE *fp, const char *path)
{
	int ret = 0;
	if (fp) {
		ret = flock(fileno(fp), LOCK_EX | LOCK_NB);
		if (ret != 0) {
			if (errno == EWOULDBLOCK) {
				ERROR("`%s' is already being modified or has been "
				      "mounted read-write\n"
				      "        by another process!", path);
				ret = WIMLIB_ERR_ALREADY_LOCKED;
			} else {
				/* flock() unsupported (e.g. some filesystems):
				 * warn but do not fail the operation. */
				WARNING("Failed to lock `%s': %s",
					path, strerror(errno));
				ret = 0;
			}
		}
	}
	return ret;
}
#endif
1528 static int open_wim_writable(WIMStruct *w, const char *path,
1529 bool trunc, bool readable)
1540 wimlib_assert(w->out_fp == NULL);
1541 w->out_fp = fopen(path, mode);
1545 ERROR_WITH_ERRNO("Failed to open `%s' for writing", path);
1546 return WIMLIB_ERR_OPEN;
1551 void close_wim_writable(WIMStruct *w)
1554 if (fclose(w->out_fp) != 0) {
1555 WARNING("Failed to close output WIM: %s",
1562 /* Open file stream and write dummy header for WIM. */
1563 int begin_write(WIMStruct *w, const char *path, int write_flags)
1566 ret = open_wim_writable(w, path, true,
1567 (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) != 0);
1570 /* Write dummy header. It will be overwritten later. */
1571 return write_header(&w->hdr, w->out_fp);
1574 /* Writes a stand-alone WIM to a file. */
1575 WIMLIBAPI int wimlib_write(WIMStruct *w, const char *path,
1576 int image, int write_flags, unsigned num_threads,
1577 wimlib_progress_func_t progress_func)
1582 return WIMLIB_ERR_INVALID_PARAM;
1584 write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
1586 if (image != WIMLIB_ALL_IMAGES &&
1587 (image < 1 || image > w->hdr.image_count))
1588 return WIMLIB_ERR_INVALID_IMAGE;
1590 if (w->hdr.total_parts != 1) {
1591 ERROR("Cannot call wimlib_write() on part of a split WIM");
1592 return WIMLIB_ERR_SPLIT_UNSUPPORTED;
1595 ret = begin_write(w, path, write_flags);
1599 ret = write_wim_streams(w, image, write_flags, num_threads,
1605 progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN, NULL);
1607 ret = for_image(w, image, write_metadata_resource);
1612 progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_END, NULL);
1614 ret = finish_write(w, image, write_flags, progress_func);
1616 close_wim_writable(w);
1620 static int lte_overwrite_prepare(struct lookup_table_entry *lte,
1623 memcpy(<e->output_resource_entry, <e->resource_entry,
1624 sizeof(struct resource_entry));
1625 lte->out_refcnt = 0;
1629 static int check_resource_offset(struct lookup_table_entry *lte, void *arg)
1631 off_t end_offset = *(u64*)arg;
1633 wimlib_assert(lte->out_refcnt <= lte->refcnt);
1634 if (lte->out_refcnt < lte->refcnt) {
1635 if (lte->resource_entry.offset + lte->resource_entry.size > end_offset) {
1636 ERROR("The following resource is after the XML data:");
1637 print_lookup_table_entry(lte);
1638 return WIMLIB_ERR_RESOURCE_ORDER;
1644 static int find_new_streams(struct lookup_table_entry *lte, void *arg)
1646 if (lte->out_refcnt == lte->refcnt)
1647 list_add(<e->staging_list, (struct list_head*)arg);
1649 lte->out_refcnt = lte->refcnt;
1654 * Overwrite a WIM, possibly appending streams to it.
1656 * A WIM looks like (or is supposed to look like) the following:
1658 * Header (212 bytes)
1659 * Streams and metadata resources (variable size)
1660 * Lookup table (variable size)
1661 * XML data (variable size)
1662 * Integrity table (optional) (variable size)
1664 * If we are not adding any streams or metadata resources, the lookup table is
1665 * unchanged--- so we only need to overwrite the XML data, integrity table, and
1666 * header. This operation is potentially unsafe if the program is abruptly
1667 * terminated while the XML data or integrity table are being overwritten, but
1668 * before the new header has been written. To partially alleviate this problem,
1669 * a special flag (WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) is passed to
1670 * finish_write() to cause a temporary WIM header to be written after the XML
1671 * data has been written. This may prevent the WIM from becoming corrupted if
1672 * the program is terminated while the integrity table is being calculated (but
1673 * no guarantees, due to write re-ordering...).
1675 * If we are adding new streams or images (metadata resources), the lookup table
1676 * needs to be changed, and those streams need to be written. In this case, we
1677 * try to perform a safe update of the WIM file by writing the streams *after*
1678 * the end of the previous WIM, then writing the new lookup table, XML data, and
1679 * (optionally) integrity table following the new streams. This will produce a
1680 * layout like the following:
1682 * Header (212 bytes)
1683 * (OLD) Streams and metadata resources (variable size)
1684 * (OLD) Lookup table (variable size)
1685 * (OLD) XML data (variable size)
1686 * (OLD) Integrity table (optional) (variable size)
1687 * (NEW) Streams and metadata resources (variable size)
1688 * (NEW) Lookup table (variable size)
1689 * (NEW) XML data (variable size)
1690 * (NEW) Integrity table (optional) (variable size)
1692 * At all points, the WIM is valid as nothing points to the new data yet. Then,
1693 * the header is overwritten to point to the new lookup table, XML data, and
1694 * integrity table, to produce the following layout:
1696 * Header (212 bytes)
1697 * Streams and metadata resources (variable size)
1698 * Nothing (variable size)
1699 * More Streams and metadata resources (variable size)
1700 * Lookup table (variable size)
1701 * XML data (variable size)
1702 * Integrity table (optional) (variable size)
1704 * This method allows an image to be appended to a large WIM very quickly, and
1705 * is crash-safe except in the case of write re-ordering, but the
1706 * disadvantage is that a small hole is left in the WIM where the old lookup
1707 * table, xml data, and integrity table were. (These usually only take up a
1708 * small amount of space compared to the streams, however.)
1710 static int overwrite_wim_inplace(WIMStruct *w, int write_flags,
1711 unsigned num_threads,
1712 wimlib_progress_func_t progress_func,
1713 int modified_image_idx)
1716 struct list_head stream_list;
1719 DEBUG("Overwriting `%s' in-place", w->filename);
1721 /* Make sure that the integrity table (if present) is after the XML
1722 * data, and that there are no stream resources, metadata resources, or
1723 * lookup tables after the XML data. Otherwise, these data would be
1725 if (w->hdr.integrity.offset != 0 &&
1726 w->hdr.integrity.offset < w->hdr.xml_res_entry.offset) {
1727 ERROR("Didn't expect the integrity table to be before the XML data");
1728 return WIMLIB_ERR_RESOURCE_ORDER;
1731 if (w->hdr.lookup_table_res_entry.offset > w->hdr.xml_res_entry.offset) {
1732 ERROR("Didn't expect the lookup table to be after the XML data");
1733 return WIMLIB_ERR_RESOURCE_ORDER;
1736 DEBUG("Identifying newly added streams");
1737 for_lookup_table_entry(w->lookup_table, lte_overwrite_prepare, NULL);
1738 INIT_LIST_HEAD(&stream_list);
1739 for (int i = modified_image_idx; i < w->hdr.image_count; i++) {
1740 DEBUG("Identifiying streams in image %d", i + 1);
1741 w->private = &stream_list;
1742 for_dentry_in_tree(w->image_metadata[i].root_dentry,
1743 dentry_find_streams_to_write, w);
1746 if (w->hdr.integrity.offset)
1747 old_wim_end = w->hdr.integrity.offset + w->hdr.integrity.size;
1749 old_wim_end = w->hdr.xml_res_entry.offset + w->hdr.xml_res_entry.size;
1751 ret = for_lookup_table_entry(w->lookup_table, check_resource_offset,
1756 if (modified_image_idx == w->hdr.image_count && !w->deletion_occurred) {
1757 /* If no images have been modified and no images have been
1758 * deleted, a new lookup table does not need to be written. */
1759 old_wim_end = w->hdr.lookup_table_res_entry.offset +
1760 w->hdr.lookup_table_res_entry.size;
1761 write_flags |= WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE |
1762 WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML;
1765 INIT_LIST_HEAD(&stream_list);
1766 for_lookup_table_entry(w->lookup_table, find_new_streams,
1769 ret = open_wim_writable(w, w->filename, false,
1770 (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) != 0);
1774 ret = lock_wim(w->out_fp, w->filename);
1781 if (fseeko(w->out_fp, old_wim_end, SEEK_SET) != 0) {
1782 ERROR_WITH_ERRNO("Can't seek to end of WIM");
1783 ret = WIMLIB_ERR_WRITE;
1787 if (!list_empty(&stream_list)) {
1788 DEBUG("Writing newly added streams (offset = %"PRIu64")",
1790 ret = write_stream_list(&stream_list, w->out_fp,
1791 wimlib_get_compression_type(w),
1792 write_flags, num_threads,
1797 DEBUG("No new streams were added");
1800 for (int i = modified_image_idx; i < w->hdr.image_count; i++) {
1801 select_wim_image(w, i + 1);
1802 ret = write_metadata_resource(w);
1806 write_flags |= WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE;
1807 ret = finish_write(w, WIMLIB_ALL_IMAGES, write_flags,
1810 close_wim_writable(w);
1812 WARNING("Truncating `%s' to its original size (%"PRIu64" bytes)",
1813 w->filename, old_wim_end);
1814 truncate(w->filename, old_wim_end);
1819 static int overwrite_wim_via_tmpfile(WIMStruct *w, int write_flags,
1820 unsigned num_threads,
1821 wimlib_progress_func_t progress_func)
1823 size_t wim_name_len;
1826 DEBUG("Overwriting `%s' via a temporary file", w->filename);
1828 /* Write the WIM to a temporary file in the same directory as the
1830 wim_name_len = strlen(w->filename);
1831 char tmpfile[wim_name_len + 10];
1832 memcpy(tmpfile, w->filename, wim_name_len);
1833 randomize_char_array_with_alnum(tmpfile + wim_name_len, 9);
1834 tmpfile[wim_name_len + 9] = '\0';
1836 ret = wimlib_write(w, tmpfile, WIMLIB_ALL_IMAGES,
1837 write_flags | WIMLIB_WRITE_FLAG_FSYNC,
1838 num_threads, progress_func);
1840 ERROR("Failed to write the WIM file `%s'", tmpfile);
1844 DEBUG("Renaming `%s' to `%s'", tmpfile, w->filename);
1846 /* Rename the new file to the old file .*/
1847 if (rename(tmpfile, w->filename) != 0) {
1848 ERROR_WITH_ERRNO("Failed to rename `%s' to `%s'",
1849 tmpfile, w->filename);
1850 ret = WIMLIB_ERR_RENAME;
1854 if (progress_func) {
1855 union wimlib_progress_info progress;
1856 progress.rename.from = tmpfile;
1857 progress.rename.to = w->filename;
1858 progress_func(WIMLIB_PROGRESS_MSG_RENAME, &progress);
1861 /* Close the original WIM file that was opened for reading. */
1862 if (w->fp != NULL) {
1867 /* Re-open the WIM read-only. */
1868 w->fp = fopen(w->filename, "rb");
1869 if (w->fp == NULL) {
1870 ret = WIMLIB_ERR_REOPEN;
1871 WARNING("Failed to re-open `%s' read-only: %s",
1872 w->filename, strerror(errno));
1878 /* Remove temporary file. */
1879 if (unlink(tmpfile) != 0)
1880 WARNING("Failed to remove `%s': %s", tmpfile, strerror(errno));
1885 * Writes a WIM file to the original file that it was read from, overwriting it.
1887 WIMLIBAPI int wimlib_overwrite(WIMStruct *w, int write_flags,
1888 unsigned num_threads,
1889 wimlib_progress_func_t progress_func)
1892 return WIMLIB_ERR_INVALID_PARAM;
1894 write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
1897 return WIMLIB_ERR_NO_FILENAME;
1899 if (w->hdr.total_parts != 1) {
1900 ERROR("Cannot modify a split WIM");
1901 return WIMLIB_ERR_SPLIT_UNSUPPORTED;
1904 if ((!w->deletion_occurred || (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
1905 && !(write_flags & WIMLIB_WRITE_FLAG_REBUILD))
1907 int i, modified_image_idx;
1908 for (i = 0; i < w->hdr.image_count && !w->image_metadata[i].modified; i++)
1910 modified_image_idx = i;
1911 for (; i < w->hdr.image_count && w->image_metadata[i].modified &&
1912 !w->image_metadata[i].has_been_mounted_rw; i++)
1914 if (i == w->hdr.image_count) {
1915 return overwrite_wim_inplace(w, write_flags, num_threads,
1917 modified_image_idx);
1920 return overwrite_wim_via_tmpfile(w, write_flags, num_threads,