1 /*
2  * write.c
3  *
4  * Support for writing WIM files; write a WIM file, overwrite a WIM file, write
5  * compressed file resources, etc.
6  */
7
8 /*
9  * Copyright (C) 2012, 2013 Eric Biggers
10  *
11  * This file is part of wimlib, a library for working with WIM files.
12  *
13  * wimlib is free software; you can redistribute it and/or modify it under the
14  * terms of the GNU General Public License as published by the Free
15  * Software Foundation; either version 3 of the License, or (at your option)
16  * any later version.
17  *
18  * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
19  * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
20  * A PARTICULAR PURPOSE. See the GNU General Public License for more
21  * details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with wimlib; if not, see http://www.gnu.org/licenses/.
25  */
26
27 #include "config.h"
28
29 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
30 /* On BSD, this should be included before "list.h" so that "list.h" can
31  * overwrite the LIST_HEAD macro. */
32 #  include <sys/file.h>
33 #endif
34
35 #ifdef __WIN32__
36 #  include "win32.h"
37 #endif
38
39 #include "list.h"
40 #include "wimlib_internal.h"
41 #include "buffer_io.h"
42 #include "dentry.h"
43 #include "lookup_table.h"
44 #include "xml.h"
45
46 #ifdef ENABLE_MULTITHREADED_COMPRESSION
47 #  include <pthread.h>
48 #endif
49
50 #include <unistd.h>
51 #include <errno.h>
52
53 #ifdef WITH_NTFS_3G
54 #  include <time.h>
55 #  include <ntfs-3g/attrib.h>
56 #  include <ntfs-3g/inode.h>
57 #  include <ntfs-3g/dir.h>
58 #endif
59
60 #ifdef HAVE_ALLOCA_H
61 #  include <alloca.h>
62 #else
63 #  include <stdlib.h>
64 #endif
65
66 #include <limits.h>
67
68 /* Chunk table that's located at the beginning of each compressed resource in
69  * the WIM.  (This is not the on-disk format; the on-disk format just has an
70  * array of offsets.) */
71 struct chunk_table {
72         off_t file_offset;
73         u64 num_chunks;
74         u64 original_resource_size;
75         u64 bytes_per_chunk_entry;
76         u64 table_disk_size;
77         u64 cur_offset;
78         u64 *cur_offset_p;
79         u64 offsets[0];
80 };
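
/*
 * Example (for illustration): a resource of 100000 uncompressed bytes is split
 * into ceil(100000 / WIM_CHUNK_SIZE) = 4 chunks.  Since the uncompressed size
 * is under 4 GiB, each chunk table entry is 4 bytes, and only
 * num_chunks - 1 = 3 entries are stored on disk: the first chunk implicitly
 * begins at offset 0 relative to the end of the chunk table, so only the
 * offsets of chunks 2 through 4 need to be recorded.
 */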
81
82 /*
83  * Allocates and initializes a chunk table, and reserves space for it in the
84  * output file.
85  */
86 static int
87 begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte,
88                              int out_fd,
89                              off_t file_offset,
90                              struct chunk_table **chunk_tab_ret)
91 {
92         u64 size = wim_resource_size(lte);
93         u64 num_chunks = (size + WIM_CHUNK_SIZE - 1) / WIM_CHUNK_SIZE;
94         size_t alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64);
95         struct chunk_table *chunk_tab = CALLOC(1, alloc_size);
96
97         DEBUG("Begin chunk table for stream with size %"PRIu64, size);
98
99         if (!chunk_tab) {
100                 ERROR("Failed to allocate chunk table for %"PRIu64" byte "
101                       "resource", size);
102                 return WIMLIB_ERR_NOMEM;
103         }
104         chunk_tab->file_offset = file_offset;
105         chunk_tab->num_chunks = num_chunks;
106         chunk_tab->original_resource_size = size;
107         chunk_tab->bytes_per_chunk_entry = (size >= (1ULL << 32)) ? 8 : 4;
108         chunk_tab->table_disk_size = chunk_tab->bytes_per_chunk_entry *
109                                      (num_chunks - 1);
110         chunk_tab->cur_offset = 0;
111         chunk_tab->cur_offset_p = chunk_tab->offsets;
112
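        /* Reserve space for the chunk table by writing placeholder bytes now;
         * the real table is written over this region with full_pwrite() in
         * finish_wim_resource_chunk_tab() once all chunk offsets are known. */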
113         if (full_write(out_fd, chunk_tab,
114                        chunk_tab->table_disk_size) != chunk_tab->table_disk_size)
115         {
116                 ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
117                                  "file resource");
118                 FREE(chunk_tab);
119                 return WIMLIB_ERR_WRITE;
120         }
121         *chunk_tab_ret = chunk_tab;
122         return 0;
123 }
124
125 /*
126  * compress_func_t - Pointer to a function that compresses a chunk
127  *                  of a WIM resource.  This may be either
128  *                  wimlib_xpress_compress() (xpress-compress.c) or
129  *                  wimlib_lzx_compress() (lzx-compress.c).
130  *
131  * @chunk:        Uncompressed data of the chunk.
132  * @chunk_size:   Size of the uncompressed chunk, in bytes.
133  * @out:          Pointer to output buffer of size at least (@chunk_size - 1) bytes.
134  *
135  * Returns the size of the compressed data written to @out in bytes, or 0 if the
136  * data could not be compressed to (@chunk_size - 1) bytes or fewer.
137  *
138  * As a special requirement, the compression code is optimized for the WIM
139  * format and therefore requires (@chunk_size <= 32768).
140  *
141  * As another special requirement, the compression code will read up to 8 bytes
142  * past the end of the @chunk array for performance reasons.  The values of these
143  * bytes will not affect the output of the compression, but the calling code
144  * must make sure that the buffer holding the uncompressed chunk is actually at
145  * least (@chunk_size + 8) bytes, or at least that these extra bytes are in
146  * mapped memory that will not cause a memory access violation if accessed.
147  */
148 typedef unsigned (*compress_func_t)(const void *chunk, unsigned chunk_size,
149                                     void *out);
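
/*
 * Illustrative sketch (hypothetical caller): one way to satisfy the "up to 8
 * readable bytes past the end" requirement documented above is to
 * over-allocate the buffer holding the uncompressed chunk:
 *
 *      u8 *ubuf = MALLOC(WIM_CHUNK_SIZE + 8);
 *      u8 *cbuf = MALLOC(WIM_CHUNK_SIZE);
 *      ...
 *      unsigned csize = (*compress)(ubuf, chunk_size, cbuf);
 *      if (csize == 0)
 *              ... write @chunk_size bytes from @ubuf (uncompressed) ...
 *      else
 *              ... write @csize bytes from @cbuf (compressed) ...
 */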
150
151 static compress_func_t
152 get_compress_func(int out_ctype)
153 {
154         if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX)
155                 return wimlib_lzx_compress;
156         else
157                 return wimlib_xpress_compress;
158 }
159
160 /*
161  * Writes a chunk of a WIM resource to an output file.
162  *
163  * @chunk:        Uncompressed data of the chunk.
164  * @chunk_size:   Size of the chunk (<= WIM_CHUNK_SIZE)
165  * @out_fd:       File descriptor to write the chunk to.
166  * @compress:     Compression function to use (NULL if writing uncompressed
167  *                      data).
168  * @chunk_tab:    Pointer to chunk table being created.  It is updated with the
169  *                      offset of the chunk we write.
170  *
171  * Returns 0 on success; nonzero on failure.
172  */
173 static int
174 write_wim_resource_chunk(const void * restrict chunk,
175                          unsigned chunk_size,
176                          filedes_t out_fd,
177                          compress_func_t compress,
178                          struct chunk_table * restrict chunk_tab)
179 {
180         const void *out_chunk;
181         unsigned out_chunk_size;
182         if (compress) {
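                /* @chunk_size never exceeds WIM_CHUNK_SIZE (32768 bytes), so a
                 * stack allocation for the compressed output is safe here. */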
183                 void *compressed_chunk = alloca(chunk_size);
184
185                 out_chunk_size = (*compress)(chunk, chunk_size, compressed_chunk);
186                 if (out_chunk_size) {
187                         /* Write compressed */
188                         out_chunk = compressed_chunk;
189                 } else {
190                         /* Write uncompressed */
191                         out_chunk = chunk;
192                         out_chunk_size = chunk_size;
193                 }
194                 *chunk_tab->cur_offset_p++ = chunk_tab->cur_offset;
195                 chunk_tab->cur_offset += out_chunk_size;
196         } else {
197                 /* Write uncompressed */
198                 out_chunk = chunk;
199                 out_chunk_size = chunk_size;
200         }
201         if (full_write(out_fd, out_chunk, out_chunk_size) != out_chunk_size) {
202                 ERROR_WITH_ERRNO("Failed to write WIM resource chunk");
203                 return WIMLIB_ERR_WRITE;
204         }
205         return 0;
206 }
207
208 /*
209  * Finishes a WIM chunk table and writes it to the output file at the correct
210  * offset.
211  *
212  * The final size of the full compressed resource is returned in
213  * @compressed_size_p.
214  */
215 static int
216 finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab,
217                               filedes_t out_fd, u64 *compressed_size_p)
218 {
219         size_t bytes_written;
220
221         if (chunk_tab->bytes_per_chunk_entry == 8) {
222                 array_cpu_to_le64(chunk_tab->offsets, chunk_tab->num_chunks);
223         } else {
224                 for (u64 i = 0; i < chunk_tab->num_chunks; i++)
225                         ((u32*)chunk_tab->offsets)[i] =
226                                 cpu_to_le32(chunk_tab->offsets[i]);
227         }
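        /* The offset of the first chunk is always 0 and is not stored on disk,
         * so skip the first table entry and write only the remaining
         * num_chunks - 1 entries (table_disk_size bytes) at the reserved
         * location. */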
228         bytes_written = full_pwrite(out_fd,
229                                     (u8*)chunk_tab->offsets + chunk_tab->bytes_per_chunk_entry,
230                                     chunk_tab->table_disk_size,
231                                     chunk_tab->file_offset);
232         if (bytes_written != chunk_tab->table_disk_size) {
233                 ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
234                                  "file resource");
235                 return WIMLIB_ERR_WRITE;
236         }
237         *compressed_size_p = chunk_tab->cur_offset + chunk_tab->table_disk_size;
238         return 0;
239 }
240
241 static int
242 seek_and_truncate(filedes_t out_fd, off_t offset)
243 {
244         if (lseek(out_fd, offset, SEEK_SET) == -1 ||
245             ftruncate(out_fd, offset))
246         {
247                 ERROR_WITH_ERRNO("Failed to truncate output WIM file");
248                 return WIMLIB_ERR_WRITE;
249         } else {
250                 return 0;
251         }
252 }
253
254 static int
255 finalize_and_check_sha1(SHA_CTX * restrict sha_ctx,
256                         struct wim_lookup_table_entry * restrict lte)
257 {
258         u8 md[SHA1_HASH_SIZE];
259         sha1_final(md, sha_ctx);
260         if (lte->unhashed) {
261                 copy_hash(lte->hash, md);
262         } else if (!hashes_equal(md, lte->hash)) {
263                 ERROR("WIM resource has incorrect hash!");
264                 if (lte_filename_valid(lte)) {
265                         ERROR("We were reading it from \"%"TS"\"; maybe "
266                               "it changed while we were reading it.",
267                               lte->file_on_disk);
268                 }
269                 return WIMLIB_ERR_INVALID_RESOURCE_HASH;
270         }
271         return 0;
272 }
273
274
275 struct write_resource_ctx {
276         compress_func_t compress;
277         struct chunk_table *chunk_tab;
278         filedes_t out_fd;
279         SHA_CTX sha_ctx;
280         bool doing_sha;
281 };
282
283 static int
284 write_resource_cb(const void *restrict chunk, size_t chunk_size,
285                   void *restrict _ctx)
286 {
287         struct write_resource_ctx *ctx = _ctx;
288
289         if (ctx->doing_sha)
290                 sha1_update(&ctx->sha_ctx, chunk, chunk_size);
291         return write_wim_resource_chunk(chunk, chunk_size,
292                                         ctx->out_fd, ctx->compress,
293                                         ctx->chunk_tab);
294 }
295
296 /*
297  * Write a resource to an output WIM.
298  *
299  * @lte:  Lookup table entry for the resource, which could be in another WIM,
300  *        in an external file, or in another location.
301  *
302  * @out_fd:  File descriptor opened to the output WIM.
303  *
304  * @out_ctype:  One of the WIMLIB_COMPRESSION_TYPE_* constants to indicate
305  *              which compression algorithm to use.
306  *
307  * @out_res_entry:  On success, this is filled in with the offset, flags,
308  *                  compressed size, and uncompressed size of the resource
309  *                  in the output WIM.
310  *
311  * @flags:  WIMLIB_RESOURCE_FLAG_RECOMPRESS to force data to be recompressed
312  *          even if it could otherwise be copied directly from the input.
313  *
314  * Additional notes:  The SHA1 message digest of the uncompressed data is
315  * calculated (except when doing a raw copy --- see below).  If the @unhashed
316  * flag is set on the lookup table entry, this message digest is simply copied
317  * to it; otherwise, the message digest is compared with the existing one, and
318  * the function will fail if they do not match.
319  */
320 int
321 write_wim_resource(struct wim_lookup_table_entry *lte,
322                    filedes_t out_fd, int out_ctype,
323                    struct resource_entry *out_res_entry,
324                    int flags)
325 {
326         struct write_resource_ctx write_ctx;
327         u64 read_size;
328         u64 new_size;
329         off_t offset;
330         int ret;
331
332         flags &= ~WIMLIB_RESOURCE_FLAG_RAW;
333
334         /* Get current position in output WIM */
335         offset = filedes_offset(out_fd);
336         if (offset == -1) {
337                 ERROR_WITH_ERRNO("Can't get position in output WIM");
338                 return WIMLIB_ERR_WRITE;
339         }
340
341         /* If we are not forcing the data to be recompressed, and the input
342          * resource is located in a WIM whose compression type matches the desired
343          * (non-"none") output compression type, we can simply copy the compressed
344          * data without recompressing it.  This also means we must skip calculating
345          * the SHA1, as we never see the uncompressed data. */
346         if (!(flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) &&
347             lte->resource_location == RESOURCE_IN_WIM &&
348             out_ctype != WIMLIB_COMPRESSION_TYPE_NONE &&
349             wimlib_get_compression_type(lte->wim) == out_ctype)
350         {
351                 flags |= WIMLIB_RESOURCE_FLAG_RAW;
352                 write_ctx.doing_sha = false;
353                 read_size = lte->resource_entry.size;
354         } else {
355                 write_ctx.doing_sha = true;
356                 sha1_init(&write_ctx.sha_ctx);
357                 read_size = lte->resource_entry.original_size;
358         }
359
360         /* Initialize the chunk table and set the compression function if
361          * compressing the resource. */
362         if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
363             (flags & WIMLIB_RESOURCE_FLAG_RAW)) {
364                 write_ctx.compress = NULL;
365                 write_ctx.chunk_tab = NULL;
366         } else {
367                 write_ctx.compress = get_compress_func(out_ctype);
368                 ret = begin_wim_resource_chunk_tab(lte, out_fd,
369                                                    offset,
370                                                    &write_ctx.chunk_tab);
371                 if (ret)
372                         return ret;
373         }
374
375         /* Write the resource by reading it in chunks and feeding the data
376          * through the write_resource_cb() callback. */
377         write_ctx.out_fd = out_fd;
378 try_write_again:
379         ret = read_resource_prefix(lte, read_size,
380                                    write_resource_cb, &write_ctx, flags);
381         if (ret)
382                 goto out_free_chunk_tab;
383
384         /* Verify SHA1 message digest of the resource, or set the hash for the
385          * first time. */
386         if (write_ctx.doing_sha) {
387                 ret = finalize_and_check_sha1(&write_ctx.sha_ctx, lte);
388                 if (ret)
389                         goto out_free_chunk_tab;
390         }
391
392         out_res_entry->flags = lte->resource_entry.flags;
393         out_res_entry->original_size = wim_resource_size(lte);
394         out_res_entry->offset = offset;
395         if (flags & WIMLIB_RESOURCE_FLAG_RAW) {
396                 /* Doing a raw write:  The new compressed size is the same as
397                  * the compressed size in the other WIM. */
398                 new_size = lte->resource_entry.size;
399         } else if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE) {
400                 /* Using WIMLIB_COMPRESSION_TYPE_NONE:  The new compressed size
401                  * is the original size. */
402                 new_size = lte->resource_entry.original_size;
403                 out_res_entry->flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
404         } else {
405                 /* Using a different compression type:  Call
406                  * finish_wim_resource_chunk_tab() and it will provide the new
407                  * compressed size. */
408                 ret = finish_wim_resource_chunk_tab(write_ctx.chunk_tab, out_fd,
409                                                     &new_size);
410                 if (ret)
411                         goto out_free_chunk_tab;
412                 if (new_size >= wim_resource_size(lte)) {
413                         /* Oops!  We compressed the resource to larger than the original
414                          * size.  Write the resource uncompressed instead. */
415                         DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
416                               "writing uncompressed instead",
417                               wim_resource_size(lte), new_size);
418                         ret = seek_and_truncate(out_fd, offset);
419                         if (ret)
420                                 goto out_free_chunk_tab;
421                         write_ctx.compress = NULL;
422                         write_ctx.doing_sha = false;
423                         out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
424                         goto try_write_again;
425                 }
426                 out_res_entry->flags |= WIM_RESHDR_FLAG_COMPRESSED;
427         }
428         out_res_entry->size = new_size;
429         ret = 0;
430 out_free_chunk_tab:
431         FREE(write_ctx.chunk_tab);
432         return ret;
433 }
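
/*
 * Illustrative sketch (hypothetical caller): write one stream to an
 * already-opened output WIM with LZX compression, forcing recompression:
 *
 *      struct resource_entry out_entry;
 *      int ret = write_wim_resource(lte, out_fd, WIMLIB_COMPRESSION_TYPE_LZX,
 *                                   &out_entry, WIMLIB_RESOURCE_FLAG_RECOMPRESS);
 *      if (ret)
 *              return ret;
 */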
434
435 #ifdef ENABLE_MULTITHREADED_COMPRESSION
436
437 /* Blocking shared queue (solves the producer-consumer problem) */
438 struct shared_queue {
439         unsigned size;
440         unsigned front;
441         unsigned back;
442         unsigned filled_slots;
443         void **array;
444         pthread_mutex_t lock;
445         pthread_cond_t msg_avail_cond;
446         pthread_cond_t space_avail_cond;
447 };
448
449 static int
450 shared_queue_init(struct shared_queue *q, unsigned size)
451 {
452         wimlib_assert(size != 0);
453         q->array = CALLOC(sizeof(q->array[0]), size);
454         if (!q->array)
455                 goto err;
456         q->filled_slots = 0;
457         q->front = 0;
458         q->back = size - 1;
459         q->size = size;
460         if (pthread_mutex_init(&q->lock, NULL)) {
461                 ERROR_WITH_ERRNO("Failed to initialize mutex");
462                 goto err;
463         }
464         if (pthread_cond_init(&q->msg_avail_cond, NULL)) {
465                 ERROR_WITH_ERRNO("Failed to initialize condition variable");
466                 goto err_destroy_lock;
467         }
468         if (pthread_cond_init(&q->space_avail_cond, NULL)) {
469                 ERROR_WITH_ERRNO("Failed to initialize condition variable");
470                 goto err_destroy_msg_avail_cond;
471         }
472         return 0;
473 err_destroy_msg_avail_cond:
474         pthread_cond_destroy(&q->msg_avail_cond);
475 err_destroy_lock:
476         pthread_mutex_destroy(&q->lock);
477 err:
478         return WIMLIB_ERR_NOMEM;
479 }
480
481 static void
482 shared_queue_destroy(struct shared_queue *q)
483 {
484         FREE(q->array);
485         pthread_mutex_destroy(&q->lock);
486         pthread_cond_destroy(&q->msg_avail_cond);
487         pthread_cond_destroy(&q->space_avail_cond);
488 }
489
490 static void
491 shared_queue_put(struct shared_queue *q, void *obj)
492 {
493         pthread_mutex_lock(&q->lock);
494         while (q->filled_slots == q->size)
495                 pthread_cond_wait(&q->space_avail_cond, &q->lock);
496
497         q->back = (q->back + 1) % q->size;
498         q->array[q->back] = obj;
499         q->filled_slots++;
500
501         pthread_cond_broadcast(&q->msg_avail_cond);
502         pthread_mutex_unlock(&q->lock);
503 }
504
505 static void *
506 shared_queue_get(struct shared_queue *q)
507 {
508         void *obj;
509
510         pthread_mutex_lock(&q->lock);
511         while (q->filled_slots == 0)
512                 pthread_cond_wait(&q->msg_avail_cond, &q->lock);
513
514         obj = q->array[q->front];
515         q->array[q->front] = NULL;
516         q->front = (q->front + 1) % q->size;
517         q->filled_slots--;
518
519         pthread_cond_broadcast(&q->space_avail_cond);
520         pthread_mutex_unlock(&q->lock);
521         return obj;
522 }
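
/*
 * Usage pattern (as used later in this file): the main thread is the producer
 * for res_to_compress_queue and the consumer for compressed_res_queue, while
 * each compressor thread sits on the other end of both queues.
 * shared_queue_put() blocks while the queue is full; shared_queue_get()
 * blocks while it is empty.
 */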
523
524 struct compressor_thread_params {
525         struct shared_queue *res_to_compress_queue;
526         struct shared_queue *compressed_res_queue;
527         compress_func_t compress;
528 };
529
530 #define MAX_CHUNKS_PER_MSG 2
531
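/* A "message" is the unit of work exchanged between the main thread and the
 * compressor threads: up to MAX_CHUNKS_PER_MSG uncompressed chunks of one
 * stream, buffers for their compressed counterparts, and iovecs describing
 * the data that will actually be written to the output WIM. */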
532 struct message {
533         struct wim_lookup_table_entry *lte;
534         u8 *uncompressed_chunks[MAX_CHUNKS_PER_MSG];
535         u8 *compressed_chunks[MAX_CHUNKS_PER_MSG];
536         unsigned uncompressed_chunk_sizes[MAX_CHUNKS_PER_MSG];
537         struct iovec out_chunks[MAX_CHUNKS_PER_MSG];
538         size_t total_out_bytes;
539         unsigned num_chunks;
540         struct list_head list;
541         bool complete;
542         u64 begin_chunk;
543 };
544
545 static void
546 compress_chunks(struct message *msg, compress_func_t compress)
547 {
548         msg->total_out_bytes = 0;
549         for (unsigned i = 0; i < msg->num_chunks; i++) {
550                 unsigned len = compress(msg->uncompressed_chunks[i],
551                                         msg->uncompressed_chunk_sizes[i],
552                                         msg->compressed_chunks[i]);
553                 void *out_chunk;
554                 unsigned out_len;
555                 if (len) {
556                         /* To be written compressed */
557                         out_chunk = msg->compressed_chunks[i];
558                         out_len = len;
559                 } else {
560                         /* To be written uncompressed */
561                         out_chunk = msg->uncompressed_chunks[i];
562                         out_len = msg->uncompressed_chunk_sizes[i];
563                 }
564                 msg->out_chunks[i].iov_base = out_chunk;
565                 msg->out_chunks[i].iov_len = out_len;
566                 msg->total_out_bytes += out_len;
567         }
568 }
569
570 /* Compressor thread routine.  This is a lot simpler than the main thread
571  * routine: just repeatedly get a group of chunks from the
572  * res_to_compress_queue, compress them, and put them in the
573  * compressed_res_queue.  A NULL pointer indicates that the thread should stop.
574  * */
575 static void *
576 compressor_thread_proc(void *arg)
577 {
578         struct compressor_thread_params *params = arg;
579         struct shared_queue *res_to_compress_queue = params->res_to_compress_queue;
580         struct shared_queue *compressed_res_queue = params->compressed_res_queue;
581         compress_func_t compress = params->compress;
582         struct message *msg;
583
584         DEBUG("Compressor thread ready");
585         while ((msg = shared_queue_get(res_to_compress_queue)) != NULL) {
586                 compress_chunks(msg, compress);
587                 shared_queue_put(compressed_res_queue, msg);
588         }
589         DEBUG("Compressor thread terminating");
590         return NULL;
591 }
592 #endif /* ENABLE_MULTITHREADED_COMPRESSION */
593
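/* Update the write-streams progress counters and, if a progress function was
 * provided, invoke it at most about once per 1% of total_bytes.  The _private
 * field holds the completed-bytes threshold at which the next callback should
 * fire. */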
594 static void
595 do_write_streams_progress(union wimlib_progress_info *progress,
596                           wimlib_progress_func_t progress_func,
597                           uint64_t size_added)
598 {
599         progress->write_streams.completed_bytes += size_added;
600         progress->write_streams.completed_streams++;
601         if (progress_func &&
602             progress->write_streams.completed_bytes >= progress->write_streams._private)
603         {
604                 progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
605                               progress);
606                 if (progress->write_streams._private == progress->write_streams.total_bytes) {
607                         progress->write_streams._private = ~0;
608                 } else {
609                         progress->write_streams._private =
610                                 min(progress->write_streams.total_bytes,
611                                     progress->write_streams.completed_bytes +
612                                         progress->write_streams.total_bytes / 100);
613                 }
614         }
615 }
616
617 struct serial_write_stream_ctx {
618         filedes_t out_fd;
619         int out_ctype;
620         int write_resource_flags;
621 };
622
623 static int
624 serial_write_stream(struct wim_lookup_table_entry *lte, void *_ctx)
625 {
626         struct serial_write_stream_ctx *ctx = _ctx;
627         return write_wim_resource(lte, ctx->out_fd,
628                                   ctx->out_ctype, &lte->output_resource_entry,
629                                   ctx->write_resource_flags);
630 }
631
632 /* Write a list of streams, taking into account that some streams may be
633  * duplicates that are checksummed and discarded on the fly, and also delegating
634  * the actual writing of a stream to a function @write_stream_cb, which is
635  * passed the context @write_stream_ctx. */
636 static int
637 do_write_stream_list(struct list_head *stream_list,
638                      struct wim_lookup_table *lookup_table,
639                      int (*write_stream_cb)(struct wim_lookup_table_entry *, void *),
640                      void *write_stream_ctx,
641                      wimlib_progress_func_t progress_func,
642                      union wimlib_progress_info *progress)
643 {
644         int ret = 0;
645         struct wim_lookup_table_entry *lte;
646
647         /* For each stream in @stream_list ... */
648         while (!list_empty(stream_list)) {
649                 lte = container_of(stream_list->next,
650                                    struct wim_lookup_table_entry,
651                                    write_streams_list);
652                 list_del(&lte->write_streams_list);
653                 if (lte->unhashed && !lte->unique_size) {
654                         /* Unhashed stream that shares a size with some other
655                          * stream in the WIM we are writing.  The stream must be
656                          * checksummed to know if we need to write it or not. */
657                         struct wim_lookup_table_entry *tmp;
658                         u32 orig_refcnt = lte->out_refcnt;
659
660                         ret = hash_unhashed_stream(lte, lookup_table, &tmp);
661                         if (ret)
662                                 break;
663                         if (tmp != lte) {
664                                 lte = tmp;
665                                 /* We found a duplicate stream. */
666                                 if (orig_refcnt != tmp->out_refcnt) {
667                                         /* We have already written, or are going
668                                          * to write, the duplicate stream.  So
669                                          * just skip to the next stream. */
670                                         DEBUG("Discarding duplicate stream of length %"PRIu64,
671                                               wim_resource_size(lte));
672                                         lte->no_progress = 0;
673                                         goto skip_to_progress;
674                                 }
675                         }
676                 }
677
678                 /* Here, @lte is either a hashed stream or an unhashed stream
679                  * with a unique size.  In either case we know that the stream
680                  * has to be written.  In either case the SHA1 message digest
681                  * will be calculated over the stream while writing it; however,
682                  * in the former case this is done merely to check the data,
683                  * while in the latter case this is done because we do not have
684                  * the SHA1 message digest yet.  */
685                 wimlib_assert(lte->out_refcnt != 0);
686                 lte->deferred = 0;
687                 lte->no_progress = 0;
688                 ret = (*write_stream_cb)(lte, write_stream_ctx);
689                 if (ret)
690                         break;
691                 /* In parallel mode, some streams are deferred for later,
692                  * serialized processing; ignore them here. */
693                 if (lte->deferred)
694                         continue;
695                 if (lte->unhashed) {
696                         list_del(&lte->unhashed_list);
697                         lookup_table_insert(lookup_table, lte);
698                         lte->unhashed = 0;
699                 }
700         skip_to_progress:
701                 if (!lte->no_progress) {
702                         do_write_streams_progress(progress,
703                                                   progress_func,
704                                                   wim_resource_size(lte));
705                 }
706         }
707         return ret;
708 }
709
710 static int
711 do_write_stream_list_serial(struct list_head *stream_list,
712                             struct wim_lookup_table *lookup_table,
713                             filedes_t out_fd,
714                             int out_ctype,
715                             int write_resource_flags,
716                             wimlib_progress_func_t progress_func,
717                             union wimlib_progress_info *progress)
718 {
719         struct serial_write_stream_ctx ctx = {
720                 .out_fd = out_fd,
721                 .out_ctype = out_ctype,
722                 .write_resource_flags = write_resource_flags,
723         };
724         return do_write_stream_list(stream_list,
725                                     lookup_table,
726                                     serial_write_stream,
727                                     &ctx,
728                                     progress_func,
729                                     progress);
730 }
731
732 static inline int
733 write_flags_to_resource_flags(int write_flags)
734 {
735         int resource_flags = 0;
736
737         if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
738                 resource_flags |= WIMLIB_RESOURCE_FLAG_RECOMPRESS;
739         return resource_flags;
740 }
741
742 static int
743 write_stream_list_serial(struct list_head *stream_list,
744                          struct wim_lookup_table *lookup_table,
745                          filedes_t out_fd,
746                          int out_ctype,
747                          int write_resource_flags,
748                          wimlib_progress_func_t progress_func,
749                          union wimlib_progress_info *progress)
750 {
751         DEBUG("Writing stream list (serial version)");
752         progress->write_streams.num_threads = 1;
753         if (progress_func)
754                 progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
755         return do_write_stream_list_serial(stream_list,
756                                            lookup_table,
757                                            out_fd,
758                                            out_ctype,
759                                            write_resource_flags,
760                                            progress_func,
761                                            progress);
762 }
763
764 #ifdef ENABLE_MULTITHREADED_COMPRESSION
765 static int
766 write_wim_chunks(struct message *msg, filedes_t out_fd,
767                  struct chunk_table *chunk_tab)
768 {
769         for (unsigned i = 0; i < msg->num_chunks; i++) {
770                 *chunk_tab->cur_offset_p++ = chunk_tab->cur_offset;
771                 chunk_tab->cur_offset += msg->out_chunks[i].iov_len;
772         }
773         if (full_writev(out_fd, msg->out_chunks,
774                         msg->num_chunks) != msg->total_out_bytes)
775         {
776                 ERROR_WITH_ERRNO("Failed to write WIM chunks");
777                 return WIMLIB_ERR_WRITE;
778         }
779         return 0;
780 }
781
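/* State used by the main thread while writing streams with parallel
 * compression: the shared queues, the pool of messages, the stream currently
 * being read (next_lte) along with its SHA1 context and chunk counters, and
 * the chunk table of the stream currently being written. */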
782 struct main_writer_thread_ctx {
783         struct list_head *stream_list;
784         struct wim_lookup_table *lookup_table;
785         filedes_t out_fd;
786         int out_ctype;
787         int write_resource_flags;
788         struct shared_queue *res_to_compress_queue;
789         struct shared_queue *compressed_res_queue;
790         size_t num_messages;
791         wimlib_progress_func_t progress_func;
792         union wimlib_progress_info *progress;
793
794         struct list_head available_msgs;
795         struct list_head outstanding_streams;
796         struct list_head serial_streams;
797         size_t num_outstanding_messages;
798
799         SHA_CTX next_sha_ctx;
800         u64 next_chunk;
801         u64 next_num_chunks;
802         struct wim_lookup_table_entry *next_lte;
803
804         struct message *msgs;
805         struct message *next_msg;
806         struct chunk_table *cur_chunk_tab;
807 };
808
809 static int
810 init_message(struct message *msg)
811 {
812         for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
813                 msg->compressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
814                 msg->uncompressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
815                 if (msg->compressed_chunks[i] == NULL ||
816                     msg->uncompressed_chunks[i] == NULL)
817                         return WIMLIB_ERR_NOMEM;
818         }
819         return 0;
820 }
821
822 static void
823 destroy_message(struct message *msg)
824 {
825         for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
826                 FREE(msg->compressed_chunks[i]);
827                 FREE(msg->uncompressed_chunks[i]);
828         }
829 }
830
831 static void
832 free_messages(struct message *msgs, size_t num_messages)
833 {
834         if (msgs) {
835                 for (size_t i = 0; i < num_messages; i++)
836                         destroy_message(&msgs[i]);
837                 FREE(msgs);
838         }
839 }
840
841 static struct message *
842 allocate_messages(size_t num_messages)
843 {
844         struct message *msgs;
845
846         msgs = CALLOC(num_messages, sizeof(struct message));
847         if (!msgs)
848                 return NULL;
849         for (size_t i = 0; i < num_messages; i++) {
850                 if (init_message(&msgs[i])) {
851                         free_messages(msgs, num_messages);
852                         return NULL;
853                 }
854         }
855         return msgs;
856 }
857
858 static void
859 main_writer_thread_destroy_ctx(struct main_writer_thread_ctx *ctx)
860 {
861         while (ctx->num_outstanding_messages--)
862                 shared_queue_get(ctx->compressed_res_queue);
863         free_messages(ctx->msgs, ctx->num_messages);
864         FREE(ctx->cur_chunk_tab);
865 }
866
867 static int
868 main_writer_thread_init_ctx(struct main_writer_thread_ctx *ctx)
869 {
870         /* Pre-allocate all the buffers that will be needed to do the chunk
871          * compression. */
872         ctx->msgs = allocate_messages(ctx->num_messages);
873         if (!ctx->msgs)
874                 return WIMLIB_ERR_NOMEM;
875
876         /* Initially, all the messages are available to use. */
877         INIT_LIST_HEAD(&ctx->available_msgs);
878         for (size_t i = 0; i < ctx->num_messages; i++)
879                 list_add_tail(&ctx->msgs[i].list, &ctx->available_msgs);
880
881         /* outstanding_streams is the list of streams that currently have had
882          * chunks sent off for compression.
883          *
884          * The first stream in outstanding_streams is the stream that is
885          * currently being written.
886          *
887          * The last stream in outstanding_streams is the stream that is
888          * currently being read and having chunks fed to the compressor threads.
889          * */
890         INIT_LIST_HEAD(&ctx->outstanding_streams);
891         ctx->num_outstanding_messages = 0;
892
893         ctx->next_msg = NULL;
894
895         /* Resources that don't need any chunks compressed are added to this
896          * list and written directly by the main thread. */
897         INIT_LIST_HEAD(&ctx->serial_streams);
898
899         ctx->cur_chunk_tab = NULL;
900
901         return 0;
902 }
903
904 static int
905 receive_compressed_chunks(struct main_writer_thread_ctx *ctx)
906 {
907         struct message *msg;
908         struct wim_lookup_table_entry *cur_lte;
909         int ret;
910
911         wimlib_assert(!list_empty(&ctx->outstanding_streams));
912         wimlib_assert(ctx->num_outstanding_messages != 0);
913
914         cur_lte = container_of(ctx->outstanding_streams.next,
915                                struct wim_lookup_table_entry,
916                                being_compressed_list);
917
918         /* Get the next message from the queue and process it.
919          * The message will contain 1 or more data chunks that have been
920          * compressed. */
921         msg = shared_queue_get(ctx->compressed_res_queue);
922         msg->complete = true;
923         --ctx->num_outstanding_messages;
924
925         /* Is this the next chunk in the current resource?  If it's not
926          * (i.e., an earlier chunk in the same or a different resource
927          * hasn't been compressed yet), do nothing, and keep this
928          * message around until all earlier chunks are received.
929          *
930          * Otherwise, write all the chunks we can. */
931         while (cur_lte != NULL &&
932                !list_empty(&cur_lte->msg_list)
933                && (msg = container_of(cur_lte->msg_list.next,
934                                       struct message,
935                                       list))->complete)
936         {
937                 list_move(&msg->list, &ctx->available_msgs);
938                 if (msg->begin_chunk == 0) {
939                         /* This is the first set of chunks.  Leave space
940                          * for the chunk table in the output file. */
941                         off_t cur_offset = filedes_offset(ctx->out_fd);
942                         if (cur_offset == -1)
943                                 return WIMLIB_ERR_WRITE;
944                         ret = begin_wim_resource_chunk_tab(cur_lte,
945                                                            ctx->out_fd,
946                                                            cur_offset,
947                                                            &ctx->cur_chunk_tab);
948                         if (ret)
949                                 return ret;
950                 }
951
952                 /* Write the compressed chunks from the message. */
953                 ret = write_wim_chunks(msg, ctx->out_fd, ctx->cur_chunk_tab);
954                 if (ret)
955                         return ret;
956
957                 /* Was this the last chunk of the stream?  If so, finish
958                  * it. */
959                 if (list_empty(&cur_lte->msg_list) &&
960                     msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks)
961                 {
962                         u64 res_csize;
963                         off_t offset;
964
965                         ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab,
966                                                             ctx->out_fd,
967                                                             &res_csize);
968                         if (ret)
969                                 return ret;
970
971                         list_del(&cur_lte->being_compressed_list);
972
973                         /* Grab the offset of this stream in the output file
974                          * from the chunk table before we free it. */
975                         offset = ctx->cur_chunk_tab->file_offset;
976
977                         FREE(ctx->cur_chunk_tab);
978                         ctx->cur_chunk_tab = NULL;
979
980                         if (res_csize >= wim_resource_size(cur_lte)) {
981                                 /* Oops!  We compressed the resource to
982                                  * larger than the original size.  Write
983                                  * the resource uncompressed instead. */
984                                 DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
985                                       "writing uncompressed instead",
986                                       wim_resource_size(cur_lte), res_csize);
987                                 ret = seek_and_truncate(ctx->out_fd, offset);
988                                 if (ret)
989                                         return ret;
990                                 ret = write_wim_resource(cur_lte,
991                                                          ctx->out_fd,
992                                                          WIMLIB_COMPRESSION_TYPE_NONE,
993                                                          &cur_lte->output_resource_entry,
994                                                          ctx->write_resource_flags);
995                                 if (ret)
996                                         return ret;
997                         } else {
998                                 cur_lte->output_resource_entry.size =
999                                         res_csize;
1000
1001                                 cur_lte->output_resource_entry.original_size =
1002                                         cur_lte->resource_entry.original_size;
1003
1004                                 cur_lte->output_resource_entry.offset =
1005                                         offset;
1006
1007                                 cur_lte->output_resource_entry.flags =
1008                                         cur_lte->resource_entry.flags |
1009                                                 WIM_RESHDR_FLAG_COMPRESSED;
1010                         }
1011
1012                         do_write_streams_progress(ctx->progress,
1013                                                   ctx->progress_func,
1014                                                   wim_resource_size(cur_lte));
1015
1016                         /* Since we just finished writing a stream, write any
1017                          * streams that have been added to the serial_streams
1018                          * list for direct writing by the main thread (e.g.
1019                          * resources that don't need to be compressed because
1020                          * the desired compression type is the same as their
1021                          * existing compression type). */
1022                         if (!list_empty(&ctx->serial_streams)) {
1023                                 ret = do_write_stream_list_serial(&ctx->serial_streams,
1024                                                                   ctx->lookup_table,
1025                                                                   ctx->out_fd,
1026                                                                   ctx->out_ctype,
1027                                                                   ctx->write_resource_flags,
1028                                                                   ctx->progress_func,
1029                                                                   ctx->progress);
1030                                 if (ret)
1031                                         return ret;
1032                         }
1033
1034                         /* Advance to the next stream to write. */
1035                         if (list_empty(&ctx->outstanding_streams)) {
1036                                 cur_lte = NULL;
1037                         } else {
1038                                 cur_lte = container_of(ctx->outstanding_streams.next,
1039                                                        struct wim_lookup_table_entry,
1040                                                        being_compressed_list);
1041                         }
1042                 }
1043         }
1044         return 0;
1045 }
1046
1047 /* Called when the main thread has read a new chunk of data. */
1048 static int
1049 main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx)
1050 {
1051         struct main_writer_thread_ctx *ctx = _ctx;
1052         int ret;
1053         struct message *next_msg;
1054         u64 next_chunk_in_msg;
1055
1056         /* Update SHA1 message digest for the stream currently being read by the
1057          * main thread. */
1058         sha1_update(&ctx->next_sha_ctx, chunk, chunk_size);
1059
1060         /* We send chunks of data to the compressor threads in batches, which we
1061          * refer to as "messages".  @next_msg is the message that is currently
1062          * being prepared to send off.  If it is NULL, that indicates that we
1063          * need to start a new message. */
1064         next_msg = ctx->next_msg;
1065         if (!next_msg) {
1066                 /* We need to start a new message.  First check to see if there
1067                  * is a message available in the list of available messages.  If
1068                  * so, we can just take one.  If not, all the messages (there is
1069                  * a fixed number of them, proportional to the number of
1070                  * threads) have been sent off to the compressor threads, so we
1071                  * receive messages from the compressor threads containing
1072                  * compressed chunks of data.
1073                  *
1074                  * We may need to receive multiple messages before one is
1075                  * actually available to use because messages received that are
1076                  * *not* for the very next set of chunks to compress must be
1077                  * buffered until it's time to write those chunks. */
1078                 while (list_empty(&ctx->available_msgs)) {
1079                         ret = receive_compressed_chunks(ctx);
1080                         if (ret)
1081                                 return ret;
1082                 }
1083
1084                 next_msg = container_of(ctx->available_msgs.next,
1085                                         struct message, list);
1086                 list_del(&next_msg->list);
1087                 next_msg->complete = false;
1088                 next_msg->begin_chunk = ctx->next_chunk;
1089                 next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG,
1090                                            ctx->next_num_chunks - ctx->next_chunk);
1091                 ctx->next_msg = next_msg;
1092         }
1093
1094         /* Fill in the next chunk to compress */
1095         next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk;
1096
1097         next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size;
1098         memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg],
1099                chunk, chunk_size);
1100         ctx->next_chunk++;
1101         if (++next_chunk_in_msg == next_msg->num_chunks) {
1102                 /* Send off an array of chunks to compress */
1103                 list_add_tail(&next_msg->list, &ctx->next_lte->msg_list);
1104                 shared_queue_put(ctx->res_to_compress_queue, next_msg);
1105                 ++ctx->num_outstanding_messages;
1106                 ctx->next_msg = NULL;
1107         }
1108         return 0;
1109 }
1110
1111 static int
1112 main_writer_thread_finish(void *_ctx)
1113 {
1114         struct main_writer_thread_ctx *ctx = _ctx;
1115         int ret;
1116         while (ctx->num_outstanding_messages != 0) {
1117                 ret = receive_compressed_chunks(ctx);
1118                 if (ret)
1119                         return ret;
1120         }
1121         wimlib_assert(list_empty(&ctx->outstanding_streams));
1122         return do_write_stream_list_serial(&ctx->serial_streams,
1123                                            ctx->lookup_table,
1124                                            ctx->out_fd,
1125                                            ctx->out_ctype,
1126                                            ctx->write_resource_flags,
1127                                            ctx->progress_func,
1128                                            ctx->progress);
1129 }
1130
1131 static int
1132 submit_stream_for_compression(struct wim_lookup_table_entry *lte,
1133                               struct main_writer_thread_ctx *ctx)
1134 {
1135         int ret;
1136
1137         /* Read the entire stream @lte, feeding its data chunks to the
1138          * compressor threads.  Also SHA1-sum the stream; this is required in
1139          * the case that @lte is unhashed, and a nice additional verification
1140          * when @lte is already hashed. */
1141         sha1_init(&ctx->next_sha_ctx);
1142         ctx->next_chunk = 0;
1143         ctx->next_num_chunks = wim_resource_chunks(lte);
1144         ctx->next_lte = lte;
1145         INIT_LIST_HEAD(&lte->msg_list);
1146         list_add_tail(&lte->being_compressed_list, &ctx->outstanding_streams);
1147         ret = read_resource_prefix(lte, wim_resource_size(lte),
1148                                    main_writer_thread_cb, ctx, 0);
1149         if (ret == 0) {
1150                 wimlib_assert(ctx->next_chunk == ctx->next_num_chunks);
1151                 ret = finalize_and_check_sha1(&ctx->next_sha_ctx, lte);
1152         }
1153         return ret;
1154 }
1155
1156 static int
1157 main_thread_process_next_stream(struct wim_lookup_table_entry *lte, void *_ctx)
1158 {
1159         struct main_writer_thread_ctx *ctx = _ctx;
1160         int ret;
1161
1162         if (wim_resource_size(lte) < 1000 ||
1163             ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
1164             (lte->resource_location == RESOURCE_IN_WIM &&
1165              !(ctx->write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) &&
1166              wimlib_get_compression_type(lte->wim) == ctx->out_ctype))
1167         {
1168                 /* Stream is too small or isn't being compressed.  Defer it for the
1169                  * main thread to process when it has a chance.  We can't necessarily
1170                  * process it right here, as the main thread could be in the
1171                  * middle of writing a different stream. */
1172                 list_add_tail(&lte->write_streams_list, &ctx->serial_streams);
1173                 lte->deferred = 1;
1174                 ret = 0;
1175         } else {
1176                 ret = submit_stream_for_compression(lte, ctx);
1177         }
1178         lte->no_progress = 1;
1179         return ret;
1180 }
1181
1182 static long
1183 get_default_num_threads()
1184 {
1185 #ifdef __WIN32__
1186         return win32_get_number_of_processors();
1187 #else
1188         return sysconf(_SC_NPROCESSORS_ONLN);
1189 #endif
1190 }
1191
1192 /* Equivalent to write_stream_list_serial(), except this takes a @num_threads
1193  * parameter and will perform compression using that many threads.  Falls
1194  * back to write_stream_list_serial() on certain errors, such as a failure to
1195  * create the number of threads requested.
1196  *
1197  * High level description of the algorithm for writing compressed streams in
1198  * parallel:  We perform compression on chunks of size WIM_CHUNK_SIZE bytes
1199  * rather than on full files.  The currently executing thread becomes the main
1200  * thread and is entirely in charge of reading the data to compress (which may
1201  * be in any location understood by the resource code --- such as in an external
1202  * file being captured, or in another WIM file from which an image is being
1203  * exported) and actually writing the compressed data to the output file.
1204  * Additional threads are "compressor threads" and all execute the
1205  * compressor_thread_proc, where they repeatedly retrieve buffers of data from
1206  * the main thread, compress them, and hand them back to the main thread.
1207  *
1208  * Certain streams, such as streams that do not need to be compressed (e.g.
1209  * input compression type same as output compression type) or streams of very
1210  * small size are placed in a list (main_writer_thread_ctx.serial_streams) and
1211  * handled entirely by the main thread at an appropriate time.
1212  *
1213  * At any given point in time, multiple streams may be having chunks compressed
1214  * concurrently.  The stream that the main thread is currently *reading* may be
1215  * later in the list than the stream that the main thread is currently
1216  * *writing*.
1217  */
1218 static int
1219 write_stream_list_parallel(struct list_head *stream_list,
1220                            struct wim_lookup_table *lookup_table,
1221                            filedes_t out_fd,
1222                            int out_ctype,
1223                            int write_resource_flags,
1224                            wimlib_progress_func_t progress_func,
1225                            union wimlib_progress_info *progress,
1226                            unsigned num_threads)
1227 {
1228         int ret;
1229         struct shared_queue res_to_compress_queue;
1230         struct shared_queue compressed_res_queue;
1231         pthread_t *compressor_threads = NULL;
1232
1233         if (num_threads == 0) {
1234                 long nthreads = get_default_num_threads();
1235                 if (nthreads < 1 || nthreads > UINT_MAX) {
1236                         WARNING("Could not determine number of processors! Assuming 1");
1237                         goto out_serial;
1238                 } else if (nthreads == 1) {
1239                         goto out_serial_quiet;
1240                 } else {
1241                         num_threads = nthreads;
1242                 }
1243         }
1244
1245         DEBUG("Writing stream list (parallel version, num_threads=%u)",
1246               num_threads);
1247
1248         progress->write_streams.num_threads = num_threads;
1249
1250         static const size_t MESSAGES_PER_THREAD = 2;
1251         size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD);
1252
1253         DEBUG("Initializing shared queues (queue_size=%zu)", queue_size);
1254
1255         ret = shared_queue_init(&res_to_compress_queue, queue_size);
1256         if (ret)
1257                 goto out_serial;
1258
1259         ret = shared_queue_init(&compressed_res_queue, queue_size);
1260         if (ret)
1261                 goto out_destroy_res_to_compress_queue;
1262
1263         struct compressor_thread_params params;
1264         params.res_to_compress_queue = &res_to_compress_queue;
1265         params.compressed_res_queue = &compressed_res_queue;
1266         params.compress = get_compress_func(out_ctype);
1267
1268         compressor_threads = MALLOC(num_threads * sizeof(pthread_t));
1269         if (!compressor_threads) {
1270                 ret = WIMLIB_ERR_NOMEM;
1271                 goto out_destroy_compressed_res_queue;
1272         }
1273
1274         for (unsigned i = 0; i < num_threads; i++) {
1275                 DEBUG("pthread_create thread %u of %u", i + 1, num_threads);
1276                 ret = pthread_create(&compressor_threads[i], NULL,
1277                                      compressor_thread_proc, &params);
1278                 if (ret != 0) {
1279                         ret = -1;
1280                         ERROR_WITH_ERRNO("Failed to create compressor "
1281                                          "thread %u of %u",
1282                                          i + 1, num_threads);
1283                         num_threads = i;
1284                         goto out_join;
1285                 }
1286         }
1287
1288         if (progress_func)
1289                 progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
1290
1291         struct main_writer_thread_ctx ctx;
1292         ctx.stream_list           = stream_list;
1293         ctx.lookup_table          = lookup_table;
1294         ctx.out_fd                = out_fd;
1295         ctx.out_ctype             = out_ctype;
1296         ctx.res_to_compress_queue = &res_to_compress_queue;
1297         ctx.compressed_res_queue  = &compressed_res_queue;
1298         ctx.num_messages          = queue_size;
1299         ctx.write_resource_flags  = write_resource_flags;
1300         ctx.progress_func         = progress_func;
1301         ctx.progress              = progress;
1302         ret = main_writer_thread_init_ctx(&ctx);
1303         if (ret)
1304                 goto out_join;
1305         ret = do_write_stream_list(stream_list, lookup_table,
1306                                    main_thread_process_next_stream,
1307                                    &ctx, progress_func, progress);
1308         if (ret)
1309                 goto out_destroy_ctx;
1310
1311         /* The main thread has finished reading all streams that are going to be
1312          * compressed in parallel, and it now needs to wait for all remaining
1313          * chunks to be compressed so that the remaining streams can actually be
1314          * written to the output file.  Furthermore, any remaining streams that
1315          * had processing deferred to the main thread need to be handled.  These
1316          * tasks are done by the main_writer_thread_finish() function. */
1317         ret = main_writer_thread_finish(&ctx);
1318 out_destroy_ctx:
1319         main_writer_thread_destroy_ctx(&ctx);
1320 out_join:
1321         for (unsigned i = 0; i < num_threads; i++)
1322                 shared_queue_put(&res_to_compress_queue, NULL);
1323
1324         for (unsigned i = 0; i < num_threads; i++) {
1325                 if (pthread_join(compressor_threads[i], NULL)) {
1326                         WARNING_WITH_ERRNO("Failed to join compressor "
1327                                            "thread %u of %u",
1328                                            i + 1, num_threads);
1329                 }
1330         }
1331         FREE(compressor_threads);
1332 out_destroy_compressed_res_queue:
1333         shared_queue_destroy(&compressed_res_queue);
1334 out_destroy_res_to_compress_queue:
1335         shared_queue_destroy(&res_to_compress_queue);
1336         if (ret >= 0 && ret != WIMLIB_ERR_NOMEM)
1337                 return ret;
1338 out_serial:
1339         WARNING("Falling back to single-threaded compression");
1340 out_serial_quiet:
1341         return write_stream_list_serial(stream_list,
1342                                         lookup_table,
1343                                         out_fd,
1344                                         out_ctype,
1345                                         write_resource_flags,
1346                                         progress_func,
1347                                         progress);
1348
1349 }
1350 #endif
1351
1352 /*
1353  * Write a list of streams to a WIM (@out_fd) using the compression type
1354  * @out_ctype and up to @num_threads compressor threads.
1355  */
1356 static int
1357 write_stream_list(struct list_head *stream_list,
1358                   struct wim_lookup_table *lookup_table,
1359                   filedes_t out_fd, int out_ctype, int write_flags,
1360                   unsigned num_threads, wimlib_progress_func_t progress_func)
1361 {
1362         struct wim_lookup_table_entry *lte;
1363         size_t num_streams = 0;
1364         u64 total_bytes = 0;
1365         u64 total_compression_bytes = 0;
1366         union wimlib_progress_info progress;
1367         int ret;
1368         int write_resource_flags;
1369
1370         if (list_empty(stream_list))
1371                 return 0;
1372
1373         write_resource_flags = write_flags_to_resource_flags(write_flags);
1374
1375         /* Calculate the total size of the streams to be written.  Note: this
1376          * will be the uncompressed size, since we may not yet know the
1377          * compressed size; it also assumes that every unhashed stream will be
1378          * written (which will not necessarily be the case). */
1379         list_for_each_entry(lte, stream_list, write_streams_list) {
1380                 num_streams++;
1381                 total_bytes += wim_resource_size(lte);
1382                 if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
1383                        && (wim_resource_compression_type(lte) != out_ctype ||
1384                            (write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS)))
1385                 {
1386                         total_compression_bytes += wim_resource_size(lte);
1387                 }
1388         }
1389         progress.write_streams.total_bytes       = total_bytes;
1390         progress.write_streams.total_streams     = num_streams;
1391         progress.write_streams.completed_bytes   = 0;
1392         progress.write_streams.completed_streams = 0;
1393         progress.write_streams.num_threads       = num_threads;
1394         progress.write_streams.compression_type  = out_ctype;
1395         progress.write_streams._private          = 0;
1396
1397 #ifdef ENABLE_MULTITHREADED_COMPRESSION
1398         if (total_compression_bytes >= 1000000 && num_threads != 1)
1399                 ret = write_stream_list_parallel(stream_list,
1400                                                  lookup_table,
1401                                                  out_fd,
1402                                                  out_ctype,
1403                                                  write_resource_flags,
1404                                                  progress_func,
1405                                                  &progress,
1406                                                  num_threads);
1407         else
1408 #endif
1409                 ret = write_stream_list_serial(stream_list,
1410                                                lookup_table,
1411                                                out_fd,
1412                                                out_ctype,
1413                                                write_resource_flags,
1414                                                progress_func,
1415                                                &progress);
1416         return ret;
1417 }
1418
1419 struct stream_size_table {
1420         struct hlist_head *array;
1421         size_t num_entries;
1422         size_t capacity;
1423 };
1424
1425 static int
1426 init_stream_size_table(struct stream_size_table *tab, size_t capacity)
1427 {
1428         tab->array = CALLOC(capacity, sizeof(tab->array[0]));
1429         if (!tab->array)
1430                 return WIMLIB_ERR_NOMEM;
1431         tab->num_entries = 0;
1432         tab->capacity = capacity;
1433         return 0;
1434 }
1435
1436 static void
1437 destroy_stream_size_table(struct stream_size_table *tab)
1438 {
1439         FREE(tab->array);
1440 }
1441
1442 static int
1443 stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab)
1444 {
1445         struct stream_size_table *tab = _tab;
1446         size_t pos;
1447         struct wim_lookup_table_entry *same_size_lte;
1448         struct hlist_node *tmp;
1449
1450         pos = hash_u64(wim_resource_size(lte)) % tab->capacity;
1451         lte->unique_size = 1;
1452         hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) {
1453                 if (wim_resource_size(same_size_lte) == wim_resource_size(lte)) {
1454                         lte->unique_size = 0;
1455                         same_size_lte->unique_size = 0;
1456                         break;
1457                 }
1458         }
1459
1460         hlist_add_head(&lte->hash_list_2, &tab->array[pos]);
1461         tab->num_entries++;
1462         return 0;
1463 }
1464
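/* Illustrative sketch (not part of wimlib; disabled): how the stream size
 * table above is intended to be used.  Each candidate stream is inserted
 * once; any two streams sharing an uncompressed size clear each other's
 * @unique_size flag, so afterwards @unique_size distinguishes streams that
 * cannot possibly be duplicates from streams that may still need to be
 * checksummed and compared.  The capacity chosen here is arbitrary; the real
 * callers below (prepare_stream_list() and prepare_streams_for_overwrite())
 * reuse the lookup table's capacity. */
#if 0
static int
example_mark_unique_sizes(struct wim_lookup_table *lookup_table)
{
        struct stream_size_table tab;
        int ret;

        ret = init_stream_size_table(&tab, 9001 /* arbitrary capacity */);
        if (ret)
                return ret;

        /* Bucket every hashed stream in the lookup table by size; entries
         * that collide on size have their @unique_size flags cleared. */
        for_lookup_table_entry(lookup_table, stream_size_table_insert, &tab);

        /* ... each inserted lte now has lte->unique_size set appropriately ... */

        destroy_stream_size_table(&tab);
        return 0;
}
#endif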
1465
1466 struct lte_overwrite_prepare_args {
1467         WIMStruct *wim;
1468         off_t end_offset;
1469         struct list_head stream_list;
1470         struct stream_size_table stream_size_tab;
1471 };
1472
1473 /* First phase of preparing streams for an in-place overwrite.  This is called
1474  * on all streams, both hashed and unhashed, except the metadata resources. */
1475 static int
1476 lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *_args)
1477 {
1478         struct lte_overwrite_prepare_args *args = _args;
1479
1480         wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA));
1481         if (lte->resource_location != RESOURCE_IN_WIM || lte->wim != args->wim)
1482                 list_add_tail(&lte->write_streams_list, &args->stream_list);
1483         lte->out_refcnt = lte->refcnt;
1484         stream_size_table_insert(lte, &args->stream_size_tab);
1485         return 0;
1486 }
1487
1488 /* Second phase of preparing streams for an in-place overwrite.  This is called
1489  * on existing metadata resources and hashed streams, but not unhashed streams.
1490  *
1491  * NOTE: lte->output_resource_entry is in a union with lte->hash_list_2, so
1492  * lte_overwrite_prepare_2() must be called after lte_overwrite_prepare():
1493  * lte_overwrite_prepare() uses lte->hash_list_2, whereas
1494  * lte_overwrite_prepare_2() expects to set lte->output_resource_entry. */
1495 static int
1496 lte_overwrite_prepare_2(struct wim_lookup_table_entry *lte, void *_args)
1497 {
1498         struct lte_overwrite_prepare_args *args = _args;
1499
1500         if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == args->wim) {
1501                 /* We can't do an in place overwrite on the WIM if there are
1502                  * streams after the XML data. */
1503                 if (lte->resource_entry.offset +
1504                     lte->resource_entry.size > args->end_offset)
1505                 {
1506                 #ifdef ENABLE_ERROR_MESSAGES
1507                         ERROR("The following resource is after the XML data:");
1508                         print_lookup_table_entry(lte, stderr);
1509                 #endif
1510                         return WIMLIB_ERR_RESOURCE_ORDER;
1511                 }
1512                 copy_resource_entry(&lte->output_resource_entry,
1513                                     &lte->resource_entry);
1514         }
1515         return 0;
1516 }
1517
1518 /* Given a WIM that we are going to overwrite in place with zero or more
1519  * additional streams added, construct the list of new unique streams
1520  * ('struct wim_lookup_table_entry's) that must be written, plus any unhashed
1521  * streams that need to be added but may be identical to other hashed or
1522  * unhashed streams.  These unhashed streams are checksummed while the streams
1523  * are being written.  To aid this process, the member @unique_size is set to 1
1524  * on streams that have a unique size and therefore must be written.
1525  *
1526  * The out_refcnt member of each 'struct wim_lookup_table_entry' is set to
1527  * indicate the number of times the stream is referenced in only the streams
1528  * that are being written; this may still be adjusted later when unhashed
1529  * streams are being resolved.
1530  */
1531 static int
1532 prepare_streams_for_overwrite(WIMStruct *wim, off_t end_offset,
1533                               struct list_head *stream_list)
1534 {
1535         int ret;
1536         struct lte_overwrite_prepare_args args;
1537         unsigned i;
1538
1539         args.wim = wim;
1540         args.end_offset = end_offset;
1541         ret = init_stream_size_table(&args.stream_size_tab,
1542                                      wim->lookup_table->capacity);
1543         if (ret)
1544                 return ret;
1545
1546         INIT_LIST_HEAD(&args.stream_list);
1547         for (i = 0; i < wim->hdr.image_count; i++) {
1548                 struct wim_image_metadata *imd;
1549                 struct wim_lookup_table_entry *lte;
1550
1551                 imd = wim->image_metadata[i];
1552                 image_for_each_unhashed_stream(lte, imd)
1553                         lte_overwrite_prepare(lte, &args);
1554         }
1555         for_lookup_table_entry(wim->lookup_table, lte_overwrite_prepare, &args);
1556         list_transfer(&args.stream_list, stream_list);
1557
1558         for (i = 0; i < wim->hdr.image_count; i++) {
1559                 ret = lte_overwrite_prepare_2(wim->image_metadata[i]->metadata_lte,
1560                                               &args);
1561                 if (ret)
1562                         goto out_destroy_stream_size_table;
1563         }
1564         ret = for_lookup_table_entry(wim->lookup_table,
1565                                      lte_overwrite_prepare_2, &args);
1566 out_destroy_stream_size_table:
1567         destroy_stream_size_table(&args.stream_size_tab);
1568         return ret;
1569 }
1570
1571
1572 struct find_streams_ctx {
1573         struct list_head stream_list;
1574         struct stream_size_table stream_size_tab;
1575 };
1576
1577 static void
1578 inode_find_streams_to_write(struct wim_inode *inode,
1579                             struct wim_lookup_table *table,
1580                             struct list_head *stream_list,
1581                             struct stream_size_table *tab)
1582 {
1583         struct wim_lookup_table_entry *lte;
1584         for (unsigned i = 0; i <= inode->i_num_ads; i++) { /* unnamed stream + ADS entries */
1585                 lte = inode_stream_lte(inode, i, table);
1586                 if (lte) {
1587                         if (lte->out_refcnt == 0) {
1588                                 if (lte->unhashed)
1589                                         stream_size_table_insert(lte, tab);
1590                                 list_add_tail(&lte->write_streams_list, stream_list);
1591                         }
1592                         lte->out_refcnt += inode->i_nlink;
1593                 }
1594         }
1595 }
1596
1597 static int
1598 image_find_streams_to_write(WIMStruct *w)
1599 {
1600         struct find_streams_ctx *ctx;
1601         struct wim_image_metadata *imd;
1602         struct wim_inode *inode;
1603         struct wim_lookup_table_entry *lte;
1604
1605         ctx = w->private;
1606         imd = wim_get_current_image_metadata(w);
1607
1608         image_for_each_unhashed_stream(lte, imd)
1609                 lte->out_refcnt = 0;
1610
1611         /* Go through this image's inodes to find any streams that have not been
1612          * found yet. */
1613         image_for_each_inode(inode, imd) {
1614                 inode_find_streams_to_write(inode, w->lookup_table,
1615                                             &ctx->stream_list,
1616                                             &ctx->stream_size_tab);
1617         }
1618         return 0;
1619 }
1620
1621 /* Given a WIM from which one or all of the images are being written, build
1622  * the list of unique streams ('struct wim_lookup_table_entry's) that must be
1623  * written, plus any unhashed streams that need to be written but may be
1624  * identical to other hashed or unhashed streams being written.  These unhashed
1625  * streams are checksummed while the streams are being written.  To aid this
1626  * process, the member @unique_size is set to 1 on streams that have a unique
1627  * size and therefore must be written.
1628  *
1629  * The out_refcnt member of each 'struct wim_lookup_table_entry' is set to
1630  * indicate the number of times the stream is referenced in only the streams
1631  * that are being written; this may still be adjusted later when unhashed
1632  * streams are being resolved.
1633  */
1634 static int
1635 prepare_stream_list(WIMStruct *wim, int image, struct list_head *stream_list)
1636 {
1637         int ret;
1638         struct find_streams_ctx ctx;
1639
1640         for_lookup_table_entry(wim->lookup_table, lte_zero_out_refcnt, NULL);
1641         ret = init_stream_size_table(&ctx.stream_size_tab,
1642                                      wim->lookup_table->capacity);
1643         if (ret)
1644                 return ret;
1645         for_lookup_table_entry(wim->lookup_table, stream_size_table_insert,
1646                                &ctx.stream_size_tab);
1647         INIT_LIST_HEAD(&ctx.stream_list);
1648         wim->private = &ctx;
1649         ret = for_image(wim, image, image_find_streams_to_write);
1650         destroy_stream_size_table(&ctx.stream_size_tab);
1651         if (ret == 0)
1652                 list_transfer(&ctx.stream_list, stream_list);
1653         return ret;
1654 }
1655
1656 /* Writes the streams for the specified @image in @wim to @wim->out_fd.
1657  */
1658 static int
1659 write_wim_streams(WIMStruct *wim, int image, int write_flags,
1660                   unsigned num_threads,
1661                   wimlib_progress_func_t progress_func)
1662 {
1663         int ret;
1664         struct list_head stream_list;
1665
1666         ret = prepare_stream_list(wim, image, &stream_list);
1667         if (ret)
1668                 return ret;
1669         return write_stream_list(&stream_list,
1670                                  wim->lookup_table,
1671                                  wim->out_fd,
1672                                  wimlib_get_compression_type(wim),
1673                                  write_flags,
1674                                  num_threads,
1675                                  progress_func);
1676 }
1677
1678 /*
1679  * Finish writing a WIM file: write the lookup table, xml data, and integrity
1680  * table (optional), then overwrite the WIM header.
1681  *
1682  * write_flags is a bitwise OR of the following:
1683  *
1684  *      (public)  WIMLIB_WRITE_FLAG_CHECK_INTEGRITY:
1685  *              Include an integrity table.
1686  *
1687  *      (public)  WIMLIB_WRITE_FLAG_SHOW_PROGRESS:
1688  *              Show progress information when (if) writing the integrity table.
1689  *
1690  *      (private) WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE:
1691  *              Don't write the lookup table.
1692  *
1693  *      (private) WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE:
1694  *              When (if) writing the integrity table, re-use entries from the
1695  *              existing integrity table, if possible.
1696  *
1697  *      (private) WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML:
1698  *              After writing the XML data but before writing the integrity
1699  *              table, write a temporary WIM header and flush the stream so that
1700  *              the WIM is less likely to become corrupted upon abrupt program
1701  *              termination.
1702  *
1703  *      (private) WIMLIB_WRITE_FLAG_FSYNC:
1704  *              fsync() the output file before closing it.
1705  *
1706  */
1707 int
1708 finish_write(WIMStruct *w, int image, int write_flags,
1709              wimlib_progress_func_t progress_func)
1710 {
1711         int ret;
1712         struct wim_header hdr;
1713
1714         /* @hdr will be the header for the new WIM.  First copy all the data
1715          * from the header in the WIMStruct; then set all the fields that may
1716          * have changed, including the resource entries, boot index, and image
1717          * count.  */
1718         memcpy(&hdr, &w->hdr, sizeof(struct wim_header));
1719
1720         /* Set image count and boot index correctly for single image writes */
1721         if (image != WIMLIB_ALL_IMAGES) {
1722                 hdr.image_count = 1;
1723                 if (hdr.boot_idx == image)
1724                         hdr.boot_idx = 1;
1725                 else
1726                         hdr.boot_idx = 0;
1727         }
1728
1729         /* In the WIM header, there is room for the resource entry for a
1730          * metadata resource labeled as the "boot metadata".  This entry should
1731          * be zeroed out if there is no bootable image (boot_idx 0).  Otherwise,
1732          * it should be a copy of the resource entry for the image that is
1733          * marked as bootable.  This is not well documented...  */
1734         if (hdr.boot_idx == 0) {
1735                 zero_resource_entry(&hdr.boot_metadata_res_entry);
1736         } else {
1737                 copy_resource_entry(&hdr.boot_metadata_res_entry,
1738                                     &w->image_metadata[hdr.boot_idx - 1]->
1739                                             metadata_lte->output_resource_entry);
1740         }
1741
1742         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
1743                 ret = write_lookup_table(w, image, &hdr.lookup_table_res_entry);
1744                 if (ret)
1745                         goto out_close_wim;
1746         }
1747
1748         ret = write_xml_data(w->wim_info, image, w->out_fd,
1749                              (write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE) ?
1750                               wim_info_get_total_bytes(w->wim_info) : 0,
1751                              &hdr.xml_res_entry);
1752         if (ret)
1753                 goto out_close_wim;
1754
1755         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
1756                 if (write_flags & WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) {
1757                         struct wim_header checkpoint_hdr;
1758                         memcpy(&checkpoint_hdr, &hdr, sizeof(struct wim_header));
1759                         zero_resource_entry(&checkpoint_hdr.integrity);
1760                         ret = write_header(&checkpoint_hdr, w->out_fd);
1761                         if (ret)
1762                                 goto out_close_wim;
1763                 }
1764
1765                 off_t old_lookup_table_end;
1766                 off_t new_lookup_table_end;
1767                 if (write_flags & WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE) {
1768                         old_lookup_table_end = w->hdr.lookup_table_res_entry.offset +
1769                                                w->hdr.lookup_table_res_entry.size;
1770                 } else {
1771                         old_lookup_table_end = 0;
1772                 }
1773                 new_lookup_table_end = hdr.lookup_table_res_entry.offset +
1774                                        hdr.lookup_table_res_entry.size;
1775
1776                 ret = write_integrity_table(w->out_fd,
1777                                             &hdr.integrity,
1778                                             new_lookup_table_end,
1779                                             old_lookup_table_end,
1780                                             progress_func);
1781                 if (ret)
1782                         goto out_close_wim;
1783         } else {
1784                 zero_resource_entry(&hdr.integrity);
1785         }
1786
1787         ret = write_header(&hdr, w->out_fd);
1788         if (ret)
1789                 goto out_close_wim;
1790
1791         if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
1792                 if (fsync(w->out_fd)) {
1793                         ERROR_WITH_ERRNO("Error syncing data to WIM file");
1794                         ret = WIMLIB_ERR_WRITE;
1795                 }
1796         }
1797 out_close_wim:
1798         if (close(w->out_fd)) {
1799                 ERROR_WITH_ERRNO("Failed to close the output WIM file");
1800                 if (ret == 0)
1801                         ret = WIMLIB_ERR_WRITE;
1802         }
1803         w->out_fd = INVALID_FILEDES;
1804         return ret;
1805 }
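
/* Illustrative sketch (not part of wimlib; disabled): the flag combination
 * that the in-place overwrite path further below ends up passing to
 * finish_write() when no new lookup table needs to be written: skip the
 * lookup table, write a checkpoint header after the XML data, and (if an
 * integrity table is requested at all) reuse entries from the existing
 * integrity table where possible. */
#if 0
static int
example_finish_header_only_update(WIMStruct *w,
                                  wimlib_progress_func_t progress_func)
{
        int write_flags = WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
                          WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE |
                          WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML |
                          WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE;

        return finish_write(w, WIMLIB_ALL_IMAGES, write_flags, progress_func);
}
#endif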
1806
1807 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
1808 int
1809 lock_wim(WIMStruct *w, filedes_t fd)
1810 {
1811         int ret = 0;
1812         if (fd != INVALID_FILEDES && !w->wim_locked) {
1813                 ret = flock(fd, LOCK_EX | LOCK_NB);
1814                 if (ret != 0) {
1815                         if (errno == EWOULDBLOCK) {
1816                                 ERROR("`%"TS"' is already being modified or has been "
1817                                       "mounted read-write\n"
1818                                       "        by another process!", w->filename);
1819                                 ret = WIMLIB_ERR_ALREADY_LOCKED;
1820                         } else {
1821                                 WARNING_WITH_ERRNO("Failed to lock `%"TS"'",
1822                                                    w->filename);
1823                                 ret = 0;
1824                         }
1825                 } else {
1826                         w->wim_locked = 1;
1827                 }
1828         }
1829         return ret;
1830 }
1831 #endif
1832
1833 static int
1834 open_wim_writable(WIMStruct *w, const tchar *path, int open_flags)
1835 {
1836         w->out_fd = open(path, open_flags, 0644);
1837         if (w->out_fd == INVALID_FILEDES) {
1838                 ERROR_WITH_ERRNO("Failed to open `%"TS"' for writing", path);
1839                 return WIMLIB_ERR_OPEN;
1840         }
1841         return 0;
1842 }
1843
1844
1845 void
1846 close_wim_writable(WIMStruct *w)
1847 {
1848         if (w->out_fd != INVALID_FILEDES) {
1849                 if (close(w->out_fd))
1850                         WARNING_WITH_ERRNO("Failed to close output WIM");
1851                 w->out_fd = INVALID_FILEDES;
1852         }
1853 }
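
/* Illustrative sketch (not part of wimlib; disabled): the open/lock/close
 * discipline used by the in-place overwrite path below.  The advisory lock is
 * taken right after the WIM is opened for writing; closing the descriptor on
 * which flock() was taken releases the lock, after which w->wim_locked is
 * cleared.  Error handling is simplified here. */
#if 0
static int
example_open_and_lock_for_update(WIMStruct *w)
{
        int ret;

        ret = open_wim_writable(w, w->filename, O_WRONLY);
        if (ret)
                return ret;

        ret = lock_wim(w, w->out_fd);
        if (ret) {
                close_wim_writable(w);
                return ret;
        }

        /* ... append streams and rewrite the lookup table, XML data, and
         * header here ... */

        close_wim_writable(w);
        w->wim_locked = 0;
        return 0;
}
#endif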
1854
1855 /* Open the output file and write a dummy WIM header, to be overwritten later. */
1856 int
1857 begin_write(WIMStruct *w, const tchar *path, int write_flags)
1858 {
1859         int ret;
1860         int open_flags = O_TRUNC | O_CREAT;
1861         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
1862                 open_flags |= O_RDWR;
1863         else
1864                 open_flags |= O_WRONLY;
1865         ret = open_wim_writable(w, path, open_flags);
1866         if (ret)
1867                 return ret;
1868         /* Write dummy header. It will be overwritten later. */
1869         ret = write_header(&w->hdr, w->out_fd);
1870         if (ret)
1871                 return ret;
1872         if (lseek(w->out_fd, WIM_HEADER_DISK_SIZE, SEEK_SET) == -1) {
1873                 ERROR_WITH_ERRNO("Failed to seek to end of WIM header");
1874                 return WIMLIB_ERR_WRITE;
1875         }
1876         return 0;
1877 }
1878
1879 /* Writes a stand-alone WIM to a file.  */
1880 WIMLIBAPI int
1881 wimlib_write(WIMStruct *w, const tchar *path,
1882              int image, int write_flags, unsigned num_threads,
1883              wimlib_progress_func_t progress_func)
1884 {
1885         int ret;
1886
1887         if (!path)
1888                 return WIMLIB_ERR_INVALID_PARAM;
1889
1890         write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
1891
1892         if (image != WIMLIB_ALL_IMAGES &&
1893              (image < 1 || image > w->hdr.image_count))
1894                 return WIMLIB_ERR_INVALID_IMAGE;
1895
1896         if (w->hdr.total_parts != 1) {
1897                 ERROR("Cannot call wimlib_write() on part of a split WIM");
1898                 return WIMLIB_ERR_SPLIT_UNSUPPORTED;
1899         }
1900
1901         ret = begin_write(w, path, write_flags);
1902         if (ret)
1903                 goto out_close_wim;
1904
1905         ret = write_wim_streams(w, image, write_flags, num_threads,
1906                                 progress_func);
1907         if (ret)
1908                 goto out_close_wim;
1909
1910         if (progress_func)
1911                 progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN, NULL);
1912
1913         ret = for_image(w, image, write_metadata_resource);
1914         if (ret)
1915                 goto out_close_wim;
1916
1917         if (progress_func)
1918                 progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_END, NULL);
1919
1920         ret = finish_write(w, image, write_flags, progress_func);
1921         /* finish_write() closed the WIM for us */
1922         goto out;
1923 out_close_wim:
1924         close_wim_writable(w);
1925 out:
1926         DEBUG("wimlib_write(path=%"TS") = %d", path, ret);
1927         return ret;
1928 }
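
/* Illustrative caller sketch (not part of wimlib; disabled): write every image
 * of an in-memory WIMStruct to a new stand-alone file, appending an integrity
 * table, with the number of compressor threads autodetected (num_threads == 0)
 * and no progress callback.  @w and @path are assumed to be supplied by the
 * caller; the return value is 0 or one of the WIMLIB_ERR_* codes used above. */
#if 0
static int
example_write_all_images(WIMStruct *w, const tchar *path)
{
        return wimlib_write(w, path, WIMLIB_ALL_IMAGES,
                            WIMLIB_WRITE_FLAG_CHECK_INTEGRITY,
                            0, NULL);
}
#endif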
1929
1930 static bool
1931 any_images_modified(WIMStruct *w)
1932 {
1933         for (int i = 0; i < w->hdr.image_count; i++)
1934                 if (w->image_metadata[i]->modified)
1935                         return true;
1936         return false;
1937 }
1938
1939 /*
1940  * Overwrite a WIM, possibly appending streams to it.
1941  *
1942  * A WIM looks like (or is supposed to look like) the following:
1943  *
1944  *                   Header (208 bytes)
1945  *                   Streams and metadata resources (variable size)
1946  *                   Lookup table (variable size)
1947  *                   XML data (variable size)
1948  *                   Integrity table (optional) (variable size)
1949  *
1950  * If we are not adding any streams or metadata resources, the lookup table is
1951  * unchanged, so we only need to overwrite the XML data, integrity table, and
1952  * header.  This operation is potentially unsafe if the program is abruptly
1953  * terminated while the XML data or integrity table are being overwritten, but
1954  * before the new header has been written.  To partially alleviate this problem,
1955  * a special flag (WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) is passed to
1956  * finish_write() to cause a temporary WIM header to be written after the XML
1957  * data has been written.  This may prevent the WIM from becoming corrupted if
1958  * the program is terminated while the integrity table is being calculated (but
1959  * no guarantees, due to write re-ordering...).
1960  *
1961  * If we are adding new streams or images (metadata resources), the lookup table
1962  * needs to be changed, and those streams need to be written.  In this case, we
1963  * try to perform a safe update of the WIM file by writing the streams *after*
1964  * the end of the previous WIM, then writing the new lookup table, XML data, and
1965  * (optionally) integrity table following the new streams.  This will produce a
1966  * layout like the following:
1967  *
1968  *                   Header (208 bytes)
1969  *                   (OLD) Streams and metadata resources (variable size)
1970  *                   (OLD) Lookup table (variable size)
1971  *                   (OLD) XML data (variable size)
1972  *                   (OLD) Integrity table (optional) (variable size)
1973  *                   (NEW) Streams and metadata resources (variable size)
1974  *                   (NEW) Lookup table (variable size)
1975  *                   (NEW) XML data (variable size)
1976  *                   (NEW) Integrity table (optional) (variable size)
1977  *
1978  * At all points, the WIM is valid as nothing points to the new data yet.  Then,
1979  * the header is overwritten to point to the new lookup table, XML data, and
1980  * integrity table, to produce the following layout:
1981  *
1982  *                   Header (208 bytes)
1983  *                   Streams and metadata resources (variable size)
1984  *                   Nothing (variable size)
1985  *                   More Streams and metadata resources (variable size)
1986  *                   Lookup table (variable size)
1987  *                   XML data (variable size)
1988  *                   Integrity table (optional) (variable size)
1989  *
1990  * This method allows an image to be appended to a large WIM very quickly, and
1991  * is crash-safe except in the case of write re-ordering, but the
1992  * disadvantage is that a small hole is left in the WIM where the old lookup
1993  * table, XML data, and integrity table were.  (These usually only take up a
1994  * small amount of space compared to the streams, however.)
1995  */
1996 static int
1997 overwrite_wim_inplace(WIMStruct *w, int write_flags,
1998                       unsigned num_threads,
1999                       wimlib_progress_func_t progress_func)
2000 {
2001         int ret;
2002         struct list_head stream_list;
2003         off_t old_wim_end;
2004         u64 old_lookup_table_end, old_xml_begin, old_xml_end;
2005         int open_flags;
2006
2007         DEBUG("Overwriting `%"TS"' in-place", w->filename);
2008
2009         /* Make sure that the integrity table (if present) is after the XML
2010          * data, and that there are no stream resources, metadata resources, or
2011          * lookup tables after the XML data.  Otherwise, these data would be
2012          * overwritten. */
2013         old_xml_begin = w->hdr.xml_res_entry.offset;
2014         old_xml_end = old_xml_begin + w->hdr.xml_res_entry.size;
2015         old_lookup_table_end = w->hdr.lookup_table_res_entry.offset +
2016                                w->hdr.lookup_table_res_entry.size;
2017         if (w->hdr.integrity.offset != 0 && w->hdr.integrity.offset < old_xml_end) {
2018                 ERROR("Didn't expect the integrity table to be before the XML data");
2019                 return WIMLIB_ERR_RESOURCE_ORDER;
2020         }
2021
2022         if (old_lookup_table_end > old_xml_begin) {
2023                 ERROR("Didn't expect the lookup table to be after the XML data");
2024                 return WIMLIB_ERR_RESOURCE_ORDER;
2025         }
2026
2027         /* Set @old_wim_end, which indicates the point beyond which we don't
2028          * allow any file or metadata resources to appear without returning
2029          * WIMLIB_ERR_RESOURCE_ORDER (because we would otherwise overwrite
2030          * these resources). */
2031         if (!w->deletion_occurred && !any_images_modified(w)) {
2032                 /* If no images have been modified and no images have been
2033                  * deleted, a new lookup table does not need to be written.  We
2034                  * shall write the new XML data and optional integrity table
2035                  * immediately after the lookup table.  Note that this may
2036                  * overwrite an existing integrity table. */
2037                 DEBUG("Skipping writing lookup table "
2038                       "(no images modified or deleted)");
2039                 old_wim_end = old_lookup_table_end;
2040                 write_flags |= WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE |
2041                                WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML;
2042         } else if (w->hdr.integrity.offset) {
2043                 /* Old WIM has an integrity table; begin writing new streams
2044                  * after it. */
2045                 old_wim_end = w->hdr.integrity.offset + w->hdr.integrity.size;
2046         } else {
2047                 /* No existing integrity table; begin writing new streams after
2048                  * the old XML data. */
2049                 old_wim_end = old_xml_end;
2050         }
2051
2052         ret = prepare_streams_for_overwrite(w, old_wim_end, &stream_list);
2053         if (ret)
2054                 return ret;
2055
2056         open_flags = 0;
2057         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
2058                 open_flags |= O_RDWR;
2059         else
2060                 open_flags |= O_WRONLY;
2061         ret = open_wim_writable(w, w->filename, open_flags);
2062         if (ret)
2063                 return ret;
2064
2065         ret = lock_wim(w, w->out_fd);
2066         if (ret) {
2067                 close_wim_writable(w);
2068                 return ret;
2069         }
2070
2071         if (lseek(w->out_fd, old_wim_end, SEEK_SET) == -1) {
2072                 ERROR_WITH_ERRNO("Can't seek to end of WIM");
2073                 close_wim_writable(w);
2074                 w->wim_locked = 0;
2075                 return WIMLIB_ERR_WRITE;
2076         }
2077
2078         DEBUG("Writing newly added streams (offset = %"PRIu64")",
2079               old_wim_end);
2080         ret = write_stream_list(&stream_list,
2081                                 w->lookup_table,
2082                                 w->out_fd,
2083                                 wimlib_get_compression_type(w),
2084                                 write_flags,
2085                                 num_threads,
2086                                 progress_func);
2087         if (ret)
2088                 goto out_truncate;
2089
2090         for (int i = 0; i < w->hdr.image_count; i++) {
2091                 if (w->image_metadata[i]->modified) {
2092                         select_wim_image(w, i + 1);
2093                         ret = write_metadata_resource(w);
2094                         if (ret)
2095                                 goto out_truncate;
2096                 }
2097         }
2098         write_flags |= WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE;
2099         ret = finish_write(w, WIMLIB_ALL_IMAGES, write_flags,
2100                            progress_func);
2101 out_truncate:
2102         close_wim_writable(w);
2103         if (ret != 0 && !(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
2104                 WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)",
2105                         w->filename, old_wim_end);
2106                 /* Return value of truncate() is ignored because this is already
2107                  * an error path. */
2108                 (void)ttruncate(w->filename, old_wim_end);
2109         }
2110         w->wim_locked = 0;
2111         return ret;
2112 }
2113
2114 static int
2115 overwrite_wim_via_tmpfile(WIMStruct *w, int write_flags,
2116                           unsigned num_threads,
2117                           wimlib_progress_func_t progress_func)
2118 {
2119         size_t wim_name_len;
2120         int ret;
2121
2122         DEBUG("Overwriting `%"TS"' via a temporary file", w->filename);
2123
2124         /* Write the WIM to a temporary file in the same directory as the
2125          * original WIM. */
2126         wim_name_len = tstrlen(w->filename);
2127         tchar tmpfile[wim_name_len + 10];
2128         tmemcpy(tmpfile, w->filename, wim_name_len);
2129         randomize_char_array_with_alnum(tmpfile + wim_name_len, 9);
2130         tmpfile[wim_name_len + 9] = T('\0');
2131
2132         ret = wimlib_write(w, tmpfile, WIMLIB_ALL_IMAGES,
2133                            write_flags | WIMLIB_WRITE_FLAG_FSYNC,
2134                            num_threads, progress_func);
2135         if (ret) {
2136                 ERROR("Failed to write the WIM file `%"TS"'", tmpfile);
2137                 goto out_unlink;
2138         }
2139
2140         close_wim(w);
2141
2142         DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, w->filename);
2143         /* Rename the new file to the old file. */
2144         if (trename(tmpfile, w->filename) != 0) {
2145                 ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'",
2146                                  tmpfile, w->filename);
2147                 ret = WIMLIB_ERR_RENAME;
2148                 goto out_unlink;
2149         }
2150
2151         if (progress_func) {
2152                 union wimlib_progress_info progress;
2153                 progress.rename.from = tmpfile;
2154                 progress.rename.to = w->filename;
2155                 progress_func(WIMLIB_PROGRESS_MSG_RENAME, &progress);
2156         }
2157         goto out;
2158 out_unlink:
2159         /* Remove temporary file. */
2160         if (tunlink(tmpfile) != 0)
2161                 WARNING_WITH_ERRNO("Failed to remove `%"TS"'", tmpfile);
2162 out:
2163         return ret;
2164 }
2165
2166 /*
2167  * Writes a WIM file to the original file that it was read from, overwriting it.
2168  */
2169 WIMLIBAPI int
2170 wimlib_overwrite(WIMStruct *w, int write_flags,
2171                  unsigned num_threads,
2172                  wimlib_progress_func_t progress_func)
2173 {
2174         write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
2175
2176         if (!w->filename)
2177                 return WIMLIB_ERR_NO_FILENAME;
2178
2179         if (w->hdr.total_parts != 1) {
2180                 ERROR("Cannot modify a split WIM");
2181                 return WIMLIB_ERR_SPLIT_UNSUPPORTED;
2182         }
2183
2184         if ((!w->deletion_occurred || (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
2185             && !(write_flags & WIMLIB_WRITE_FLAG_REBUILD))
2186         {
2187                 int ret;
2188                 ret = overwrite_wim_inplace(w, write_flags, num_threads,
2189                                             progress_func);
2190                 if (ret == WIMLIB_ERR_RESOURCE_ORDER)
2191                         WARNING("Falling back to re-building entire WIM");
2192                 else
2193                         return ret;
2194         }
2195         return overwrite_wim_via_tmpfile(w, write_flags, num_threads,
2196                                          progress_func);
2197 }
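
/* Illustrative caller sketch (not part of wimlib; disabled): commit changes
 * back to the file the WIM was opened from.  Passing WIMLIB_WRITE_FLAG_REBUILD
 * forces the temporary-file path even when an in-place append would have been
 * possible; otherwise wimlib_overwrite() chooses between the two strategies as
 * described above. */
#if 0
static int
example_commit_changes(WIMStruct *w, bool rebuild)
{
        int write_flags = WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;

        if (rebuild)
                write_flags |= WIMLIB_WRITE_FLAG_REBUILD;
        return wimlib_overwrite(w, write_flags, 0, NULL);
}
#endif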