1 /*
2  * write.c
3  *
4  * Support for writing WIM files; write a WIM file, overwrite a WIM file, write
5  * compressed file resources, etc.
6  */
7
8 /*
9  * Copyright (C) 2012, 2013 Eric Biggers
10  *
11  * This file is part of wimlib, a library for working with WIM files.
12  *
13  * wimlib is free software; you can redistribute it and/or modify it under the
14  * terms of the GNU General Public License as published by the Free
15  * Software Foundation; either version 3 of the License, or (at your option)
16  * any later version.
17  *
18  * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
19  * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
20  * A PARTICULAR PURPOSE. See the GNU General Public License for more
21  * details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with wimlib; if not, see http://www.gnu.org/licenses/.
25  */
26
27 #include "config.h"
28
29 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
30 /* On BSD, this should be included before "list.h" so that "list.h" can
31  * overwrite the LIST_HEAD macro. */
32 #  include <sys/file.h>
33 #endif
34
35 #ifdef __WIN32__
36 #  include "win32.h"
37 #endif
38
39 #include "list.h"
40 #include "wimlib_internal.h"
41 #include "buffer_io.h"
42 #include "dentry.h"
43 #include "lookup_table.h"
44 #include "xml.h"
45
46 #ifdef ENABLE_MULTITHREADED_COMPRESSION
47 #  include <pthread.h>
48 #endif
49
50 #include <unistd.h>
51 #include <fcntl.h>
52 #include <errno.h>
53
54 #ifdef WITH_NTFS_3G
55 #  include <time.h>
56 #  include <ntfs-3g/attrib.h>
57 #  include <ntfs-3g/inode.h>
58 #  include <ntfs-3g/dir.h>
59 #endif
60
61 #ifdef HAVE_ALLOCA_H
62 #  include <alloca.h>
63 #else
64 #  include <stdlib.h>
65 #endif
66
67 #include <limits.h>
68
69 #ifndef __WIN32__
70 #  include <sys/uio.h> /* for `struct iovec' */
71 #endif
72
73 /* Chunk table that's located at the beginning of each compressed resource in
74  * the WIM.  (This is not the on-disk format; the on-disk format just has an
75  * array of offsets.) */
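/* For example, following the math in begin_wim_resource_chunk_tab() below: a
 * 100000-byte resource split into 32768-byte chunks has 4 chunks, but only 3
 * offsets are stored on disk (table_disk_size covers num_chunks - 1 entries);
 * the first chunk implicitly begins right after the chunk table. */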
76 struct chunk_table {
77         off_t file_offset;
78         u64 num_chunks;
79         u64 original_resource_size;
80         u64 bytes_per_chunk_entry;
81         u64 table_disk_size;
82         u64 cur_offset;
83         u64 *cur_offset_p;
84         u64 offsets[0];
85 };
86
87 /*
88  * Allocates and initializes a chunk table, and reserves space for it in the
89  * output file.
90  */
91 static int
92 begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte,
93                              int out_fd,
94                              off_t file_offset,
95                              struct chunk_table **chunk_tab_ret)
96 {
97         u64 size = wim_resource_size(lte);
98         u64 num_chunks = (size + WIM_CHUNK_SIZE - 1) / WIM_CHUNK_SIZE;
99         size_t alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64);
100         struct chunk_table *chunk_tab = CALLOC(1, alloc_size);
101
102         DEBUG("Begin chunk table for stream with size %"PRIu64, size);
103
104         if (!chunk_tab) {
105                 ERROR("Failed to allocate chunk table for %"PRIu64" byte "
106                       "resource", size);
107                 return WIMLIB_ERR_NOMEM;
108         }
109         chunk_tab->file_offset = file_offset;
110         chunk_tab->num_chunks = num_chunks;
111         chunk_tab->original_resource_size = size;
112         chunk_tab->bytes_per_chunk_entry = (size >= (1ULL << 32)) ? 8 : 4;
113         chunk_tab->table_disk_size = chunk_tab->bytes_per_chunk_entry *
114                                      (num_chunks - 1);
115         chunk_tab->cur_offset = 0;
116         chunk_tab->cur_offset_p = chunk_tab->offsets;
117
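        /* Write a placeholder of table_disk_size bytes; the real chunk table
         * is written over it later by finish_wim_resource_chunk_tab(). */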
118         if (full_write(out_fd, chunk_tab,
119                        chunk_tab->table_disk_size) != chunk_tab->table_disk_size)
120         {
121                 ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
122                                  "file resource");
123                 FREE(chunk_tab);
124                 return WIMLIB_ERR_WRITE;
125         }
126         *chunk_tab_ret = chunk_tab;
127         return 0;
128 }
129
130 /*
131  * compress_func_t - Pointer to a function that compresses a chunk
132  *                  of a WIM resource.  This may be either
133  *                  wimlib_xpress_compress() (xpress-compress.c) or
134  *                  wimlib_lzx_compress() (lzx-compress.c).
135  *
136  * @chunk:        Uncompressed data of the chunk.
137  * @chunk_size:   Size of the uncompressed chunk, in bytes.
138  * @out:          Pointer to output buffer of size at least (@chunk_size - 1) bytes.
139  *
140  * Returns the size of the compressed data written to @out in bytes, or 0 if the
141  * data could not be compressed to (@chunk_size - 1) bytes or fewer.
142  *
143  * As a special requirement, the compression code is optimized for the WIM
144  * format and therefore requires (@chunk_size <= 32768).
145  *
146  * As another special requirement, the compression code will read up to 8 bytes
147  * off the end of the @chunk array for performance reasons.  The values of these
148  * bytes will not affect the output of the compression, but the calling code
149  * must make sure that the buffer holding the uncompressed chunk is actually at
150  * least (@chunk_size + 8) bytes, or at least that these extra bytes are in
151  * mapped memory that will not cause a memory access violation if accessed.
152  */
153 typedef unsigned (*compress_func_t)(const void *chunk, unsigned chunk_size,
154                                     void *out);
155
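/* Return the compression function to use for the specified output compression
 * type, which must be either LZX or XPRESS. */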
156 static compress_func_t
157 get_compress_func(int out_ctype)
158 {
159         if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX)
160                 return wimlib_lzx_compress;
161         else
162                 return wimlib_xpress_compress;
163 }
164
165 /*
166  * Writes a chunk of a WIM resource to an output file.
167  *
168  * @chunk:        Uncompressed data of the chunk.
169  * @chunk_size:   Size of the chunk (<= WIM_CHUNK_SIZE)
170  * @out_fd:       File descriptor to write the chunk to.
171  * @compress:     Compression function to use (NULL if writing uncompressed
172  *                      data).
173  * @chunk_tab:    Pointer to chunk table being created.  It is updated with the
174  *                      offset of the chunk we write.
175  *
176  * Returns 0 on success; nonzero on failure.
177  */
178 static int
179 write_wim_resource_chunk(const void * restrict chunk,
180                          unsigned chunk_size,
181                          int out_fd,
182                          compress_func_t compress,
183                          struct chunk_table * restrict chunk_tab)
184 {
185         const void *out_chunk;
186         unsigned out_chunk_size;
187         if (compress) {
188                 void *compressed_chunk = alloca(chunk_size);
189
190                 out_chunk_size = (*compress)(chunk, chunk_size, compressed_chunk);
191                 if (out_chunk_size) {
192                         /* Write compressed */
193                         out_chunk = compressed_chunk;
194                 } else {
195                         /* Write uncompressed */
196                         out_chunk = chunk;
197                         out_chunk_size = chunk_size;
198                 }
199                 *chunk_tab->cur_offset_p++ = chunk_tab->cur_offset;
200                 chunk_tab->cur_offset += out_chunk_size;
201         } else {
202                 /* Write uncompressed */
203                 out_chunk = chunk;
204                 out_chunk_size = chunk_size;
205         }
206         if (full_write(out_fd, out_chunk, out_chunk_size) != out_chunk_size) {
207                 ERROR_WITH_ERRNO("Failed to write WIM resource chunk");
208                 return WIMLIB_ERR_WRITE;
209         }
210         return 0;
211 }
212
213 /*
214  * Finishes a WIM chunk table and writes it to the output file at the correct
215  * offset.
216  *
217  * The total compressed size of the resource (chunk data plus chunk table) is
218  * returned in @compressed_size_p.
219  */
220 static int
221 finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab,
222                               int out_fd, u64 *compressed_size_p)
223 {
224         size_t bytes_written;
225
226         if (chunk_tab->bytes_per_chunk_entry == 8) {
227                 array_cpu_to_le64(chunk_tab->offsets, chunk_tab->num_chunks);
228         } else {
229                 for (u64 i = 0; i < chunk_tab->num_chunks; i++)
230                         ((u32*)chunk_tab->offsets)[i] =
231                                 cpu_to_le32(chunk_tab->offsets[i]);
232         }
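        /* The on-disk chunk table omits the first offset, which is always 0;
         * hence the +bytes_per_chunk_entry below and the (num_chunks - 1)-entry
         * table_disk_size. */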
233         bytes_written = full_pwrite(out_fd,
234                                     (u8*)chunk_tab->offsets + chunk_tab->bytes_per_chunk_entry,
235                                     chunk_tab->table_disk_size,
236                                     chunk_tab->file_offset);
237         if (bytes_written != chunk_tab->table_disk_size) {
238                 ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
239                                  "file resource");
240                 return WIMLIB_ERR_WRITE;
241         }
242         *compressed_size_p = chunk_tab->cur_offset + chunk_tab->table_disk_size;
243         return 0;
244 }
245
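/* Seek to @offset in the output file and truncate it there, discarding any
 * data already written past that point (used when a resource must be
 * rewritten, e.g. uncompressed). */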
246 static int
247 seek_and_truncate(int out_fd, off_t offset)
248 {
249         if (lseek(out_fd, offset, SEEK_SET) == -1 ||
250             ftruncate(out_fd, offset))
251         {
252                 ERROR_WITH_ERRNO("Failed to truncate output WIM file");
253                 return WIMLIB_ERR_WRITE;
254         } else {
255                 return 0;
256         }
257 }
258
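/* Finish the SHA1 digest in @sha_ctx.  If @lte is unhashed, the digest becomes
 * its hash; otherwise the digest is checked against the existing hash, and a
 * mismatch is an error. */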
259 static int
260 finalize_and_check_sha1(SHA_CTX * restrict sha_ctx,
261                         struct wim_lookup_table_entry * restrict lte)
262 {
263         u8 md[SHA1_HASH_SIZE];
264         sha1_final(md, sha_ctx);
265         if (lte->unhashed) {
266                 copy_hash(lte->hash, md);
267         } else if (!hashes_equal(md, lte->hash)) {
268                 ERROR("WIM resource has incorrect hash!");
269                 if (lte_filename_valid(lte)) {
270                         ERROR("We were reading it from \"%"TS"\"; maybe "
271                               "it changed while we were reading it.",
272                               lte->file_on_disk);
273                 }
274                 return WIMLIB_ERR_INVALID_RESOURCE_HASH;
275         }
276         return 0;
277 }
278
279
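/* Context shared between write_wim_resource() and its per-chunk callback
 * write_resource_cb(). */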
280 struct write_resource_ctx {
281         compress_func_t compress;
282         struct chunk_table *chunk_tab;
283         int out_fd;
284         SHA_CTX sha_ctx;
285         bool doing_sha;
286 };
287
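/* Callback passed to read_resource_prefix():  update the running SHA1 digest
 * (when enabled) and write the chunk to the output WIM, compressed if a
 * compression function was set. */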
288 static int
289 write_resource_cb(const void *restrict chunk, size_t chunk_size,
290                   void *restrict _ctx)
291 {
292         struct write_resource_ctx *ctx = _ctx;
293
294         if (ctx->doing_sha)
295                 sha1_update(&ctx->sha_ctx, chunk, chunk_size);
296         return write_wim_resource_chunk(chunk, chunk_size,
297                                         ctx->out_fd, ctx->compress,
298                                         ctx->chunk_tab);
299 }
300
301 /*
302  * Write a resource to an output WIM.
303  *
304  * @lte:  Lookup table entry for the resource, which could be in another WIM,
305  *        in an external file, or in another location.
306  *
307  * @out_fd:  File descriptor opened to the output WIM.
308  *
309  * @out_ctype:  One of the WIMLIB_COMPRESSION_TYPE_* constants to indicate
310  *              which compression algorithm to use.
311  *
312  * @out_res_entry:  On success, this is filled in with the offset, flags,
313  *                  compressed size, and uncompressed size of the resource
314  *                  in the output WIM.
315  *
316  * @flags:  WIMLIB_RESOURCE_FLAG_RECOMPRESS to force data to be recompressed
317  *          even if it could otherwise be copied directly from the input.
318  *
319  * Additional notes:  The SHA1 message digest of the uncompressed data is
320  * calculated (except when doing a raw copy --- see below).  If the @unhashed
321  * flag is set on the lookup table entry, this message digest is simply copied
322  * to it; otherwise, the message digest is compared with the existing one, and
323  * the function will fail if they do not match.
324  */
325 int
326 write_wim_resource(struct wim_lookup_table_entry *lte,
327                    int out_fd, int out_ctype,
328                    struct resource_entry *out_res_entry,
329                    int flags)
330 {
331         struct write_resource_ctx write_ctx;
332         u64 read_size;
333         u64 new_size;
334         off_t offset;
335         int ret;
336
337         flags &= ~WIMLIB_RESOURCE_FLAG_RAW;
338
339         /* Get current position in output WIM */
340         offset = filedes_offset(out_fd);
341         if (offset == -1) {
342                 ERROR_WITH_ERRNO("Can't get position in output WIM");
343                 return WIMLIB_ERR_WRITE;
344         }
345
346         /* If we are not forcing the data to be recompressed, and the input
347          * resource is in a WIM whose compression type matches the desired output
348          * compression type (and that type is not "none"), we can simply copy the
349          * compressed data without recompressing it.  This also means we must
350          * skip calculating the SHA1, since we will never see the uncompressed data. */
351         if (!(flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) &&
352             lte->resource_location == RESOURCE_IN_WIM &&
353             out_ctype != WIMLIB_COMPRESSION_TYPE_NONE &&
354             wimlib_get_compression_type(lte->wim) == out_ctype)
355         {
356                 flags |= WIMLIB_RESOURCE_FLAG_RAW;
357                 write_ctx.doing_sha = false;
358                 read_size = lte->resource_entry.size;
359         } else {
360                 write_ctx.doing_sha = true;
361                 sha1_init(&write_ctx.sha_ctx);
362                 read_size = lte->resource_entry.original_size;
363         }
364
365         /* Initialize the chunk table and set the compression function if
366          * compressing the resource. */
367         if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
368             (flags & WIMLIB_RESOURCE_FLAG_RAW)) {
369                 write_ctx.compress = NULL;
370                 write_ctx.chunk_tab = NULL;
371         } else {
372                 write_ctx.compress = get_compress_func(out_ctype);
373                 ret = begin_wim_resource_chunk_tab(lte, out_fd,
374                                                    offset,
375                                                    &write_ctx.chunk_tab);
376                 if (ret)
377                         return ret;
378         }
379
380         /* Write the resource by reading all of its data and feeding it through
381          * the write_resource_cb() function. */
382         write_ctx.out_fd = out_fd;
383 try_write_again:
384         ret = read_resource_prefix(lte, read_size,
385                                    write_resource_cb, &write_ctx, flags);
386         if (ret)
387                 goto out_free_chunk_tab;
388
389         /* Verify SHA1 message digest of the resource, or set the hash for the
390          * first time. */
391         if (write_ctx.doing_sha) {
392                 ret = finalize_and_check_sha1(&write_ctx.sha_ctx, lte);
393                 if (ret)
394                         goto out_free_chunk_tab;
395         }
396
397         out_res_entry->flags = lte->resource_entry.flags;
398         out_res_entry->original_size = wim_resource_size(lte);
399         out_res_entry->offset = offset;
400         if (flags & WIMLIB_RESOURCE_FLAG_RAW) {
401                 /* Doing a raw write:  The new compressed size is the same as
402                  * the compressed size in the other WIM. */
403                 new_size = lte->resource_entry.size;
404         } else if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE) {
405                 /* Using WIMLIB_COMPRESSION_TYPE_NONE:  The new compressed size
406                  * is the original size. */
407                 new_size = lte->resource_entry.original_size;
408                 out_res_entry->flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
409         } else {
410                 /* Using a different compression type:  Call
411                  * finish_wim_resource_chunk_tab() and it will provide the new
412                  * compressed size. */
413                 ret = finish_wim_resource_chunk_tab(write_ctx.chunk_tab, out_fd,
414                                                     &new_size);
415                 if (ret)
416                         goto out_free_chunk_tab;
417                 if (new_size >= wim_resource_size(lte)) {
418                         /* Oops!  We compressed the resource to larger than the original
419                          * size.  Write the resource uncompressed instead. */
420                         DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
421                               "writing uncompressed instead",
422                               wim_resource_size(lte), new_size);
423                         ret = seek_and_truncate(out_fd, offset);
424                         if (ret)
425                                 goto out_free_chunk_tab;
426                         write_ctx.compress = NULL;
427                         write_ctx.doing_sha = false;
428                         out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
429                         goto try_write_again;
430                 }
431                 out_res_entry->flags |= WIM_RESHDR_FLAG_COMPRESSED;
432         }
433         out_res_entry->size = new_size;
434         ret = 0;
435 out_free_chunk_tab:
436         FREE(write_ctx.chunk_tab);
437         return ret;
438 }
439
440 #ifdef ENABLE_MULTITHREADED_COMPRESSION
441
442 /* Blocking shared queue (solves the producer-consumer problem) */
443 struct shared_queue {
444         unsigned size;
445         unsigned front;
446         unsigned back;
447         unsigned filled_slots;
448         void **array;
449         pthread_mutex_t lock;
450         pthread_cond_t msg_avail_cond;
451         pthread_cond_t space_avail_cond;
452 };
453
454 static int
455 shared_queue_init(struct shared_queue *q, unsigned size)
456 {
457         wimlib_assert(size != 0);
458         q->array = CALLOC(sizeof(q->array[0]), size);
459         if (!q->array)
460                 goto err;
461         q->filled_slots = 0;
462         q->front = 0;
463         q->back = size - 1;
464         q->size = size;
465         if (pthread_mutex_init(&q->lock, NULL)) {
466                 ERROR_WITH_ERRNO("Failed to initialize mutex");
467                 goto err;
468         }
469         if (pthread_cond_init(&q->msg_avail_cond, NULL)) {
470                 ERROR_WITH_ERRNO("Failed to initialize condition variable");
471                 goto err_destroy_lock;
472         }
473         if (pthread_cond_init(&q->space_avail_cond, NULL)) {
474                 ERROR_WITH_ERRNO("Failed to initialize condition variable");
475                 goto err_destroy_msg_avail_cond;
476         }
477         return 0;
478 err_destroy_msg_avail_cond:
479         pthread_cond_destroy(&q->msg_avail_cond);
480 err_destroy_lock:
481         pthread_mutex_destroy(&q->lock);
482 err:
483         return WIMLIB_ERR_NOMEM;
484 }
485
486 static void
487 shared_queue_destroy(struct shared_queue *q)
488 {
489         FREE(q->array);
490         pthread_mutex_destroy(&q->lock);
491         pthread_cond_destroy(&q->msg_avail_cond);
492         pthread_cond_destroy(&q->space_avail_cond);
493 }
494
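/* Enqueue @obj, blocking while the queue is full. */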
495 static void
496 shared_queue_put(struct shared_queue *q, void *obj)
497 {
498         pthread_mutex_lock(&q->lock);
499         while (q->filled_slots == q->size)
500                 pthread_cond_wait(&q->space_avail_cond, &q->lock);
501
502         q->back = (q->back + 1) % q->size;
503         q->array[q->back] = obj;
504         q->filled_slots++;
505
506         pthread_cond_broadcast(&q->msg_avail_cond);
507         pthread_mutex_unlock(&q->lock);
508 }
509
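/* Dequeue and return the oldest object in the queue, blocking while the queue
 * is empty. */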
510 static void *
511 shared_queue_get(struct shared_queue *q)
512 {
513         void *obj;
514
515         pthread_mutex_lock(&q->lock);
516         while (q->filled_slots == 0)
517                 pthread_cond_wait(&q->msg_avail_cond, &q->lock);
518
519         obj = q->array[q->front];
520         q->array[q->front] = NULL;
521         q->front = (q->front + 1) % q->size;
522         q->filled_slots--;
523
524         pthread_cond_broadcast(&q->space_avail_cond);
525         pthread_mutex_unlock(&q->lock);
526         return obj;
527 }
528
529 struct compressor_thread_params {
530         struct shared_queue *res_to_compress_queue;
531         struct shared_queue *compressed_res_queue;
532         compress_func_t compress;
533 };
534
535 #define MAX_CHUNKS_PER_MSG 2
536
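/* A batch of up to MAX_CHUNKS_PER_MSG chunks belonging to one stream, passed
 * from the main thread to a compressor thread and back again.  It holds both
 * the uncompressed and compressed buffers, plus iovecs describing the data
 * that will actually be written out for each chunk. */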
537 struct message {
538         struct wim_lookup_table_entry *lte;
539         u8 *uncompressed_chunks[MAX_CHUNKS_PER_MSG];
540         u8 *compressed_chunks[MAX_CHUNKS_PER_MSG];
541         unsigned uncompressed_chunk_sizes[MAX_CHUNKS_PER_MSG];
542         struct iovec out_chunks[MAX_CHUNKS_PER_MSG];
543         size_t total_out_bytes;
544         unsigned num_chunks;
545         struct list_head list;
546         bool complete;
547         u64 begin_chunk;
548 };
549
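/* Compress each chunk in @msg.  Chunks that do not compress to a smaller size
 * are kept uncompressed.  Fills in msg->out_chunks and msg->total_out_bytes. */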
550 static void
551 compress_chunks(struct message *msg, compress_func_t compress)
552 {
553         msg->total_out_bytes = 0;
554         for (unsigned i = 0; i < msg->num_chunks; i++) {
555                 unsigned len = compress(msg->uncompressed_chunks[i],
556                                         msg->uncompressed_chunk_sizes[i],
557                                         msg->compressed_chunks[i]);
558                 void *out_chunk;
559                 unsigned out_len;
560                 if (len) {
561                         /* To be written compressed */
562                         out_chunk = msg->compressed_chunks[i];
563                         out_len = len;
564                 } else {
565                         /* To be written uncompressed */
566                         out_chunk = msg->uncompressed_chunks[i];
567                         out_len = msg->uncompressed_chunk_sizes[i];
568                 }
569                 msg->out_chunks[i].iov_base = out_chunk;
570                 msg->out_chunks[i].iov_len = out_len;
571                 msg->total_out_bytes += out_len;
572         }
573 }
574
575 /* Compressor thread routine.  This is a lot simpler than the main thread
576  * routine: just repeatedly get a group of chunks from the
577  * res_to_compress_queue, compress them, and put them in the
578  * compressed_res_queue.  A NULL pointer indicates that the thread should stop.
579  * */
580 static void *
581 compressor_thread_proc(void *arg)
582 {
583         struct compressor_thread_params *params = arg;
584         struct shared_queue *res_to_compress_queue = params->res_to_compress_queue;
585         struct shared_queue *compressed_res_queue = params->compressed_res_queue;
586         compress_func_t compress = params->compress;
587         struct message *msg;
588
589         DEBUG("Compressor thread ready");
590         while ((msg = shared_queue_get(res_to_compress_queue)) != NULL) {
591                 compress_chunks(msg, compress);
592                 shared_queue_put(compressed_res_queue, msg);
593         }
594         DEBUG("Compressor thread terminating");
595         return NULL;
596 }
597 #endif /* ENABLE_MULTITHREADED_COMPRESSION */
598
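/* Update the write-streams progress after a stream has been handled.  A stream
 * discarded as a duplicate shrinks total_bytes instead of advancing
 * completed_bytes.  Progress callbacks are throttled to roughly one per 1% of
 * total_bytes, using the _private field as the next reporting threshold. */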
599 static void
600 do_write_streams_progress(union wimlib_progress_info *progress,
601                           wimlib_progress_func_t progress_func,
602                           uint64_t size_added,
603                           bool stream_discarded)
604 {
605         if (stream_discarded) {
606                 progress->write_streams.total_bytes -= size_added;
607                 if (progress->write_streams._private != ~(uint64_t)0 &&
608                     progress->write_streams._private > progress->write_streams.total_bytes)
609                 {
610                         progress->write_streams._private = progress->write_streams.total_bytes;
611                 }
612         } else {
613                 progress->write_streams.completed_bytes += size_added;
614         }
615         progress->write_streams.completed_streams++;
616         if (progress_func &&
617             progress->write_streams.completed_bytes >= progress->write_streams._private)
618         {
619                 progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
620                               progress);
621                 if (progress->write_streams._private == progress->write_streams.total_bytes) {
622                         progress->write_streams._private = ~(uint64_t)0;
623                 } else {
624                         progress->write_streams._private =
625                                 min(progress->write_streams.total_bytes,
626                                     progress->write_streams.completed_bytes +
627                                         progress->write_streams.total_bytes / 100);
628                 }
629         }
630 }
631
632 struct serial_write_stream_ctx {
633         int out_fd;
634         int out_ctype;
635         int write_resource_flags;
636 };
637
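/* Write a single stream to the output WIM; used as the per-stream callback for
 * the serial code path. */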
638 static int
639 serial_write_stream(struct wim_lookup_table_entry *lte, void *_ctx)
640 {
641         struct serial_write_stream_ctx *ctx = _ctx;
642         return write_wim_resource(lte, ctx->out_fd,
643                                   ctx->out_ctype, &lte->output_resource_entry,
644                                   ctx->write_resource_flags);
645 }
646
647 /* Write a list of streams, taking into account that some streams may be
648  * duplicates that are checksummed and discarded on the fly, and also delegating
649  * the actual writing of a stream to a function @write_stream_cb, which is
650  * passed the context @write_stream_ctx. */
651 static int
652 do_write_stream_list(struct list_head *stream_list,
653                      struct wim_lookup_table *lookup_table,
654                      int (*write_stream_cb)(struct wim_lookup_table_entry *, void *),
655                      void *write_stream_ctx,
656                      wimlib_progress_func_t progress_func,
657                      union wimlib_progress_info *progress)
658 {
659         int ret = 0;
660         struct wim_lookup_table_entry *lte;
661         bool stream_discarded;
662
663         /* For each stream in @stream_list ... */
664         while (!list_empty(stream_list)) {
665                 stream_discarded = false;
666                 lte = container_of(stream_list->next,
667                                    struct wim_lookup_table_entry,
668                                    write_streams_list);
669                 list_del(&lte->write_streams_list);
670                 if (lte->unhashed && !lte->unique_size) {
671                         /* Unhashed stream that shares a size with some other
672                          * stream in the WIM we are writing.  The stream must be
673                          * checksummed to know if we need to write it or not. */
674                         struct wim_lookup_table_entry *tmp;
675                         u32 orig_refcnt = lte->out_refcnt;
676
677                         ret = hash_unhashed_stream(lte, lookup_table, &tmp);
678                         if (ret)
679                                 break;
680                         if (tmp != lte) {
681                                 lte = tmp;
682                                 /* We found a duplicate stream. */
683                                 if (orig_refcnt != tmp->out_refcnt) {
684                                         /* We have already written, or are going
685                                          * to write, the duplicate stream.  So
686                                          * just skip to the next stream. */
687                                         DEBUG("Discarding duplicate stream of length %"PRIu64,
688                                               wim_resource_size(lte));
689                                         lte->no_progress = 0;
690                                         stream_discarded = true;
691                                         goto skip_to_progress;
692                                 }
693                         }
694                 }
695
696                 /* Here, @lte is either a hashed stream or an unhashed stream
697                  * with a unique size; either way we know the stream has to be
698                  * written.  In both cases the SHA1 message digest is calculated
699                  * over the stream while writing it; however, in the former case
700                  * this is done merely to check the data, while in the latter
701                  * case it is done because we do not have the SHA1 message
702                  * digest yet.  */
703                 wimlib_assert(lte->out_refcnt != 0);
704                 lte->deferred = 0;
705                 lte->no_progress = 0;
706                 ret = (*write_stream_cb)(lte, write_stream_ctx);
707                 if (ret)
708                         break;
709                 /* In parallel mode, some streams are deferred for later,
710                  * serialized processing; ignore them here. */
711                 if (lte->deferred)
712                         continue;
713                 if (lte->unhashed) {
714                         list_del(&lte->unhashed_list);
715                         lookup_table_insert(lookup_table, lte);
716                         lte->unhashed = 0;
717                 }
718         skip_to_progress:
719                 if (!lte->no_progress) {
720                         do_write_streams_progress(progress,
721                                                   progress_func,
722                                                   wim_resource_size(lte),
723                                                   stream_discarded);
724                 }
725         }
726         return ret;
727 }
728
729 static int
730 do_write_stream_list_serial(struct list_head *stream_list,
731                             struct wim_lookup_table *lookup_table,
732                             int out_fd,
733                             int out_ctype,
734                             int write_resource_flags,
735                             wimlib_progress_func_t progress_func,
736                             union wimlib_progress_info *progress)
737 {
738         struct serial_write_stream_ctx ctx = {
739                 .out_fd = out_fd,
740                 .out_ctype = out_ctype,
741                 .write_resource_flags = write_resource_flags,
742         };
743         return do_write_stream_list(stream_list,
744                                     lookup_table,
745                                     serial_write_stream,
746                                     &ctx,
747                                     progress_func,
748                                     progress);
749 }
750
751 static inline int
752 write_flags_to_resource_flags(int write_flags)
753 {
754         int resource_flags = 0;
755
756         if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
757                 resource_flags |= WIMLIB_RESOURCE_FLAG_RECOMPRESS;
758         return resource_flags;
759 }
760
761 static int
762 write_stream_list_serial(struct list_head *stream_list,
763                          struct wim_lookup_table *lookup_table,
764                          int out_fd,
765                          int out_ctype,
766                          int write_resource_flags,
767                          wimlib_progress_func_t progress_func,
768                          union wimlib_progress_info *progress)
769 {
770         DEBUG("Writing stream list (serial version)");
771         progress->write_streams.num_threads = 1;
772         if (progress_func)
773                 progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
774         return do_write_stream_list_serial(stream_list,
775                                            lookup_table,
776                                            out_fd,
777                                            out_ctype,
778                                            write_resource_flags,
779                                            progress_func,
780                                            progress);
781 }
782
783 #ifdef ENABLE_MULTITHREADED_COMPRESSION
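/* Write the already-compressed chunks described by @msg to the output file
 * with a gathered write, recording each chunk's offset in the chunk table. */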
784 static int
785 write_wim_chunks(struct message *msg, int out_fd,
786                  struct chunk_table *chunk_tab)
787 {
788         for (unsigned i = 0; i < msg->num_chunks; i++) {
789                 *chunk_tab->cur_offset_p++ = chunk_tab->cur_offset;
790                 chunk_tab->cur_offset += msg->out_chunks[i].iov_len;
791         }
792         if (full_writev(out_fd, msg->out_chunks,
793                         msg->num_chunks) != msg->total_out_bytes)
794         {
795                 ERROR_WITH_ERRNO("Failed to write WIM chunks");
796                 return WIMLIB_ERR_WRITE;
797         }
798         return 0;
799 }
800
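/* State used by the main thread while writing a stream list with parallel
 * compression:  the message buffers, the queues shared with the compressor
 * threads, the stream currently being read, and the chunk table of the stream
 * currently being written. */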
801 struct main_writer_thread_ctx {
802         struct list_head *stream_list;
803         struct wim_lookup_table *lookup_table;
804         int out_fd;
805         int out_ctype;
806         int write_resource_flags;
807         struct shared_queue *res_to_compress_queue;
808         struct shared_queue *compressed_res_queue;
809         size_t num_messages;
810         wimlib_progress_func_t progress_func;
811         union wimlib_progress_info *progress;
812
813         struct list_head available_msgs;
814         struct list_head outstanding_streams;
815         struct list_head serial_streams;
816         size_t num_outstanding_messages;
817
818         SHA_CTX next_sha_ctx;
819         u64 next_chunk;
820         u64 next_num_chunks;
821         struct wim_lookup_table_entry *next_lte;
822
823         struct message *msgs;
824         struct message *next_msg;
825         struct chunk_table *cur_chunk_tab;
826 };
827
828 static int
829 init_message(struct message *msg)
830 {
831         for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
832                 msg->compressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
833                 msg->uncompressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
834                 if (msg->compressed_chunks[i] == NULL ||
835                     msg->uncompressed_chunks[i] == NULL)
836                         return WIMLIB_ERR_NOMEM;
837         }
838         return 0;
839 }
840
841 static void
842 destroy_message(struct message *msg)
843 {
844         for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
845                 FREE(msg->compressed_chunks[i]);
846                 FREE(msg->uncompressed_chunks[i]);
847         }
848 }
849
850 static void
851 free_messages(struct message *msgs, size_t num_messages)
852 {
853         if (msgs) {
854                 for (size_t i = 0; i < num_messages; i++)
855                         destroy_message(&msgs[i]);
856                 FREE(msgs);
857         }
858 }
859
860 static struct message *
861 allocate_messages(size_t num_messages)
862 {
863         struct message *msgs;
864
865         msgs = CALLOC(num_messages, sizeof(struct message));
866         if (!msgs)
867                 return NULL;
868         for (size_t i = 0; i < num_messages; i++) {
869                 if (init_message(&msgs[i])) {
870                         free_messages(msgs, num_messages);
871                         return NULL;
872                 }
873         }
874         return msgs;
875 }
876
877 static void
878 main_writer_thread_destroy_ctx(struct main_writer_thread_ctx *ctx)
879 {
880         while (ctx->num_outstanding_messages--)
881                 shared_queue_get(ctx->compressed_res_queue);
882         free_messages(ctx->msgs, ctx->num_messages);
883         FREE(ctx->cur_chunk_tab);
884 }
885
886 static int
887 main_writer_thread_init_ctx(struct main_writer_thread_ctx *ctx)
888 {
889         /* Pre-allocate all the buffers that will be needed to do the chunk
890          * compression. */
891         ctx->msgs = allocate_messages(ctx->num_messages);
892         if (!ctx->msgs)
893                 return WIMLIB_ERR_NOMEM;
894
895         /* Initially, all the messages are available to use. */
896         INIT_LIST_HEAD(&ctx->available_msgs);
897         for (size_t i = 0; i < ctx->num_messages; i++)
898                 list_add_tail(&ctx->msgs[i].list, &ctx->available_msgs);
899
900         /* outstanding_streams is the list of streams that currently have had
901          * chunks sent off for compression.
902          *
903          * The first stream in outstanding_streams is the stream that is
904          * currently being written.
905          *
906          * The last stream in outstanding_streams is the stream that is
907          * currently being read and having chunks fed to the compressor threads.
908          * */
909         INIT_LIST_HEAD(&ctx->outstanding_streams);
910         ctx->num_outstanding_messages = 0;
911
912         ctx->next_msg = NULL;
913
914         /* Resources that don't need any chunks compressed are added to this
915          * list and written directly by the main thread. */
916         INIT_LIST_HEAD(&ctx->serial_streams);
917
918         ctx->cur_chunk_tab = NULL;
919
920         return 0;
921 }
922
923 static int
924 receive_compressed_chunks(struct main_writer_thread_ctx *ctx)
925 {
926         struct message *msg;
927         struct wim_lookup_table_entry *cur_lte;
928         int ret;
929
930         wimlib_assert(!list_empty(&ctx->outstanding_streams));
931         wimlib_assert(ctx->num_outstanding_messages != 0);
932
933         cur_lte = container_of(ctx->outstanding_streams.next,
934                                struct wim_lookup_table_entry,
935                                being_compressed_list);
936
937         /* Get the next message from the queue and process it.
938          * The message will contain 1 or more data chunks that have been
939          * compressed. */
940         msg = shared_queue_get(ctx->compressed_res_queue);
941         msg->complete = true;
942         --ctx->num_outstanding_messages;
943
944         /* Is this the next chunk in the current resource?  If it's not
945          * (i.e., an earlier chunk in the same or a different resource
946          * hasn't been compressed yet), do nothing, and keep this
947          * message around until all earlier chunks are received.
948          *
949          * Otherwise, write all the chunks we can. */
950         while (cur_lte != NULL &&
951                !list_empty(&cur_lte->msg_list)
952                && (msg = container_of(cur_lte->msg_list.next,
953                                       struct message,
954                                       list))->complete)
955         {
956                 list_move(&msg->list, &ctx->available_msgs);
957                 if (msg->begin_chunk == 0) {
958                         /* This is the first set of chunks.  Leave space
959                          * for the chunk table in the output file. */
960                         off_t cur_offset = filedes_offset(ctx->out_fd);
961                         if (cur_offset == -1)
962                                 return WIMLIB_ERR_WRITE;
963                         ret = begin_wim_resource_chunk_tab(cur_lte,
964                                                            ctx->out_fd,
965                                                            cur_offset,
966                                                            &ctx->cur_chunk_tab);
967                         if (ret)
968                                 return ret;
969                 }
970
971                 /* Write the compressed chunks from the message. */
972                 ret = write_wim_chunks(msg, ctx->out_fd, ctx->cur_chunk_tab);
973                 if (ret)
974                         return ret;
975
976                 /* Was this the last chunk of the stream?  If so, finish
977                  * it. */
978                 if (list_empty(&cur_lte->msg_list) &&
979                     msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks)
980                 {
981                         u64 res_csize;
982                         off_t offset;
983
984                         ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab,
985                                                             ctx->out_fd,
986                                                             &res_csize);
987                         if (ret)
988                                 return ret;
989
990                         list_del(&cur_lte->being_compressed_list);
991
992                         /* Grab the offset of this stream in the output file
993                          * from the chunk table before we free it. */
994                         offset = ctx->cur_chunk_tab->file_offset;
995
996                         FREE(ctx->cur_chunk_tab);
997                         ctx->cur_chunk_tab = NULL;
998
999                         if (res_csize >= wim_resource_size(cur_lte)) {
1000                                 /* Oops!  We compressed the resource to
1001                                  * larger than the original size.  Write
1002                                  * the resource uncompressed instead. */
1003                                 DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
1004                                       "writing uncompressed instead",
1005                                       wim_resource_size(cur_lte), res_csize);
1006                                 ret = seek_and_truncate(ctx->out_fd, offset);
1007                                 if (ret)
1008                                         return ret;
1009                                 ret = write_wim_resource(cur_lte,
1010                                                          ctx->out_fd,
1011                                                          WIMLIB_COMPRESSION_TYPE_NONE,
1012                                                          &cur_lte->output_resource_entry,
1013                                                          ctx->write_resource_flags);
1014                                 if (ret)
1015                                         return ret;
1016                         } else {
1017                                 cur_lte->output_resource_entry.size =
1018                                         res_csize;
1019
1020                                 cur_lte->output_resource_entry.original_size =
1021                                         cur_lte->resource_entry.original_size;
1022
1023                                 cur_lte->output_resource_entry.offset =
1024                                         offset;
1025
1026                                 cur_lte->output_resource_entry.flags =
1027                                         cur_lte->resource_entry.flags |
1028                                                 WIM_RESHDR_FLAG_COMPRESSED;
1029                         }
1030
1031                         do_write_streams_progress(ctx->progress,
1032                                                   ctx->progress_func,
1033                                                   wim_resource_size(cur_lte),
1034                                                   false);
1035
1036                         /* Since we just finished writing a stream, write any
1037                          * streams that have been added to the serial_streams
1038                          * list for direct writing by the main thread (e.g.
1039                          * resources that don't need to be compressed because
1040                          * the desired compression type is the same as the one
1041                          * they are already stored with). */
1042                         if (!list_empty(&ctx->serial_streams)) {
1043                                 ret = do_write_stream_list_serial(&ctx->serial_streams,
1044                                                                   ctx->lookup_table,
1045                                                                   ctx->out_fd,
1046                                                                   ctx->out_ctype,
1047                                                                   ctx->write_resource_flags,
1048                                                                   ctx->progress_func,
1049                                                                   ctx->progress);
1050                                 if (ret)
1051                                         return ret;
1052                         }
1053
1054                         /* Advance to the next stream to write. */
1055                         if (list_empty(&ctx->outstanding_streams)) {
1056                                 cur_lte = NULL;
1057                         } else {
1058                                 cur_lte = container_of(ctx->outstanding_streams.next,
1059                                                        struct wim_lookup_table_entry,
1060                                                        being_compressed_list);
1061                         }
1062                 }
1063         }
1064         return 0;
1065 }
1066
1067 /* Called when the main thread has read a new chunk of data. */
1068 static int
1069 main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx)
1070 {
1071         struct main_writer_thread_ctx *ctx = _ctx;
1072         int ret;
1073         struct message *next_msg;
1074         u64 next_chunk_in_msg;
1075
1076         /* Update SHA1 message digest for the stream currently being read by the
1077          * main thread. */
1078         sha1_update(&ctx->next_sha_ctx, chunk, chunk_size);
1079
1080         /* We send chunks of data to the compressor threads in batches, which we
1081          * refer to as "messages".  @next_msg is the message that is currently
1082          * being prepared to send off.  If it is NULL, that indicates that we
1083          * need to start a new message. */
1084         next_msg = ctx->next_msg;
1085         if (!next_msg) {
1086                 /* We need to start a new message.  First check to see if there
1087                  * is a message available in the list of available messages.  If
1088                  * so, we can just take one.  If not, all the messages (there is
1089                  * a fixed number of them, proportional to the number of
1090                  * threads) have been sent off to the compressor threads, so we
1091                  * receive messages from the compressor threads containing
1092                  * compressed chunks of data.
1093                  *
1094                  * We may need to receive multiple messages before one is
1095                  * actually available to use because messages received that are
1096                  * *not* for the very next set of chunks to compress must be
1097                  * buffered until it's time to write those chunks. */
1098                 while (list_empty(&ctx->available_msgs)) {
1099                         ret = receive_compressed_chunks(ctx);
1100                         if (ret)
1101                                 return ret;
1102                 }
1103
1104                 next_msg = container_of(ctx->available_msgs.next,
1105                                         struct message, list);
1106                 list_del(&next_msg->list);
1107                 next_msg->complete = false;
1108                 next_msg->begin_chunk = ctx->next_chunk;
1109                 next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG,
1110                                            ctx->next_num_chunks - ctx->next_chunk);
1111                 ctx->next_msg = next_msg;
1112         }
1113
1114         /* Fill in the next chunk to compress */
1115         next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk;
1116
1117         next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size;
1118         memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg],
1119                chunk, chunk_size);
1120         ctx->next_chunk++;
1121         if (++next_chunk_in_msg == next_msg->num_chunks) {
1122                 /* Send off an array of chunks to compress */
1123                 list_add_tail(&next_msg->list, &ctx->next_lte->msg_list);
1124                 shared_queue_put(ctx->res_to_compress_queue, next_msg);
1125                 ++ctx->num_outstanding_messages;
1126                 ctx->next_msg = NULL;
1127         }
1128         return 0;
1129 }
1130
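/* Called after all streams have been submitted:  drain the remaining
 * compressed chunks from the compressor threads, then write the streams that
 * were deferred for serial handling. */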
1131 static int
1132 main_writer_thread_finish(void *_ctx)
1133 {
1134         struct main_writer_thread_ctx *ctx = _ctx;
1135         int ret;
1136         while (ctx->num_outstanding_messages != 0) {
1137                 ret = receive_compressed_chunks(ctx);
1138                 if (ret)
1139                         return ret;
1140         }
1141         wimlib_assert(list_empty(&ctx->outstanding_streams));
1142         return do_write_stream_list_serial(&ctx->serial_streams,
1143                                            ctx->lookup_table,
1144                                            ctx->out_fd,
1145                                            ctx->out_ctype,
1146                                            ctx->write_resource_flags,
1147                                            ctx->progress_func,
1148                                            ctx->progress);
1149 }
1150
1151 static int
1152 submit_stream_for_compression(struct wim_lookup_table_entry *lte,
1153                               struct main_writer_thread_ctx *ctx)
1154 {
1155         int ret;
1156
1157         /* Read the entire stream @lte, feeding its data chunks to the
1158          * compressor threads.  Also SHA1-sum the stream; this is required in
1159          * the case that @lte is unhashed, and a nice additional verification
1160          * when @lte is already hashed. */
1161         sha1_init(&ctx->next_sha_ctx);
1162         ctx->next_chunk = 0;
1163         ctx->next_num_chunks = wim_resource_chunks(lte);
1164         ctx->next_lte = lte;
1165         INIT_LIST_HEAD(&lte->msg_list);
1166         list_add_tail(&lte->being_compressed_list, &ctx->outstanding_streams);
1167         ret = read_resource_prefix(lte, wim_resource_size(lte),
1168                                    main_writer_thread_cb, ctx, 0);
1169         if (ret == 0) {
1170                 wimlib_assert(ctx->next_chunk == ctx->next_num_chunks);
1171                 ret = finalize_and_check_sha1(&ctx->next_sha_ctx, lte);
1172         }
1173         return ret;
1174 }
1175
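/* Decide how each stream is handled in parallel mode:  small streams and
 * streams that will not be (re)compressed are deferred to the serial_streams
 * list; everything else is read immediately and its chunks are submitted to
 * the compressor threads. */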
1176 static int
1177 main_thread_process_next_stream(struct wim_lookup_table_entry *lte, void *_ctx)
1178 {
1179         struct main_writer_thread_ctx *ctx = _ctx;
1180         int ret;
1181
1182         if (wim_resource_size(lte) < 1000 ||
1183             ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
1184             (lte->resource_location == RESOURCE_IN_WIM &&
1185              !(ctx->write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) &&
1186              wimlib_get_compression_type(lte->wim) == ctx->out_ctype))
1187         {
1188                 /* Stream is too small or isn't being compressed.  Process it by
1189                  * the main thread when we have a chance.  We can't necessarily
1190                  * process it right here, as the main thread could be in the
1191                  * middle of writing a different stream. */
1192                 list_add_tail(&lte->write_streams_list, &ctx->serial_streams);
1193                 lte->deferred = 1;
1194                 ret = 0;
1195         } else {
1196                 ret = submit_stream_for_compression(lte, ctx);
1197         }
1198         lte->no_progress = 1;
1199         return ret;
1200 }
1201
1202 static long
1203 get_default_num_threads(void)
1204 {
1205 #ifdef __WIN32__
1206         return win32_get_number_of_processors();
1207 #else
1208         return sysconf(_SC_NPROCESSORS_ONLN);
1209 #endif
1210 }
1211
1212 /* Equivalent to write_stream_list_serial(), except this takes a @num_threads
1213  * parameter and will perform compression using that many threads.  Falls
1214  * back to write_stream_list_serial() on certain errors, such as a failure to
1215  * create the number of threads requested.
1216  *
1217  * High level description of the algorithm for writing compressed streams in
1218  * parallel:  We perform compression on chunks of size WIM_CHUNK_SIZE bytes
1219  * rather than on full files.  The currently executing thread becomes the main
1220  * thread and is entirely in charge of reading the data to compress (which may
1221  * be in any location understood by the resource code--- such as in an external
1222  * file being captured, or in another WIM file from which an image is being
1223  * exported) and actually writing the compressed data to the output file.
1224  * Additional threads are "compressor threads" and all execute the
1225  * compressor_thread_proc, where they repeatedly retrieve buffers of data from
1226  * the main thread, compress them, and hand them back to the main thread.
1227  *
1228  * Certain streams, such as streams that do not need to be compressed (e.g.
1229  * input compression type same as output compression type) or streams of very
1230  * small size, are placed in a list (main_writer_thread_ctx.serial_streams) and
1231  * handled entirely by the main thread at an appropriate time.
1232  *
1233  * At any given point in time, multiple streams may be having chunks compressed
1234  * concurrently.  The stream that the main thread is currently *reading* may be
1235  * later in the list than the stream that the main thread is currently
1236  * *writing*.
1237  */
1238 static int
1239 write_stream_list_parallel(struct list_head *stream_list,
1240                            struct wim_lookup_table *lookup_table,
1241                            int out_fd,
1242                            int out_ctype,
1243                            int write_resource_flags,
1244                            wimlib_progress_func_t progress_func,
1245                            union wimlib_progress_info *progress,
1246                            unsigned num_threads)
1247 {
1248         int ret;
1249         struct shared_queue res_to_compress_queue;
1250         struct shared_queue compressed_res_queue;
1251         pthread_t *compressor_threads = NULL;
1252
1253         if (num_threads == 0) {
1254                 long nthreads = get_default_num_threads();
1255                 if (nthreads < 1 || nthreads > UINT_MAX) {
1256                         WARNING("Could not determine number of processors! Assuming 1");
1257                         goto out_serial;
1258                 } else if (nthreads == 1) {
1259                         goto out_serial_quiet;
1260                 } else {
1261                         num_threads = nthreads;
1262                 }
1263         }
1264
1265         DEBUG("Writing stream list (parallel version, num_threads=%u)",
1266               num_threads);
1267
1268         progress->write_streams.num_threads = num_threads;
1269
1270         static const size_t MESSAGES_PER_THREAD = 2;
1271         size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD);
1272
1273         DEBUG("Initializing shared queues (queue_size=%zu)", queue_size);
1274
1275         ret = shared_queue_init(&res_to_compress_queue, queue_size);
1276         if (ret)
1277                 goto out_serial;
1278
1279         ret = shared_queue_init(&compressed_res_queue, queue_size);
1280         if (ret)
1281                 goto out_destroy_res_to_compress_queue;
1282
1283         struct compressor_thread_params params;
1284         params.res_to_compress_queue = &res_to_compress_queue;
1285         params.compressed_res_queue = &compressed_res_queue;
1286         params.compress = get_compress_func(out_ctype);
1287
1288         compressor_threads = MALLOC(num_threads * sizeof(pthread_t));
1289         if (!compressor_threads) {
1290                 ret = WIMLIB_ERR_NOMEM;
1291                 goto out_destroy_compressed_res_queue;
1292         }
1293
1294         for (unsigned i = 0; i < num_threads; i++) {
1295                 DEBUG("pthread_create thread %u of %u", i + 1, num_threads);
1296                 ret = pthread_create(&compressor_threads[i], NULL,
1297                                      compressor_thread_proc, &params);
1298                 if (ret != 0) {
1299                         errno = ret;  ret = -1;  /* pthread_create() doesn't set errno */
1300                         ERROR_WITH_ERRNO("Failed to create compressor "
1301                                          "thread %u of %u",
1302                                          i + 1, num_threads);
1303                         num_threads = i;
1304                         goto out_join;
1305                 }
1306         }
1307
1308         if (progress_func)
1309                 progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
1310
1311         struct main_writer_thread_ctx ctx;
1312         ctx.stream_list           = stream_list;
1313         ctx.lookup_table          = lookup_table;
1314         ctx.out_fd                = out_fd;
1315         ctx.out_ctype             = out_ctype;
1316         ctx.res_to_compress_queue = &res_to_compress_queue;
1317         ctx.compressed_res_queue  = &compressed_res_queue;
1318         ctx.num_messages          = queue_size;
1319         ctx.write_resource_flags  = write_resource_flags;
1320         ctx.progress_func         = progress_func;
1321         ctx.progress              = progress;
1322         ret = main_writer_thread_init_ctx(&ctx);
1323         if (ret)
1324                 goto out_join;
1325         ret = do_write_stream_list(stream_list, lookup_table,
1326                                    main_thread_process_next_stream,
1327                                    &ctx, progress_func, progress);
1328         if (ret)
1329                 goto out_destroy_ctx;
1330
1331         /* The main thread has finished reading all streams that are going to be
1332          * compressed in parallel, and it now needs to wait for all remaining
1333          * chunks to be compressed so that the remaining streams can actually be
1334          * written to the output file.  Furthermore, any remaining streams that
1335          * had processing deferred to the main thread need to be handled.  These
1336          * tasks are done by the main_writer_thread_finish() function. */
1337         ret = main_writer_thread_finish(&ctx);
1338 out_destroy_ctx:
1339         main_writer_thread_destroy_ctx(&ctx);
1340 out_join:
1341         for (unsigned i = 0; i < num_threads; i++)
1342                 shared_queue_put(&res_to_compress_queue, NULL);
1343
1344         for (unsigned i = 0; i < num_threads; i++) {
1345                 if ((errno = pthread_join(compressor_threads[i], NULL))) {
1346                         WARNING_WITH_ERRNO("Failed to join compressor "
1347                                            "thread %u of %u",
1348                                            i + 1, num_threads);
1349                 }
1350         }
1351         FREE(compressor_threads);
1352 out_destroy_compressed_res_queue:
1353         shared_queue_destroy(&compressed_res_queue);
1354 out_destroy_res_to_compress_queue:
1355         shared_queue_destroy(&res_to_compress_queue);
1356         if (ret >= 0 && ret != WIMLIB_ERR_NOMEM)
1357                 return ret;
1358 out_serial:
1359         WARNING("Falling back to single-threaded compression");
1360 out_serial_quiet:
1361         return write_stream_list_serial(stream_list,
1362                                         lookup_table,
1363                                         out_fd,
1364                                         out_ctype,
1365                                         write_resource_flags,
1366                                         progress_func,
1367                                         progress);
1368
1369 }
1370 #endif
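
/*
 * The following is an illustrative sketch only; it is not wimlib's actual
 * shared_queue or compressor_thread_proc implementation, and the names
 * sketch_queue, sketch_queue_put, sketch_queue_get, and
 * sketch_compressor_thread are hypothetical.  It shows the shape of the
 * bounded producer/consumer hand-off between the main thread and the
 * compressor threads described in the comment above
 * write_stream_list_parallel().
 */
#if 0
#include <pthread.h>
#include <stddef.h>

struct sketch_queue {
        void **slots;
        size_t capacity, count, head;
        pthread_mutex_t lock;
        pthread_cond_t not_empty, not_full;
};

/* Producer side: block while the queue is full, then append a message. */
static void
sketch_queue_put(struct sketch_queue *q, void *msg)
{
        pthread_mutex_lock(&q->lock);
        while (q->count == q->capacity)
                pthread_cond_wait(&q->not_full, &q->lock);
        q->slots[(q->head + q->count++) % q->capacity] = msg;
        pthread_cond_signal(&q->not_empty);
        pthread_mutex_unlock(&q->lock);
}

/* Consumer side: block while the queue is empty, then remove the oldest
 * message. */
static void *
sketch_queue_get(struct sketch_queue *q)
{
        void *msg;

        pthread_mutex_lock(&q->lock);
        while (q->count == 0)
                pthread_cond_wait(&q->not_empty, &q->lock);
        msg = q->slots[q->head];
        q->head = (q->head + 1) % q->capacity;
        q->count--;
        pthread_cond_signal(&q->not_full);
        pthread_mutex_unlock(&q->lock);
        return msg;
}

/* Compressor thread loop: take messages until a NULL sentinel arrives,
 * mirroring how write_stream_list_parallel() shuts its threads down by
 * queueing one NULL per thread. */
static void *
sketch_compressor_thread(void *arg)
{
        struct sketch_queue **queues = arg;   /* {in, out} */
        void *msg;

        while ((msg = sketch_queue_get(queues[0])) != NULL) {
                /* ... compress the chunks carried by @msg ... */
                sketch_queue_put(queues[1], msg);
        }
        return NULL;
}
#endif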
1371
1372 /*
1373  * Write a list of streams to a WIM (@out_fd) using the compression type
1374  * @out_ctype and up to @num_threads compressor threads.
1375  */
1376 static int
1377 write_stream_list(struct list_head *stream_list,
1378                   struct wim_lookup_table *lookup_table,
1379                   int out_fd, int out_ctype, int write_flags,
1380                   unsigned num_threads, wimlib_progress_func_t progress_func)
1381 {
1382         struct wim_lookup_table_entry *lte;
1383         size_t num_streams = 0;
1384         u64 total_bytes = 0;
1385         u64 total_compression_bytes = 0;
1386         union wimlib_progress_info progress;
1387         int ret;
1388         int write_resource_flags;
1389
1390         if (list_empty(stream_list))
1391                 return 0;
1392
1393         write_resource_flags = write_flags_to_resource_flags(write_flags);
1394
1395         /* Calculate the total size of the streams to be written.  Note: this
1396          * will be the uncompressed size, as we may not know the compressed size
1397          * yet, and also this will assume that every unhashed stream will be
1398          * written (which will not necessarily be the case). */
1399         list_for_each_entry(lte, stream_list, write_streams_list) {
1400                 num_streams++;
1401                 total_bytes += wim_resource_size(lte);
1402                 if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
1403                        && (wim_resource_compression_type(lte) != out_ctype ||
1404                            (write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS)))
1405                 {
1406                         total_compression_bytes += wim_resource_size(lte);
1407                 }
1408         }
1409         progress.write_streams.total_bytes       = total_bytes;
1410         progress.write_streams.total_streams     = num_streams;
1411         progress.write_streams.completed_bytes   = 0;
1412         progress.write_streams.completed_streams = 0;
1413         progress.write_streams.num_threads       = num_threads;
1414         progress.write_streams.compression_type  = out_ctype;
1415         progress.write_streams._private          = 0;
1416
1417 #ifdef ENABLE_MULTITHREADED_COMPRESSION
1418         if (total_compression_bytes >= 1000000 && num_threads != 1)
1419                 ret = write_stream_list_parallel(stream_list,
1420                                                  lookup_table,
1421                                                  out_fd,
1422                                                  out_ctype,
1423                                                  write_resource_flags,
1424                                                  progress_func,
1425                                                  &progress,
1426                                                  num_threads);
1427         else
1428 #endif
1429                 ret = write_stream_list_serial(stream_list,
1430                                                lookup_table,
1431                                                out_fd,
1432                                                out_ctype,
1433                                                write_resource_flags,
1434                                                progress_func,
1435                                                &progress);
1436         return ret;
1437 }
1438
1439 struct stream_size_table {
1440         struct hlist_head *array;
1441         size_t num_entries;
1442         size_t capacity;
1443 };
1444
1445 static int
1446 init_stream_size_table(struct stream_size_table *tab, size_t capacity)
1447 {
1448         tab->array = CALLOC(capacity, sizeof(tab->array[0]));
1449         if (!tab->array)
1450                 return WIMLIB_ERR_NOMEM;
1451         tab->num_entries = 0;
1452         tab->capacity = capacity;
1453         return 0;
1454 }
1455
1456 static void
1457 destroy_stream_size_table(struct stream_size_table *tab)
1458 {
1459         FREE(tab->array);
1460 }
1461
1462 static int
1463 stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab)
1464 {
1465         struct stream_size_table *tab = _tab;
1466         size_t pos;
1467         struct wim_lookup_table_entry *same_size_lte;
1468         struct hlist_node *tmp;
1469
1470         pos = hash_u64(wim_resource_size(lte)) % tab->capacity;
1471         lte->unique_size = 1;
1472         hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) {
1473                 if (wim_resource_size(same_size_lte) == wim_resource_size(lte)) {
1474                         lte->unique_size = 0;
1475                         same_size_lte->unique_size = 0;
1476                         break;
1477                 }
1478         }
1479
1480         hlist_add_head(&lte->hash_list_2, &tab->array[pos]);
1481         tab->num_entries++;
1482         return 0;
1483 }
1484
1485
1486 struct lte_overwrite_prepare_args {
1487         WIMStruct *wim;
1488         off_t end_offset;
1489         struct list_head stream_list;
1490         struct stream_size_table stream_size_tab;
1491 };
1492
1493 /* First phase of preparing streams for an in-place overwrite.  This is called
1494  * on all streams, both hashed and unhashed, except the metadata resources. */
1495 static int
1496 lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *_args)
1497 {
1498         struct lte_overwrite_prepare_args *args = _args;
1499
1500         wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA));
1501         if (lte->resource_location != RESOURCE_IN_WIM || lte->wim != args->wim)
1502                 list_add_tail(&lte->write_streams_list, &args->stream_list);
1503         lte->out_refcnt = lte->refcnt;
1504         stream_size_table_insert(lte, &args->stream_size_tab);
1505         return 0;
1506 }
1507
1508 /* Second phase of preparing streams for an in-place overwrite.  This is called
1509  * on existing metadata resources and hashed streams, but not unhashed streams.
1510  *
1511  * NOTE: lte->output_resource_entry is in a union with lte->hash_list_2, so
1512  * lte_overwrite_prepare_2() must be called only after lte_overwrite_prepare(),
1513  * which links the entry into the stream size table through lte->hash_list_2;
1514  * setting lte->output_resource_entry would otherwise clobber that list node. */
1515 static int
1516 lte_overwrite_prepare_2(struct wim_lookup_table_entry *lte, void *_args)
1517 {
1518         struct lte_overwrite_prepare_args *args = _args;
1519
1520         if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == args->wim) {
1521                 /* We can't do an in place overwrite on the WIM if there are
1522                  * streams after the XML data. */
1523                 if (lte->resource_entry.offset +
1524                     lte->resource_entry.size > args->end_offset)
1525                 {
1526                 #ifdef ENABLE_ERROR_MESSAGES
1527                         ERROR("The following resource is after the XML data:");
1528                         print_lookup_table_entry(lte, stderr);
1529                 #endif
1530                         return WIMLIB_ERR_RESOURCE_ORDER;
1531                 }
1532                 copy_resource_entry(&lte->output_resource_entry,
1533                                     &lte->resource_entry);
1534         }
1535         return 0;
1536 }
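
/*
 * Illustrative sketch only; this is not the actual 'struct
 * wim_lookup_table_entry' definition.  The ordering constraint in the NOTE
 * above comes from two members sharing storage through a union, roughly as
 * shown: lte_overwrite_prepare() links the entry into the stream size table
 * through the list node, and lte_overwrite_prepare_2() then reuses the same
 * bytes when it fills in the output resource entry.
 */
#if 0
struct sketch_lookup_table_entry {
        /* ... other members ... */
        union {
                struct hlist_node hash_list_2;               /* phase 1 */
                struct resource_entry output_resource_entry; /* phase 2 */
        };
};
#endif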
1537
1538 /* Given a WIM that we are going to overwrite in place with zero or more
1539  * additional streams added, construct the list of new unique streams
1540  * ('struct wim_lookup_table_entry's) that must be written, plus any unhashed
1541  * streams that need to be added but may be identical to other hashed or
1542  * unhashed streams.  These unhashed streams are checksummed while the streams
1543  * are being written.  To aid this process, the member @unique_size is set to 1
1544  * on streams that have a unique size and therefore must be written.
1545  *
1546  * The out_refcnt member of each 'struct wim_lookup_table_entry' is set to
1547  * indicate the number of times the stream is referenced by the image(s) being
1548  * written; this count may still be adjusted later when unhashed
1549  * streams are being resolved.
1550  */
1551 static int
1552 prepare_streams_for_overwrite(WIMStruct *wim, off_t end_offset,
1553                               struct list_head *stream_list)
1554 {
1555         int ret;
1556         struct lte_overwrite_prepare_args args;
1557         unsigned i;
1558
1559         args.wim = wim;
1560         args.end_offset = end_offset;
1561         ret = init_stream_size_table(&args.stream_size_tab,
1562                                      wim->lookup_table->capacity);
1563         if (ret)
1564                 return ret;
1565
1566         INIT_LIST_HEAD(&args.stream_list);
1567         for (i = 0; i < wim->hdr.image_count; i++) {
1568                 struct wim_image_metadata *imd;
1569                 struct wim_lookup_table_entry *lte;
1570
1571                 imd = wim->image_metadata[i];
1572                 image_for_each_unhashed_stream(lte, imd)
1573                         lte_overwrite_prepare(lte, &args);
1574         }
1575         for_lookup_table_entry(wim->lookup_table, lte_overwrite_prepare, &args);
1576         list_transfer(&args.stream_list, stream_list);
1577
1578         for (i = 0; i < wim->hdr.image_count; i++) {
1579                 ret = lte_overwrite_prepare_2(wim->image_metadata[i]->metadata_lte,
1580                                               &args);
1581                 if (ret)
1582                         goto out_destroy_stream_size_table;
1583         }
1584         ret = for_lookup_table_entry(wim->lookup_table,
1585                                      lte_overwrite_prepare_2, &args);
1586 out_destroy_stream_size_table:
1587         destroy_stream_size_table(&args.stream_size_tab);
1588         return ret;
1589 }
1590
1591
1592 struct find_streams_ctx {
1593         struct list_head stream_list;
1594         struct stream_size_table stream_size_tab;
1595 };
1596
1597 static void
1598 inode_find_streams_to_write(struct wim_inode *inode,
1599                             struct wim_lookup_table *table,
1600                             struct list_head *stream_list,
1601                             struct stream_size_table *tab)
1602 {
1603         struct wim_lookup_table_entry *lte;
1604         for (unsigned i = 0; i <= inode->i_num_ads; i++) {
1605                 lte = inode_stream_lte(inode, i, table);
1606                 if (lte) {
1607                         if (lte->out_refcnt == 0) {
1608                                 if (lte->unhashed)
1609                                         stream_size_table_insert(lte, tab);
1610                                 list_add_tail(&lte->write_streams_list, stream_list);
1611                         }
1612                         lte->out_refcnt += inode->i_nlink;
1613                 }
1614         }
1615 }
1616
1617 static int
1618 image_find_streams_to_write(WIMStruct *w)
1619 {
1620         struct find_streams_ctx *ctx;
1621         struct wim_image_metadata *imd;
1622         struct wim_inode *inode;
1623         struct wim_lookup_table_entry *lte;
1624
1625         ctx = w->private;
1626         imd = wim_get_current_image_metadata(w);
1627
1628         image_for_each_unhashed_stream(lte, imd)
1629                 lte->out_refcnt = 0;
1630
1631         /* Go through this image's inodes to find any streams that have not been
1632          * found yet. */
1633         image_for_each_inode(inode, imd) {
1634                 inode_find_streams_to_write(inode, w->lookup_table,
1635                                             &ctx->stream_list,
1636                                             &ctx->stream_size_tab);
1637         }
1638         return 0;
1639 }
1640
1641 /* Given a WIM from which one or all of the images are being written, build
1642  * the list of unique streams ('struct wim_lookup_table_entry's) that must be
1643  * written, plus any unhashed streams that need to be written but may be
1644  * identical to other hashed or unhashed streams being written.  These unhashed
1645  * streams are checksummed while the streams are being written.  To aid this
1646  * process, the member @unique_size is set to 1 on streams that have a unique
1647  * size and therefore must be written.
1648  *
1649  * The out_refcnt member of each 'struct wim_lookup_table_entry' is set to
1650  * indicate the number of times the stream is referenced by the image(s) being
1651  * written; this count may still be adjusted later when unhashed
1652  * streams are being resolved.
1653  */
1654 static int
1655 prepare_stream_list(WIMStruct *wim, int image, struct list_head *stream_list)
1656 {
1657         int ret;
1658         struct find_streams_ctx ctx;
1659
1660         for_lookup_table_entry(wim->lookup_table, lte_zero_out_refcnt, NULL);
1661         ret = init_stream_size_table(&ctx.stream_size_tab,
1662                                      wim->lookup_table->capacity);
1663         if (ret)
1664                 return ret;
1665         for_lookup_table_entry(wim->lookup_table, stream_size_table_insert,
1666                                &ctx.stream_size_tab);
1667         INIT_LIST_HEAD(&ctx.stream_list);
1668         wim->private = &ctx;
1669         ret = for_image(wim, image, image_find_streams_to_write);
1670         destroy_stream_size_table(&ctx.stream_size_tab);
1671         if (ret == 0)
1672                 list_transfer(&ctx.stream_list, stream_list);
1673         return ret;
1674 }
1675
1676 /* Writes the streams for the specified @image in @wim to @wim->out_fd.
1677  */
1678 static int
1679 write_wim_streams(WIMStruct *wim, int image, int write_flags,
1680                   unsigned num_threads,
1681                   wimlib_progress_func_t progress_func)
1682 {
1683         int ret;
1684         struct list_head stream_list;
1685
1686         ret = prepare_stream_list(wim, image, &stream_list);
1687         if (ret)
1688                 return ret;
1689         return write_stream_list(&stream_list,
1690                                  wim->lookup_table,
1691                                  wim->out_fd,
1692                                  wimlib_get_compression_type(wim),
1693                                  write_flags,
1694                                  num_threads,
1695                                  progress_func);
1696 }
1697
1698 /*
1699  * Finish writing a WIM file: write the lookup table, xml data, and integrity
1700  * table (optional), then overwrite the WIM header.
1701  *
1702  * write_flags is a bitwise OR of the following:
1703  *
1704  *      (public)  WIMLIB_WRITE_FLAG_CHECK_INTEGRITY:
1705  *              Include an integrity table.
1706  *
1707  *      (public)  WIMLIB_WRITE_FLAG_SHOW_PROGRESS:
1708  *              Show progress information when (if) writing the integrity table.
1709  *
1710  *      (private) WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE:
1711  *              Don't write the lookup table.
1712  *
1713  *      (private) WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE:
1714  *              When (if) writing the integrity table, re-use entries from the
1715  *              existing integrity table, if possible.
1716  *
1717  *      (private) WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML:
1718  *              After writing the XML data but before writing the integrity
1719  *              table, write a temporary WIM header and flush the stream so that
1720  *              the WIM is less likely to become corrupted upon abrupt program
1721  *              termination.
1722  *
1723  *      (private) WIMLIB_WRITE_FLAG_FSYNC:
1724  *              fsync() the output file before closing it.
1725  *
1726  */
1727 int
1728 finish_write(WIMStruct *w, int image, int write_flags,
1729              wimlib_progress_func_t progress_func)
1730 {
1731         int ret;
1732         struct wim_header hdr;
1733
1734         /* @hdr will be the header for the new WIM.  First copy all the data
1735          * from the header in the WIMStruct; then set all the fields that may
1736          * have changed, including the resource entries, boot index, and image
1737          * count.  */
1738         memcpy(&hdr, &w->hdr, sizeof(struct wim_header));
1739
1740         /* Set image count and boot index correctly for single image writes */
1741         if (image != WIMLIB_ALL_IMAGES) {
1742                 hdr.image_count = 1;
1743                 if (hdr.boot_idx == image)
1744                         hdr.boot_idx = 1;
1745                 else
1746                         hdr.boot_idx = 0;
1747         }
1748
1749         /* In the WIM header, there is room for the resource entry for a
1750          * metadata resource labeled as the "boot metadata".  This entry should
1751          * be zeroed out if there is no bootable image (boot_idx 0).  Otherwise,
1752          * it should be a copy of the resource entry for the image that is
1753          * marked as bootable.  This is not well documented...  */
1754         if (hdr.boot_idx == 0) {
1755                 zero_resource_entry(&hdr.boot_metadata_res_entry);
1756         } else {
1757                 copy_resource_entry(&hdr.boot_metadata_res_entry,
1758                                     &w->image_metadata[hdr.boot_idx - 1]
1759                                         ->metadata_lte->output_resource_entry);
1760         }
1761
1762         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
1763                 ret = write_lookup_table(w, image, &hdr.lookup_table_res_entry);
1764                 if (ret)
1765                         goto out_close_wim;
1766         }
1767
1768         ret = write_xml_data(w->wim_info, image, w->out_fd,
1769                              (write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE) ?
1770                               wim_info_get_total_bytes(w->wim_info) : 0,
1771                              &hdr.xml_res_entry);
1772         if (ret)
1773                 goto out_close_wim;
1774
1775         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
1776                 if (write_flags & WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) {
1777                         struct wim_header checkpoint_hdr;
1778                         memcpy(&checkpoint_hdr, &hdr, sizeof(struct wim_header));
1779                         zero_resource_entry(&checkpoint_hdr.integrity);
1780                         ret = write_header(&checkpoint_hdr, w->out_fd);
1781                         if (ret)
1782                                 goto out_close_wim;
1783                 }
1784
1785                 off_t old_lookup_table_end;
1786                 off_t new_lookup_table_end;
1787                 if (write_flags & WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE) {
1788                         old_lookup_table_end = w->hdr.lookup_table_res_entry.offset +
1789                                                w->hdr.lookup_table_res_entry.size;
1790                 } else {
1791                         old_lookup_table_end = 0;
1792                 }
1793                 new_lookup_table_end = hdr.lookup_table_res_entry.offset +
1794                                        hdr.lookup_table_res_entry.size;
1795
1796                 ret = write_integrity_table(w->out_fd,
1797                                             &hdr.integrity,
1798                                             new_lookup_table_end,
1799                                             old_lookup_table_end,
1800                                             progress_func);
1801                 if (ret)
1802                         goto out_close_wim;
1803         } else {
1804                 zero_resource_entry(&hdr.integrity);
1805         }
1806
1807         ret = write_header(&hdr, w->out_fd);
1808         if (ret)
1809                 goto out_close_wim;
1810
1811         if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
1812                 if (fsync(w->out_fd)) {
1813                         ERROR_WITH_ERRNO("Error syncing data to WIM file");
1814                         ret = WIMLIB_ERR_WRITE;
1815                 }
1816         }
1817 out_close_wim:
1818         if (close(w->out_fd)) {
1819                 ERROR_WITH_ERRNO("Failed to close the output WIM file");
1820                 if (ret == 0)
1821                         ret = WIMLIB_ERR_WRITE;
1822         }
1823         w->out_fd = -1;
1824         return ret;
1825 }
1826
1827 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
1828 int
1829 lock_wim(WIMStruct *w, int fd)
1830 {
1831         int ret = 0;
1832         if (fd != -1 && !w->wim_locked) {
1833                 ret = flock(fd, LOCK_EX | LOCK_NB);
1834                 if (ret != 0) {
1835                         if (errno == EWOULDBLOCK) {
1836                                 ERROR("`%"TS"' is already being modified or has been "
1837                                       "mounted read-write\n"
1838                                       "        by another process!", w->filename);
1839                                 ret = WIMLIB_ERR_ALREADY_LOCKED;
1840                         } else {
1841                                 WARNING_WITH_ERRNO("Failed to lock `%"TS"'",
1842                                                    w->filename);
1843                                 ret = 0;
1844                         }
1845                 } else {
1846                         w->wim_locked = 1;
1847                 }
1848         }
1849         return ret;
1850 }
1851 #endif
1852
1853 static int
1854 open_wim_writable(WIMStruct *w, const tchar *path, int open_flags)
1855 {
1856         w->out_fd = topen(path, open_flags | O_BINARY, 0644);
1857         if (w->out_fd == -1) {
1858                 ERROR_WITH_ERRNO("Failed to open `%"TS"' for writing", path);
1859                 return WIMLIB_ERR_OPEN;
1860         }
1861         return 0;
1862 }
1863
1864
1865 void
1866 close_wim_writable(WIMStruct *w)
1867 {
1868         if (w->out_fd != -1) {
1869                 if (close(w->out_fd))
1870                         WARNING_WITH_ERRNO("Failed to close output WIM");
1871                 w->out_fd = -1;
1872         }
1873 }
1874
1875 /* Open the output file and write a dummy header for the WIM. */
1876 int
1877 begin_write(WIMStruct *w, const tchar *path, int write_flags)
1878 {
1879         int ret;
1880         int open_flags = O_TRUNC | O_CREAT;
1881         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
1882                 open_flags |= O_RDWR;
1883         else
1884                 open_flags |= O_WRONLY;
1885         ret = open_wim_writable(w, path, open_flags);
1886         if (ret)
1887                 return ret;
1888         /* Write dummy header. It will be overwritten later. */
1889         ret = write_header(&w->hdr, w->out_fd);
1890         if (ret)
1891                 return ret;
1892         if (lseek(w->out_fd, WIM_HEADER_DISK_SIZE, SEEK_SET) == -1) {
1893                 ERROR_WITH_ERRNO("Failed to seek to end of WIM header");
1894                 return WIMLIB_ERR_WRITE;
1895         }
1896         return 0;
1897 }
1898
1899 /* Writes a stand-alone WIM to a file.  */
1900 WIMLIBAPI int
1901 wimlib_write(WIMStruct *w, const tchar *path,
1902              int image, int write_flags, unsigned num_threads,
1903              wimlib_progress_func_t progress_func)
1904 {
1905         int ret;
1906
1907         if (!path)
1908                 return WIMLIB_ERR_INVALID_PARAM;
1909
1910         write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
1911
1912         if (image != WIMLIB_ALL_IMAGES &&
1913              (image < 1 || image > w->hdr.image_count))
1914                 return WIMLIB_ERR_INVALID_IMAGE;
1915
1916         if (w->hdr.total_parts != 1) {
1917                 ERROR("Cannot call wimlib_write() on part of a split WIM");
1918                 return WIMLIB_ERR_SPLIT_UNSUPPORTED;
1919         }
1920
1921         ret = begin_write(w, path, write_flags);
1922         if (ret)
1923                 goto out_close_wim;
1924
1925         ret = write_wim_streams(w, image, write_flags, num_threads,
1926                                 progress_func);
1927         if (ret)
1928                 goto out_close_wim;
1929
1930         if (progress_func)
1931                 progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN, NULL);
1932
1933         ret = for_image(w, image, write_metadata_resource);
1934         if (ret)
1935                 goto out_close_wim;
1936
1937         if (progress_func)
1938                 progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_END, NULL);
1939
1940         ret = finish_write(w, image, write_flags, progress_func);
1941         /* finish_write() closed the WIM for us */
1942         goto out;
1943 out_close_wim:
1944         close_wim_writable(w);
1945 out:
1946         DEBUG("wimlib_write(path=%"TS") = %d", path, ret);
1947         return ret;
1948 }
1949
1950 static bool
1951 any_images_modified(WIMStruct *w)
1952 {
1953         for (int i = 0; i < w->hdr.image_count; i++)
1954                 if (w->image_metadata[i]->modified)
1955                         return true;
1956         return false;
1957 }
1958
1959 /*
1960  * Overwrite a WIM, possibly appending streams to it.
1961  *
1962  * A WIM looks like (or is supposed to look like) the following:
1963  *
1964  *                   Header (212 bytes)
1965  *                   Streams and metadata resources (variable size)
1966  *                   Lookup table (variable size)
1967  *                   XML data (variable size)
1968  *                   Integrity table (optional) (variable size)
1969  *
1970  * If we are not adding any streams or metadata resources, the lookup table is
1971  * unchanged, so we only need to overwrite the XML data, integrity table, and
1972  * header.  This operation is potentially unsafe if the program is abruptly
1973  * terminated while the XML data or integrity table are being overwritten, but
1974  * before the new header has been written.  To partially alleviate this problem,
1975  * a special flag (WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) is passed to
1976  * finish_write() to cause a temporary WIM header to be written after the XML
1977  * data has been written.  This may prevent the WIM from becoming corrupted if
1978  * the program is terminated while the integrity table is being calculated (but
1979  * no guarantees, due to write re-ordering...).
1980  *
1981  * If we are adding new streams or images (metadata resources), the lookup table
1982  * needs to be changed, and those streams need to be written.  In this case, we
1983  * try to perform a safe update of the WIM file by writing the streams *after*
1984  * the end of the previous WIM, then writing the new lookup table, XML data, and
1985  * (optionally) integrity table following the new streams.  This will produce a
1986  * layout like the following:
1987  *
1988  *                   Header (212 bytes)
1989  *                   (OLD) Streams and metadata resources (variable size)
1990  *                   (OLD) Lookup table (variable size)
1991  *                   (OLD) XML data (variable size)
1992  *                   (OLD) Integrity table (optional) (variable size)
1993  *                   (NEW) Streams and metadata resources (variable size)
1994  *                   (NEW) Lookup table (variable size)
1995  *                   (NEW) XML data (variable size)
1996  *                   (NEW) Integrity table (optional) (variable size)
1997  *
1998  * At all points, the WIM is valid as nothing points to the new data yet.  Then,
1999  * the header is overwritten to point to the new lookup table, XML data, and
2000  * integrity table, to produce the following layout:
2001  *
2002  *                   Header (212 bytes)
2003  *                   Streams and metadata resources (variable size)
2004  *                   Nothing (variable size)
2005  *                   More Streams and metadata resources (variable size)
2006  *                   Lookup table (variable size)
2007  *                   XML data (variable size)
2008  *                   Integrity table (optional) (variable size)
2009  *
2010  * This method allows an image to be appended to a large WIM very quickly, and
2011  * is crash-safe except in the case of write re-ordering, but the
2012  * disadvantage is that a small hole is left in the WIM where the old lookup
2013  * table, XML data, and integrity table were.  (These usually only take up a
2014  * small amount of space compared to the streams, however.)
2015  */
2016 static int
2017 overwrite_wim_inplace(WIMStruct *w, int write_flags,
2018                       unsigned num_threads,
2019                       wimlib_progress_func_t progress_func)
2020 {
2021         int ret;
2022         struct list_head stream_list;
2023         off_t old_wim_end;
2024         u64 old_lookup_table_end, old_xml_begin, old_xml_end;
2025         int open_flags;
2026
2027         DEBUG("Overwriting `%"TS"' in-place", w->filename);
2028
2029         /* Make sure that the integrity table (if present) is after the XML
2030          * data, and that there are no stream resources, metadata resources, or
2031          * lookup tables after the XML data.  Otherwise, these data would be
2032          * overwritten. */
2033         old_xml_begin = w->hdr.xml_res_entry.offset;
2034         old_xml_end = old_xml_begin + w->hdr.xml_res_entry.size;
2035         old_lookup_table_end = w->hdr.lookup_table_res_entry.offset +
2036                                w->hdr.lookup_table_res_entry.size;
2037         if (w->hdr.integrity.offset != 0 && w->hdr.integrity.offset < old_xml_end) {
2038                 ERROR("Didn't expect the integrity table to be before the XML data");
2039                 return WIMLIB_ERR_RESOURCE_ORDER;
2040         }
2041
2042         if (old_lookup_table_end > old_xml_begin) {
2043                 ERROR("Didn't expect the lookup table to be after the XML data");
2044                 return WIMLIB_ERR_RESOURCE_ORDER;
2045         }
2046
2047         /* Set @old_wim_end, which indicates the point beyond which we don't
2048          * allow any file and metadata resources to appear without returning
2049          * WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we would otherwise
2050          * overwrite these resources). */
2051         if (!w->deletion_occurred && !any_images_modified(w)) {
2052                 /* If no images have been modified and no images have been
2053                  * deleted, a new lookup table does not need to be written.  We
2054                  * shall write the new XML data and optional integrity table
2055                  * immediately after the lookup table.  Note that this may
2056                  * overwrite an existing integrity table. */
2057                 DEBUG("Skipping writing lookup table "
2058                       "(no images modified or deleted)");
2059                 old_wim_end = old_lookup_table_end;
2060                 write_flags |= WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE |
2061                                WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML;
2062         } else if (w->hdr.integrity.offset) {
2063                 /* Old WIM has an integrity table; begin writing new streams
2064                  * after it. */
2065                 old_wim_end = w->hdr.integrity.offset + w->hdr.integrity.size;
2066         } else {
2067                 /* No existing integrity table; begin writing new streams after
2068                  * the old XML data. */
2069                 old_wim_end = old_xml_end;
2070         }
2071
2072         ret = prepare_streams_for_overwrite(w, old_wim_end, &stream_list);
2073         if (ret)
2074                 return ret;
2075
2076         open_flags = 0;
2077         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
2078                 open_flags |= O_RDWR;
2079         else
2080                 open_flags |= O_WRONLY;
2081         ret = open_wim_writable(w, w->filename, open_flags);
2082         if (ret)
2083                 return ret;
2084
2085         ret = lock_wim(w, w->out_fd);
2086         if (ret) {
2087                 close_wim_writable(w);
2088                 return ret;
2089         }
2090
2091         if (lseek(w->out_fd, old_wim_end, SEEK_SET) == -1) {
2092                 ERROR_WITH_ERRNO("Can't seek to end of WIM");
2093                 close_wim_writable(w);
2094                 w->wim_locked = 0;
2095                 return WIMLIB_ERR_WRITE;
2096         }
2097
2098         DEBUG("Writing newly added streams (offset = %"PRIu64")",
2099               old_wim_end);
2100         ret = write_stream_list(&stream_list,
2101                                 w->lookup_table,
2102                                 w->out_fd,
2103                                 wimlib_get_compression_type(w),
2104                                 write_flags,
2105                                 num_threads,
2106                                 progress_func);
2107         if (ret)
2108                 goto out_truncate;
2109
2110         for (int i = 0; i < w->hdr.image_count; i++) {
2111                 if (w->image_metadata[i]->modified) {
2112                         select_wim_image(w, i + 1);
2113                         ret = write_metadata_resource(w);
2114                         if (ret)
2115                                 goto out_truncate;
2116                 }
2117         }
2118         write_flags |= WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE;
2119         ret = finish_write(w, WIMLIB_ALL_IMAGES, write_flags,
2120                            progress_func);
2121 out_truncate:
2122         close_wim_writable(w);
2123         if (ret != 0 && !(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
2124                 WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)",
2125                         w->filename, old_wim_end);
2126                 /* Return value of truncate() is ignored because this is already
2127                  * an error path. */
2128                 (void)ttruncate(w->filename, old_wim_end);
2129         }
2130         w->wim_locked = 0;
2131         return ret;
2132 }
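
/*
 * Illustrative sketch only, using plain POSIX calls rather than wimlib's
 * helpers (the function and parameter names here are hypothetical): the
 * crash-safety argument behind overwrite_wim_inplace() is "append the new
 * data where nothing references it yet, flush it, then overwrite the small
 * header that points at it".  Unconditional fsync() calls are shown here to
 * address the write re-ordering caveat noted in the comment above.
 */
#if 0
#include <unistd.h>

static int
sketch_append_then_commit(int fd, off_t old_end,
                          const void *new_data, size_t new_size,
                          const void *new_hdr, size_t hdr_size)
{
        /* 1. Write the new streams, lookup table, XML data, etc. after the
         *    old end of the file.  A crash here leaves the old file fully
         *    intact, since its header still points only at the old data. */
        if (pwrite(fd, new_data, new_size, old_end) != (ssize_t)new_size)
                return -1;

        /* 2. Make sure the appended data has reached the disk before
         *    anything points to it. */
        if (fsync(fd) != 0)
                return -1;

        /* 3. Only then overwrite the fixed-size header at offset 0 so that
         *    it references the new data. */
        if (pwrite(fd, new_hdr, hdr_size, 0) != (ssize_t)hdr_size)
                return -1;
        return fsync(fd);
}
#endif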
2133
2134 static int
2135 overwrite_wim_via_tmpfile(WIMStruct *w, int write_flags,
2136                           unsigned num_threads,
2137                           wimlib_progress_func_t progress_func)
2138 {
2139         size_t wim_name_len;
2140         int ret;
2141
2142         DEBUG("Overwriting `%"TS"' via a temporary file", w->filename);
2143
2144         /* Write the WIM to a temporary file in the same directory as the
2145          * original WIM. */
2146         wim_name_len = tstrlen(w->filename);
2147         tchar tmpfile[wim_name_len + 10];
2148         tmemcpy(tmpfile, w->filename, wim_name_len);
2149         randomize_char_array_with_alnum(tmpfile + wim_name_len, 9);
2150         tmpfile[wim_name_len + 9] = T('\0');
2151
2152         ret = wimlib_write(w, tmpfile, WIMLIB_ALL_IMAGES,
2153                            write_flags | WIMLIB_WRITE_FLAG_FSYNC,
2154                            num_threads, progress_func);
2155         if (ret) {
2156                 ERROR("Failed to write the WIM file `%"TS"'", tmpfile);
2157                 goto out_unlink;
2158         }
2159
2160         close_wim(w);
2161
2162         DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, w->filename);
2163         /* Rename the new file to the old file. */
2164         if (trename(tmpfile, w->filename) != 0) {
2165                 ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'",
2166                                  tmpfile, w->filename);
2167                 ret = WIMLIB_ERR_RENAME;
2168                 goto out_unlink;
2169         }
2170
2171         if (progress_func) {
2172                 union wimlib_progress_info progress;
2173                 progress.rename.from = tmpfile;
2174                 progress.rename.to = w->filename;
2175                 progress_func(WIMLIB_PROGRESS_MSG_RENAME, &progress);
2176         }
2177         goto out;
2178 out_unlink:
2179         /* Remove temporary file. */
2180         if (tunlink(tmpfile) != 0)
2181                 WARNING_WITH_ERRNO("Failed to remove `%"TS"'", tmpfile);
2182 out:
2183         return ret;
2184 }
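
/*
 * Illustrative sketch only, with plain POSIX calls; sketch_replace_via_tmpfile
 * and write_new_copy are hypothetical names, and mkstemp() stands in for
 * wimlib's randomize_char_array_with_alnum() purely for brevity.  It shows the
 * same "write a complete new copy, then atomically rename() it over the
 * original" pattern used by overwrite_wim_via_tmpfile() above.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int
sketch_replace_via_tmpfile(const char *path, int (*write_new_copy)(int fd))
{
        char tmp[4096];
        int fd;

        /* Create the temporary file in the same directory as the original so
         * that rename() stays within one filesystem and remains atomic. */
        if (snprintf(tmp, sizeof(tmp), "%s.XXXXXX", path) >= (int)sizeof(tmp))
                return -1;
        fd = mkstemp(tmp);
        if (fd < 0)
                return -1;

        /* Write and flush the complete replacement file. */
        if (write_new_copy(fd) != 0 || fsync(fd) != 0) {
                close(fd);
                unlink(tmp);
                return -1;
        }
        if (close(fd) != 0) {
                unlink(tmp);
                return -1;
        }

        /* Atomically replace the original.  On failure, the original file is
         * still intact and the temporary file is removed. */
        if (rename(tmp, path) != 0) {
                unlink(tmp);
                return -1;
        }
        return 0;
}
#endif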
2185
2186 /*
2187  * Writes a WIM file to the original file that it was read from, overwriting it.
2188  */
2189 WIMLIBAPI int
2190 wimlib_overwrite(WIMStruct *w, int write_flags,
2191                  unsigned num_threads,
2192                  wimlib_progress_func_t progress_func)
2193 {
2194         write_flags &= WIMLIB_WRITE_MASK_PUBLIC;
2195
2196         if (!w->filename)
2197                 return WIMLIB_ERR_NO_FILENAME;
2198
2199         if (w->hdr.total_parts != 1) {
2200                 ERROR("Cannot modify a split WIM");
2201                 return WIMLIB_ERR_SPLIT_UNSUPPORTED;
2202         }
2203
2204         if ((!w->deletion_occurred || (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
2205             && !(write_flags & WIMLIB_WRITE_FLAG_REBUILD))
2206         {
2207                 int ret;
2208                 ret = overwrite_wim_inplace(w, write_flags, num_threads,
2209                                             progress_func);
2210                 if (ret == WIMLIB_ERR_RESOURCE_ORDER)
2211                         WARNING("Falling back to re-building entire WIM");
2212                 else
2213                         return ret;
2214         }
2215         return overwrite_wim_via_tmpfile(w, write_flags, num_threads,
2216                                          progress_func);
2217 }