1 /*
2  * write.c
3  *
4  * Support for writing WIM files; write a WIM file, overwrite a WIM file, write
5  * compressed file resources, etc.
6  */
7
8 /*
9  * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
10  *
11  * This file is free software; you can redistribute it and/or modify it under
12  * the terms of the GNU Lesser General Public License as published by the Free
13  * Software Foundation; either version 3 of the License, or (at your option) any
14  * later version.
15  *
16  * This file is distributed in the hope that it will be useful, but WITHOUT
17  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18  * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
19  * details.
20  *
21  * You should have received a copy of the GNU Lesser General Public License
22  * along with this file; if not, see http://www.gnu.org/licenses/.
23  */
24
25 #ifdef HAVE_CONFIG_H
26 #  include "config.h"
27 #endif
28
29 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
30 /* On BSD, this should be included before "wimlib/list.h" so that "wimlib/list.h" can
31  * override the LIST_HEAD macro. */
32 #  include <sys/file.h>
33 #endif
34
35 #include <errno.h>
36 #include <fcntl.h>
37 #include <stdlib.h>
38 #include <unistd.h>
39
40 #include "wimlib/alloca.h"
41 #include "wimlib/assert.h"
42 #include "wimlib/blob_table.h"
43 #include "wimlib/chunk_compressor.h"
44 #include "wimlib/endianness.h"
45 #include "wimlib/error.h"
46 #include "wimlib/file_io.h"
47 #include "wimlib/header.h"
48 #include "wimlib/inode.h"
49 #include "wimlib/integrity.h"
50 #include "wimlib/metadata.h"
51 #include "wimlib/paths.h"
52 #include "wimlib/progress.h"
53 #include "wimlib/resource.h"
54 #include "wimlib/solid.h"
55 #ifdef __WIN32__
56 #  include "wimlib/win32.h" /* win32_rename_replacement() */
57 #endif
58 #include "wimlib/write.h"
59 #include "wimlib/xml.h"
60
61
62 /* wimlib internal flags used when writing resources.  */
63 #define WRITE_RESOURCE_FLAG_RECOMPRESS          0x00000001
64 #define WRITE_RESOURCE_FLAG_PIPABLE             0x00000002
65 #define WRITE_RESOURCE_FLAG_SOLID               0x00000004
66 #define WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE 0x00000008
67 #define WRITE_RESOURCE_FLAG_SOLID_SORT          0x00000010
68
69 static int
70 write_flags_to_resource_flags(int write_flags)
71 {
72         int write_resource_flags = 0;
73
74         if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
75                 write_resource_flags |= WRITE_RESOURCE_FLAG_RECOMPRESS;
76
77         if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
78                 write_resource_flags |= WRITE_RESOURCE_FLAG_PIPABLE;
79
80         if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
81                 write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID;
82
83         if (write_flags & WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES)
84                 write_resource_flags |= WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE;
85
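           /* By default, sort the blobs when writing a solid resource; the
            * library user can disable this with WIMLIB_WRITE_FLAG_NO_SOLID_SORT.  */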
86         if ((write_flags & (WIMLIB_WRITE_FLAG_SOLID |
87                             WIMLIB_WRITE_FLAG_NO_SOLID_SORT)) ==
88             WIMLIB_WRITE_FLAG_SOLID)
89                 write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID_SORT;
90
91         return write_resource_flags;
92 }
93
94 struct filter_context {
95         int write_flags;
96         WIMStruct *wim;
97 };
98
99 /*
100  * Determine whether the specified blob should be filtered out from the write.
101  *
102  * Return values:
103  *
104  *  < 0 : The blob should be hard-filtered; that is, not included in the output
105  *        WIM file at all.
106  *    0 : The blob should not be filtered out.
107  *  > 0 : The blob should be soft-filtered; that is, it already exists in the
108  *        WIM file and may not need to be written again.
109  */
110 static int
111 blob_filtered(const struct blob_descriptor *blob,
112               const struct filter_context *ctx)
113 {
114         int write_flags;
115         WIMStruct *wim;
116
117         if (ctx == NULL)
118                 return 0;
119
120         write_flags = ctx->write_flags;
121         wim = ctx->wim;
122
123         if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE &&
124             blob->blob_location == BLOB_IN_WIM &&
125             blob->rdesc->wim == wim)
126                 return 1;
127
128         if (write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS &&
129             blob->blob_location == BLOB_IN_WIM &&
130             blob->rdesc->wim != wim)
131                 return -1;
132
133         return 0;
134 }
135
136 static bool
137 blob_hard_filtered(const struct blob_descriptor *blob,
138                    struct filter_context *ctx)
139 {
140         return blob_filtered(blob, ctx) < 0;
141 }
142
143 static inline int
144 may_soft_filter_blobs(const struct filter_context *ctx)
145 {
146         if (ctx == NULL)
147                 return 0;
148         return ctx->write_flags & WIMLIB_WRITE_FLAG_OVERWRITE;
149 }
150
151 static inline int
152 may_hard_filter_blobs(const struct filter_context *ctx)
153 {
154         if (ctx == NULL)
155                 return 0;
156         return ctx->write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS;
157 }
158
159 static inline int
160 may_filter_blobs(const struct filter_context *ctx)
161 {
162         return (may_soft_filter_blobs(ctx) || may_hard_filter_blobs(ctx));
163 }
164
165 /* Return true if the specified blob is in a compressed WIM resource whose
166  * compressed data can be reused with the specified output parameters.  */
167 static bool
168 can_raw_copy(const struct blob_descriptor *blob,
169              int write_resource_flags, int out_ctype, u32 out_chunk_size)
170 {
171         const struct wim_resource_descriptor *rdesc;
172
173         if (write_resource_flags & WRITE_RESOURCE_FLAG_RECOMPRESS)
174                 return false;
175
176         if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
177                 return false;
178
179         if (blob->blob_location != BLOB_IN_WIM)
180                 return false;
181
182         rdesc = blob->rdesc;
183
184         if (rdesc->is_pipable != !!(write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE))
185                 return false;
186
187         if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) {
188                 /* Normal compressed resource: Must use same compression type
189                  * and chunk size.  */
190                 return (rdesc->compression_type == out_ctype &&
191                         rdesc->chunk_size == out_chunk_size);
192         }
193
194         if ((rdesc->flags & WIM_RESHDR_FLAG_SOLID) &&
195             (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
196         {
197                 /* Solid resource: Such resources may contain multiple blobs,
198                  * and in general only a subset of them need to be written.  As
199                  * a heuristic, re-use the raw data if more than two-thirds the
200                  * uncompressed size is being written.  */
201
202                 /* Note: solid resources contain a header that specifies the
203                  * compression type and chunk size; therefore we don't need to
204                  * check if they are compatible with @out_ctype and
205                  * @out_chunk_size.  */
206
207                 struct blob_descriptor *res_blob;
208                 u64 write_size = 0;
209
210                 list_for_each_entry(res_blob, &rdesc->blob_list, rdesc_node)
211                         if (res_blob->will_be_in_output_wim)
212                                 write_size += res_blob->size;
213
214                 return (write_size > rdesc->uncompressed_size * 2 / 3);
215         }
216
217         return false;
218 }
219
220 static u32
221 reshdr_flags_for_blob(const struct blob_descriptor *blob)
222 {
223         u32 reshdr_flags = 0;
224         if (blob->is_metadata)
225                 reshdr_flags |= WIM_RESHDR_FLAG_METADATA;
226         return reshdr_flags;
227 }
228
229 static void
230 blob_set_out_reshdr_for_reuse(struct blob_descriptor *blob)
231 {
232         const struct wim_resource_descriptor *rdesc;
233
234         wimlib_assert(blob->blob_location == BLOB_IN_WIM);
235         rdesc = blob->rdesc;
236
237         if (rdesc->flags & WIM_RESHDR_FLAG_SOLID) {
238                 blob->out_reshdr.offset_in_wim = blob->offset_in_res;
239                 blob->out_reshdr.uncompressed_size = 0;
240                 blob->out_reshdr.size_in_wim = blob->size;
241
242                 blob->out_res_offset_in_wim = rdesc->offset_in_wim;
243                 blob->out_res_size_in_wim = rdesc->size_in_wim;
244                 blob->out_res_uncompressed_size = rdesc->uncompressed_size;
245         } else {
246                 blob->out_reshdr.offset_in_wim = rdesc->offset_in_wim;
247                 blob->out_reshdr.uncompressed_size = rdesc->uncompressed_size;
248                 blob->out_reshdr.size_in_wim = rdesc->size_in_wim;
249         }
250         blob->out_reshdr.flags = rdesc->flags;
251 }
252
253
254 /* Write the header for a blob in a pipable WIM.  */
255 static int
256 write_pwm_blob_header(const struct blob_descriptor *blob,
257                       struct filedes *out_fd, bool compressed)
258 {
259         struct pwm_blob_hdr blob_hdr;
260         u32 reshdr_flags;
261         int ret;
262
263         wimlib_assert(!blob->unhashed);
264
265         blob_hdr.magic = cpu_to_le64(PWM_BLOB_MAGIC);
266         blob_hdr.uncompressed_size = cpu_to_le64(blob->size);
267         copy_hash(blob_hdr.hash, blob->hash);
268         reshdr_flags = reshdr_flags_for_blob(blob);
269         if (compressed)
270                 reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED;
271         blob_hdr.flags = cpu_to_le32(reshdr_flags);
272         ret = full_write(out_fd, &blob_hdr, sizeof(blob_hdr));
273         if (ret)
274                 ERROR_WITH_ERRNO("Write error");
275         return ret;
276 }
277
278 struct write_blobs_progress_data {
279         wimlib_progress_func_t progfunc;
280         void *progctx;
281         union wimlib_progress_info progress;
282         uint64_t next_progress;
283 };
284
285 static int
286 do_write_blobs_progress(struct write_blobs_progress_data *progress_data,
287                         u64 complete_size, u32 complete_count, bool discarded)
288 {
289         union wimlib_progress_info *progress = &progress_data->progress;
290         int ret;
291
292         if (discarded) {
293                 progress->write_streams.total_bytes -= complete_size;
294                 progress->write_streams.total_streams -= complete_count;
295                 if (progress_data->next_progress != ~(uint64_t)0 &&
296                     progress_data->next_progress > progress->write_streams.total_bytes)
297                 {
298                         progress_data->next_progress = progress->write_streams.total_bytes;
299                 }
300         } else {
301                 progress->write_streams.completed_bytes += complete_size;
302                 progress->write_streams.completed_streams += complete_count;
303         }
304
305         if (progress->write_streams.completed_bytes >= progress_data->next_progress)
306         {
307                 ret = call_progress(progress_data->progfunc,
308                                     WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
309                                     progress,
310                                     progress_data->progctx);
311                 if (ret)
312                         return ret;
313
314                 if (progress_data->next_progress == progress->write_streams.total_bytes) {
315                         progress_data->next_progress = ~(uint64_t)0;
316                 } else {
317                         /* Handle rate-limiting of messages  */
318
319                         /* Send new message as soon as another 1/128 of the
320                          * total has been written.  (Arbitrary number.)  */
321                         progress_data->next_progress =
322                                 progress->write_streams.completed_bytes +
323                                         progress->write_streams.total_bytes / 128;
324
325                         /* ... Unless that would be more than 5000000 bytes, in
326                          * which case send the next after the next 5000000
327                          * bytes.  (Another arbitrary number.)  */
328                         if (progress->write_streams.completed_bytes + 5000000 <
329                             progress_data->next_progress)
330                                 progress_data->next_progress =
331                                         progress->write_streams.completed_bytes + 5000000;
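                            /* (For example, with a 1 GiB total, 1/128 of the
                             * total is 8 MiB, so the 5,000,000-byte cap above
                             * applies and a message is sent at least every
                             * ~5 MB.)  */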
332
333                         /* ... But always send a message as soon as we're
334                          * completely done.  */
335                         if (progress->write_streams.total_bytes <
336                             progress_data->next_progress)
337                                 progress_data->next_progress =
338                                         progress->write_streams.total_bytes;
339                 }
340         }
341         return 0;
342 }
343
344 struct write_blobs_ctx {
345         /* File descriptor to which the blobs are being written.  */
346         struct filedes *out_fd;
347
348         /* Blob table for the WIMStruct on whose behalf the blobs are being
349          * written.  */
350         struct blob_table *blob_table;
351
352         /* Compression format to use.  */
353         int out_ctype;
354
355         /* Maximum uncompressed chunk size in compressed resources to use.  */
356         u32 out_chunk_size;
357
358         /* Flags that affect how the blobs will be written.  */
359         int write_resource_flags;
360
361         /* Data used for issuing WRITE_STREAMS progress.  */
362         struct write_blobs_progress_data progress_data;
363
364         struct filter_context *filter_ctx;
365
366         /* Upper bound on the total number of bytes that need to be
367          * compressed.  */
368         u64 num_bytes_to_compress;
369
370         /* Pointer to the chunk_compressor implementation being used for
371          * compressing chunks of data, or NULL if chunks are being written
372          * uncompressed.  */
373         struct chunk_compressor *compressor;
374
375         /* A buffer of size @out_chunk_size that has been loaned out from the
376          * chunk compressor and is currently being filled with the uncompressed
377          * data of the next chunk.  */
378         u8 *cur_chunk_buf;
379
380         /* Number of bytes in @cur_chunk_buf that are currently filled.  */
381         size_t cur_chunk_buf_filled;
382
383         /* List of blobs that currently have chunks being compressed.  */
384         struct list_head blobs_being_compressed;
385
386         /* List of blobs in the solid resource.  Blobs are moved here from
387          * @blobs_being_compressed; used only when writing a solid resource.  */
388         struct list_head blobs_in_solid_resource;
389
390         /* Current uncompressed offset in the blob being read.  */
391         u64 cur_read_blob_offset;
392
393         /* Uncompressed size of the blob currently being read.  */
394         u64 cur_read_blob_size;
395
396         /* Current uncompressed offset in the blob being written.  */
397         u64 cur_write_blob_offset;
398
399         /* Uncompressed size of resource currently being written.  */
400         u64 cur_write_res_size;
401
402         /* Array that is filled in with compressed chunk sizes as a resource is
403          * being written.  */
404         u64 *chunk_csizes;
405
406         /* Index of next entry in @chunk_csizes to fill in.  */
407         size_t chunk_index;
408
409         /* Number of entries in @chunk_csizes currently allocated.  */
410         size_t num_alloc_chunks;
411
412         /* Offset in the output file of the start of the chunks of the resource
413          * currently being written.  */
414         u64 chunks_start_offset;
415 };
416
417 /* Reserve space for the chunk table and prepare to accumulate the chunk table
418  * in memory.  */
419 static int
420 begin_chunk_table(struct write_blobs_ctx *ctx, u64 res_expected_size)
421 {
422         u64 expected_num_chunks;
423         u64 expected_num_chunk_entries;
424         size_t reserve_size;
425         int ret;
426
427         /* Calculate the number of chunks and chunk entries that should be
428          * needed for the resource.  These normally will be the final values,
429          * but in SOLID mode some of the blobs we're planning to write into the
430          * resource may be duplicates, and therefore discarded, potentially
431          * decreasing the number of chunk entries needed.  */
432         expected_num_chunks = DIV_ROUND_UP(res_expected_size, ctx->out_chunk_size);
433         expected_num_chunk_entries = expected_num_chunks;
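            /* (A non-solid resource's chunk table omits the entry for the first
             * chunk, since that chunk always starts at offset 0.)  */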
434         if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
435                 expected_num_chunk_entries--;
436
437         /* Make sure the chunk_csizes array is long enough to store the
438          * compressed size of each chunk.  */
439         if (expected_num_chunks > ctx->num_alloc_chunks) {
440                 u64 new_length = expected_num_chunks + 50;
441
442                 if ((size_t)new_length != new_length) {
443                         ERROR("Resource size too large (%"PRIu64" bytes)!",
444                               res_expected_size);
445                         return WIMLIB_ERR_NOMEM;
446                 }
447
448                 FREE(ctx->chunk_csizes);
449                 ctx->chunk_csizes = MALLOC(new_length * sizeof(ctx->chunk_csizes[0]));
450                 if (ctx->chunk_csizes == NULL) {
451                         ctx->num_alloc_chunks = 0;
452                         return WIMLIB_ERR_NOMEM;
453                 }
454                 ctx->num_alloc_chunks = new_length;
455         }
456
457         ctx->chunk_index = 0;
458
459         if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)) {
460                 /* Reserve space for the chunk table in the output file.  In the
461                  * case of solid resources this reserves the upper bound for the
462                  * needed space, not necessarily the exact space which will
463                  * prove to be needed.  At this point, we just use @chunk_csizes
464                  * for a buffer of 0's because the actual compressed chunk sizes
465                  * are unknown.  */
466                 reserve_size = expected_num_chunk_entries *
467                                get_chunk_entry_size(res_expected_size,
468                                                     0 != (ctx->write_resource_flags &
469                                                           WRITE_RESOURCE_FLAG_SOLID));
470                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)
471                         reserve_size += sizeof(struct alt_chunk_table_header_disk);
472                 memset(ctx->chunk_csizes, 0, reserve_size);
473                 ret = full_write(ctx->out_fd, ctx->chunk_csizes, reserve_size);
474                 if (ret)
475                         return ret;
476         }
477         return 0;
478 }
479
480 static int
481 begin_write_resource(struct write_blobs_ctx *ctx, u64 res_expected_size)
482 {
483         int ret;
484
485         wimlib_assert(res_expected_size != 0);
486
487         if (ctx->compressor != NULL) {
488                 ret = begin_chunk_table(ctx, res_expected_size);
489                 if (ret)
490                         return ret;
491         }
492
493         /* Output file descriptor is now positioned at the offset at which to
494          * write the first chunk of the resource.  */
495         ctx->chunks_start_offset = ctx->out_fd->offset;
496         ctx->cur_write_blob_offset = 0;
497         ctx->cur_write_res_size = res_expected_size;
498         return 0;
499 }
500
501 static int
502 end_chunk_table(struct write_blobs_ctx *ctx, u64 res_actual_size,
503                 u64 *res_start_offset_ret, u64 *res_store_size_ret)
504 {
505         size_t actual_num_chunks;
506         size_t actual_num_chunk_entries;
507         size_t chunk_entry_size;
508         int ret;
509
510         actual_num_chunks = ctx->chunk_index;
511         actual_num_chunk_entries = actual_num_chunks;
512         if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
513                 actual_num_chunk_entries--;
514
515         chunk_entry_size = get_chunk_entry_size(res_actual_size,
516                                                 0 != (ctx->write_resource_flags &
517                                                       WRITE_RESOURCE_FLAG_SOLID));
518
519         typedef le64 _may_alias_attribute aliased_le64_t;
520         typedef le32 _may_alias_attribute aliased_le32_t;
521
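            /* Convert the compressed chunk sizes into the on-disk chunk table
             * format: a solid resource stores each chunk's compressed size, while
             * a non-solid resource stores the cumulative offset of each chunk
             * after the first (e.g. compressed sizes 100, 200, 300 become the two
             * entries 100 and 300).  */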
522         if (chunk_entry_size == 4) {
523                 aliased_le32_t *entries = (aliased_le32_t*)ctx->chunk_csizes;
524
525                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
526                         for (size_t i = 0; i < actual_num_chunk_entries; i++)
527                                 entries[i] = cpu_to_le32(ctx->chunk_csizes[i]);
528                 } else {
529                         u32 offset = ctx->chunk_csizes[0];
530                         for (size_t i = 0; i < actual_num_chunk_entries; i++) {
531                                 u32 next_size = ctx->chunk_csizes[i + 1];
532                                 entries[i] = cpu_to_le32(offset);
533                                 offset += next_size;
534                         }
535                 }
536         } else {
537                 aliased_le64_t *entries = (aliased_le64_t*)ctx->chunk_csizes;
538
539                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
540                         for (size_t i = 0; i < actual_num_chunk_entries; i++)
541                                 entries[i] = cpu_to_le64(ctx->chunk_csizes[i]);
542                 } else {
543                         u64 offset = ctx->chunk_csizes[0];
544                         for (size_t i = 0; i < actual_num_chunk_entries; i++) {
545                                 u64 next_size = ctx->chunk_csizes[i + 1];
546                                 entries[i] = cpu_to_le64(offset);
547                                 offset += next_size;
548                         }
549                 }
550         }
551
552         size_t chunk_table_size = actual_num_chunk_entries * chunk_entry_size;
553         u64 res_start_offset;
554         u64 res_end_offset;
555
556         if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
557                 ret = full_write(ctx->out_fd, ctx->chunk_csizes, chunk_table_size);
558                 if (ret)
559                         goto write_error;
560                 res_end_offset = ctx->out_fd->offset;
561                 res_start_offset = ctx->chunks_start_offset;
562         } else {
563                 res_end_offset = ctx->out_fd->offset;
564
565                 u64 chunk_table_offset;
566
567                 chunk_table_offset = ctx->chunks_start_offset - chunk_table_size;
568
569                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
570                         struct alt_chunk_table_header_disk hdr;
571
572                         hdr.res_usize = cpu_to_le64(res_actual_size);
573                         hdr.chunk_size = cpu_to_le32(ctx->out_chunk_size);
574                         hdr.compression_format = cpu_to_le32(ctx->out_ctype);
575
576                         BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_XPRESS != 1);
577                         BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZX != 2);
578                         BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZMS != 3);
579
580                         ret = full_pwrite(ctx->out_fd, &hdr, sizeof(hdr),
581                                           chunk_table_offset - sizeof(hdr));
582                         if (ret)
583                                 goto write_error;
584                         res_start_offset = chunk_table_offset - sizeof(hdr);
585                 } else {
586                         res_start_offset = chunk_table_offset;
587                 }
588
589                 ret = full_pwrite(ctx->out_fd, ctx->chunk_csizes,
590                                   chunk_table_size, chunk_table_offset);
591                 if (ret)
592                         goto write_error;
593         }
594
595         *res_start_offset_ret = res_start_offset;
596         *res_store_size_ret = res_end_offset - res_start_offset;
597
598         return 0;
599
600 write_error:
601         ERROR_WITH_ERRNO("Write error");
602         return ret;
603 }
604
605 /* Finish writing a WIM resource by writing or updating the chunk table (if not
606  * writing the data uncompressed) and loading its metadata into @out_reshdr.  */
607 static int
608 end_write_resource(struct write_blobs_ctx *ctx, struct wim_reshdr *out_reshdr)
609 {
610         int ret;
611         u64 res_size_in_wim;
612         u64 res_uncompressed_size;
613         u64 res_offset_in_wim;
614
615         wimlib_assert(ctx->cur_write_blob_offset == ctx->cur_write_res_size ||
616                       (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID));
617         res_uncompressed_size = ctx->cur_write_res_size;
618
619         if (ctx->compressor) {
620                 ret = end_chunk_table(ctx, res_uncompressed_size,
621                                       &res_offset_in_wim, &res_size_in_wim);
622                 if (ret)
623                         return ret;
624         } else {
625                 res_offset_in_wim = ctx->chunks_start_offset;
626                 res_size_in_wim = ctx->out_fd->offset - res_offset_in_wim;
627         }
628         out_reshdr->uncompressed_size = res_uncompressed_size;
629         out_reshdr->size_in_wim = res_size_in_wim;
630         out_reshdr->offset_in_wim = res_offset_in_wim;
631         return 0;
632 }
633
634 /* Call when no more data from the file at @path is needed.  */
635 static int
636 done_with_file(const tchar *path, wimlib_progress_func_t progfunc, void *progctx)
637 {
638         union wimlib_progress_info info;
639
640         info.done_with_file.path_to_file = path;
641
642         return call_progress(progfunc, WIMLIB_PROGRESS_MSG_DONE_WITH_FILE,
643                              &info, progctx);
644 }
645
646 static int
647 do_done_with_blob(struct blob_descriptor *blob,
648                   wimlib_progress_func_t progfunc, void *progctx)
649 {
650         int ret;
651         struct wim_inode *inode;
652
653         if (!blob->may_send_done_with_file)
654                 return 0;
655
656         inode = blob->file_inode;
657
658         wimlib_assert(inode != NULL);
659         wimlib_assert(inode->num_remaining_streams > 0);
660         if (--inode->num_remaining_streams > 0)
661                 return 0;
662
663 #ifdef __WIN32__
664         /* XXX: This logic really should be somewhere else.  */
665
666         /* We want the path to the file, but blob->file_on_disk might actually
667          * refer to a named data stream.  Temporarily strip the named data
668          * stream from the path.  */
669         wchar_t *p_colon = NULL;
670         wchar_t *p_question_mark = NULL;
671         const wchar_t *p_stream_name;
672
673         p_stream_name = path_stream_name(blob->file_on_disk);
674         if (unlikely(p_stream_name)) {
675                 p_colon = (wchar_t *)(p_stream_name - 1);
676                 wimlib_assert(*p_colon == L':');
677                 *p_colon = L'\0';
678         }
679
680         /* We should also use a fake Win32 path instead of an NT path.  */
681         if (!wcsncmp(blob->file_on_disk, L"\\??\\", 4)) {
682                 p_question_mark = &blob->file_on_disk[1];
683                 *p_question_mark = L'\\';
684         }
685 #endif
686
687         ret = done_with_file(blob->file_on_disk, progfunc, progctx);
688
689 #ifdef __WIN32__
690         if (p_colon)
691                 *p_colon = L':';
692         if (p_question_mark)
693                 *p_question_mark = L'?';
694 #endif
695         return ret;
696 }
697
698 /* Handle WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES mode.  */
699 static inline int
700 done_with_blob(struct blob_descriptor *blob, struct write_blobs_ctx *ctx)
701 {
702         if (likely(!(ctx->write_resource_flags &
703                      WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE)))
704                 return 0;
705         return do_done_with_blob(blob, ctx->progress_data.progfunc,
706                                  ctx->progress_data.progctx);
707 }
708
709 /* Begin processing a blob for writing.  */
710 static int
711 write_blob_begin_read(struct blob_descriptor *blob, void *_ctx)
712 {
713         struct write_blobs_ctx *ctx = _ctx;
714         int ret;
715
716         wimlib_assert(blob->size > 0);
717
718         ctx->cur_read_blob_offset = 0;
719         ctx->cur_read_blob_size = blob->size;
720
721         /* As an optimization, we allow some blobs to be "unhashed", meaning
722          * their SHA-1 message digests are unknown.  This is the case with blobs
723          * that are added by scanning a directory tree with wimlib_add_image(),
724          * for example.  Since WIM uses single-instance blobs, we don't know
725          * whether each such blob really needs to be written until it is
726          * actually checksummed, unless it has a unique size.  In such cases we
727          * read and checksum the blob in this function, thereby advancing ahead
728          * of read_blob_list(), which will still provide the data again to
729          * write_blob_process_chunk().  This is okay because an unhashed blob
730          * cannot be in a WIM resource, which might be costly to decompress.  */
731         if (ctx->blob_table != NULL && blob->unhashed && !blob->unique_size) {
732
733                 struct blob_descriptor *new_blob;
734
735                 ret = hash_unhashed_blob(blob, ctx->blob_table, &new_blob);
736                 if (ret)
737                         return ret;
738                 if (new_blob != blob) {
739                         /* Duplicate blob detected.  */
740
741                         if (new_blob->will_be_in_output_wim ||
742                             blob_filtered(new_blob, ctx->filter_ctx))
743                         {
744                                 /* The duplicate blob is already being included
745                                  * in the output WIM, or it would have been
746                                  * filtered out had it been included.  Skip writing this blob
747                                  * (and reading it again) entirely, passing its
748                                  * output reference count to the duplicate blob
749                                  * in the former case.  */
750                                 ret = do_write_blobs_progress(&ctx->progress_data,
751                                                               blob->size, 1, true);
752                                 list_del(&blob->write_blobs_list);
753                                 list_del(&blob->blob_table_list);
754                                 if (new_blob->will_be_in_output_wim)
755                                         new_blob->out_refcnt += blob->out_refcnt;
756                                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)
757                                         ctx->cur_write_res_size -= blob->size;
758                                 if (!ret)
759                                         ret = done_with_blob(blob, ctx);
760                                 free_blob_descriptor(blob);
761                                 if (ret)
762                                         return ret;
763                                 return BEGIN_BLOB_STATUS_SKIP_BLOB;
764                         } else {
765                                 /* The duplicate blob can validly be written,
766                                  * but was not marked as such.  Discard the
767                                  * current blob descriptor and use the
768                                  * duplicate, but actually freeing the current
769                                  * blob descriptor must wait until
770                                  * read_blob_list() has finished reading its
771                                  * data.  */
772                                 list_replace(&blob->write_blobs_list,
773                                              &new_blob->write_blobs_list);
774                                 list_replace(&blob->blob_table_list,
775                                              &new_blob->blob_table_list);
776                                 blob->will_be_in_output_wim = 0;
777                                 new_blob->out_refcnt = blob->out_refcnt;
778                                 new_blob->will_be_in_output_wim = 1;
779                                 new_blob->may_send_done_with_file = 0;
780                                 blob = new_blob;
781                         }
782                 }
783         }
784         list_move_tail(&blob->write_blobs_list, &ctx->blobs_being_compressed);
785         return 0;
786 }
787
788 /* Rewrite a blob that was just written compressed (as a non-solid WIM resource)
789  * as uncompressed instead.  */
790 static int
791 write_blob_uncompressed(struct blob_descriptor *blob, struct filedes *out_fd)
792 {
793         int ret;
794         u64 begin_offset = blob->out_reshdr.offset_in_wim;
795         u64 end_offset = out_fd->offset;
796
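            /* If seeking back to the start of the resource fails, keep the
             * compressed version rather than failing the write.  */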
797         if (filedes_seek(out_fd, begin_offset) == -1)
798                 return 0;
799
800         ret = extract_full_blob_to_fd(blob, out_fd);
801         if (ret) {
802                 /* Error reading the uncompressed data.  */
803                 if (out_fd->offset == begin_offset &&
804                     filedes_seek(out_fd, end_offset) != -1)
805                 {
806                         /* Nothing was actually written yet, and we successfully
807                          * seeked to the end of the compressed resource, so
808                          * don't issue a hard error; just keep the compressed
809                          * resource instead.  */
810                         WARNING("Recovered compressed resource of "
811                                 "size %"PRIu64", continuing on.", blob->size);
812                         return 0;
813                 }
814                 return ret;
815         }
816
817         wimlib_assert(out_fd->offset - begin_offset == blob->size);
818
819         if (out_fd->offset < end_offset &&
820             0 != ftruncate(out_fd->fd, out_fd->offset))
821         {
822                 ERROR_WITH_ERRNO("Can't truncate output file to "
823                                  "offset %"PRIu64, out_fd->offset);
824                 return WIMLIB_ERR_WRITE;
825         }
826
827         blob->out_reshdr.size_in_wim = blob->size;
828         blob->out_reshdr.flags &= ~(WIM_RESHDR_FLAG_COMPRESSED |
829                                     WIM_RESHDR_FLAG_SOLID);
830         return 0;
831 }
832
833 /* Returns true if the specified blob, which was written as a non-solid
834  * resource, should be truncated from the WIM file and re-written uncompressed.
835  * blob->out_reshdr must be filled in from the initial write of the blob.  */
836 static bool
837 should_rewrite_blob_uncompressed(const struct write_blobs_ctx *ctx,
838                                  const struct blob_descriptor *blob)
839 {
840         /* If the compressed data is smaller than the uncompressed data, prefer
841          * the compressed data.  */
842         if (blob->out_reshdr.size_in_wim < blob->out_reshdr.uncompressed_size)
843                 return false;
844
845         /* If we're not actually writing compressed data, then there's no need
846          * for re-writing.  */
847         if (!ctx->compressor)
848                 return false;
849
850         /* If writing a pipable WIM, everything we write to the output is final
851          * (it might actually be a pipe!).  */
852         if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)
853                 return false;
854
855         /* If the blob that would need to be re-read is located in a solid
856          * resource in another WIM file, then re-reading it would be costly.  So
857          * don't do it.
858          *
859          * Exception: if the compressed size happens to be *exactly* the same as
860          * the uncompressed size, then the blob *must* be written uncompressed
861          * in order to remain compatible with the Windows Overlay Filesystem
862          * Filter Driver (WOF).
863          *
864          * TODO: we are currently assuming that the optimization for
865          * single-chunk resources in maybe_rewrite_blob_uncompressed() prevents
866          * this case from being triggered too often.  To fully prevent excessive
867          * decompressions in degenerate cases, we really should obtain the
868          * uncompressed data by decompressing the compressed data we wrote to
869          * the output file.
870          */
871         if (blob->blob_location == BLOB_IN_WIM &&
872             blob->size != blob->rdesc->uncompressed_size &&
873             blob->size != blob->out_reshdr.size_in_wim)
874                 return false;
875
876         return true;
877 }
878
879 static int
880 maybe_rewrite_blob_uncompressed(struct write_blobs_ctx *ctx,
881                                 struct blob_descriptor *blob)
882 {
883         if (!should_rewrite_blob_uncompressed(ctx, blob))
884                 return 0;
885
886         /* Regular (non-solid) WIM resources with exactly one chunk and
887          * compressed size equal to uncompressed size are exactly the same as
888          * the corresponding uncompressed data --- since there must be 0 entries
889          * in the chunk table and the only chunk must be stored uncompressed.
890          * In this case, there's no need to rewrite anything.  */
891         if (ctx->chunk_index == 1 &&
892             blob->out_reshdr.size_in_wim == blob->out_reshdr.uncompressed_size)
893         {
894                 blob->out_reshdr.flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
895                 return 0;
896         }
897
898         return write_blob_uncompressed(blob, ctx->out_fd);
899 }
900
901 /* Write the next chunk of (typically compressed) data to the output WIM,
902  * handling the writing of the chunk table.  */
903 static int
904 write_chunk(struct write_blobs_ctx *ctx, const void *cchunk,
905             size_t csize, size_t usize)
906 {
907         int ret;
908         struct blob_descriptor *blob;
909         u32 completed_blob_count;
910         u32 completed_size;
911
912         blob = list_entry(ctx->blobs_being_compressed.next,
913                           struct blob_descriptor, write_blobs_list);
914
915         if (ctx->cur_write_blob_offset == 0 &&
916             !(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
917         {
918                 /* Starting to write a new blob in non-solid mode.  */
919
920                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
921                         ret = write_pwm_blob_header(blob, ctx->out_fd,
922                                                     ctx->compressor != NULL);
923                         if (ret)
924                                 return ret;
925                 }
926
927                 ret = begin_write_resource(ctx, blob->size);
928                 if (ret)
929                         return ret;
930         }
931
932         if (ctx->compressor != NULL) {
933                 /* Record the compressed chunk size.  */
934                 wimlib_assert(ctx->chunk_index < ctx->num_alloc_chunks);
935                 ctx->chunk_csizes[ctx->chunk_index++] = csize;
936
937                 /* If writing a pipable WIM, precede the chunk data with a chunk
938                  * header that provides the compressed chunk size.  */
939                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
940                         struct pwm_chunk_hdr chunk_hdr = {
941                                 .compressed_size = cpu_to_le32(csize),
942                         };
943                         ret = full_write(ctx->out_fd, &chunk_hdr,
944                                          sizeof(chunk_hdr));
945                         if (ret)
946                                 goto write_error;
947                 }
948         }
949
950         /* Write the chunk data.  */
951         ret = full_write(ctx->out_fd, cchunk, csize);
952         if (ret)
953                 goto write_error;
954
955         ctx->cur_write_blob_offset += usize;
956
957         completed_size = usize;
958         completed_blob_count = 0;
959         if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
960                 /* Wrote chunk in solid mode.  It may have finished multiple
961                  * blobs.  */
962                 struct blob_descriptor *next_blob;
963
964                 while (blob && ctx->cur_write_blob_offset >= blob->size) {
965
966                         ctx->cur_write_blob_offset -= blob->size;
967
968                         if (ctx->cur_write_blob_offset)
969                                 next_blob = list_entry(blob->write_blobs_list.next,
970                                                       struct blob_descriptor,
971                                                       write_blobs_list);
972                         else
973                                 next_blob = NULL;
974
975                         ret = done_with_blob(blob, ctx);
976                         if (ret)
977                                 return ret;
978                         list_move_tail(&blob->write_blobs_list, &ctx->blobs_in_solid_resource);
979                         completed_blob_count++;
980
981                         blob = next_blob;
982                 }
983         } else {
984                 /* Wrote chunk in non-solid mode.  It may have finished a
985                  * blob.  */
986                 if (ctx->cur_write_blob_offset == blob->size) {
987
988                         wimlib_assert(ctx->cur_write_blob_offset ==
989                                       ctx->cur_write_res_size);
990
991                         ret = end_write_resource(ctx, &blob->out_reshdr);
992                         if (ret)
993                                 return ret;
994
995                         blob->out_reshdr.flags = reshdr_flags_for_blob(blob);
996                         if (ctx->compressor != NULL)
997                                 blob->out_reshdr.flags |= WIM_RESHDR_FLAG_COMPRESSED;
998
999                         ret = maybe_rewrite_blob_uncompressed(ctx, blob);
1000                         if (ret)
1001                                 return ret;
1002
1003                         wimlib_assert(blob->out_reshdr.uncompressed_size == blob->size);
1004
1005                         ctx->cur_write_blob_offset = 0;
1006
1007                         ret = done_with_blob(blob, ctx);
1008                         if (ret)
1009                                 return ret;
1010                         list_del(&blob->write_blobs_list);
1011                         completed_blob_count++;
1012                 }
1013         }
1014
1015         return do_write_blobs_progress(&ctx->progress_data, completed_size,
1016                                        completed_blob_count, false);
1017
1018 write_error:
1019         ERROR_WITH_ERRNO("Write error");
1020         return ret;
1021 }
1022
1023 static int
1024 prepare_chunk_buffer(struct write_blobs_ctx *ctx)
1025 {
1026         /* While we are unable to get a new chunk buffer due to too many chunks
1027          * already outstanding, retrieve and write the next compressed chunk. */
1028         while (!(ctx->cur_chunk_buf =
1029                  ctx->compressor->get_chunk_buffer(ctx->compressor)))
1030         {
1031                 const void *cchunk;
1032                 u32 csize;
1033                 u32 usize;
1034                 bool bret;
1035                 int ret;
1036
1037                 bret = ctx->compressor->get_compression_result(ctx->compressor,
1038                                                                &cchunk,
1039                                                                &csize,
1040                                                                &usize);
1041                 wimlib_assert(bret);
1042
1043                 ret = write_chunk(ctx, cchunk, csize, usize);
1044                 if (ret)
1045                         return ret;
1046         }
1047         return 0;
1048 }
1049
1050 /* Process the next chunk of data to be written to a WIM resource.  */
1051 static int
1052 write_blob_process_chunk(const void *chunk, size_t size, void *_ctx)
1053 {
1054         struct write_blobs_ctx *ctx = _ctx;
1055         int ret;
1056         const u8 *chunkptr, *chunkend;
1057
1058         wimlib_assert(size != 0);
1059
1060         if (ctx->compressor == NULL) {
1061                 /* Write chunk uncompressed.  */
1062                 ret = write_chunk(ctx, chunk, size, size);
1063                 if (ret)
1064                         return ret;
1065                 ctx->cur_read_blob_offset += size;
1066                 return 0;
1067         }
1068
1069         /* Submit the chunk for compression, but take into account that the
1070          * @size in which the chunk was provided may not correspond to the
1071          * @out_chunk_size being used for compression.  */
1072         chunkptr = chunk;
1073         chunkend = chunkptr + size;
1074         do {
1075                 size_t needed_chunk_size;
1076                 size_t bytes_consumed;
1077
1078                 if (!ctx->cur_chunk_buf) {
1079                         ret = prepare_chunk_buffer(ctx);
1080                         if (ret)
1081                                 return ret;
1082                 }
1083
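                     /* In solid mode, chunks may span blob boundaries, so every
                      * chunk except possibly the last in the resource is exactly
                      * @out_chunk_size bytes.  In non-solid mode, chunks never
                      * span blobs, so a blob's final chunk may be shorter.  */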
1084                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1085                         needed_chunk_size = ctx->out_chunk_size;
1086                 } else {
1087                         needed_chunk_size = min(ctx->out_chunk_size,
1088                                                 ctx->cur_chunk_buf_filled +
1089                                                         (ctx->cur_read_blob_size -
1090                                                          ctx->cur_read_blob_offset));
1091                 }
1092
1093                 bytes_consumed = min(chunkend - chunkptr,
1094                                      needed_chunk_size - ctx->cur_chunk_buf_filled);
1095
1096                 memcpy(&ctx->cur_chunk_buf[ctx->cur_chunk_buf_filled],
1097                        chunkptr, bytes_consumed);
1098
1099                 chunkptr += bytes_consumed;
1100                 ctx->cur_read_blob_offset += bytes_consumed;
1101                 ctx->cur_chunk_buf_filled += bytes_consumed;
1102
1103                 if (ctx->cur_chunk_buf_filled == needed_chunk_size) {
1104                         ctx->compressor->signal_chunk_filled(ctx->compressor,
1105                                                              ctx->cur_chunk_buf_filled);
1106                         ctx->cur_chunk_buf = NULL;
1107                         ctx->cur_chunk_buf_filled = 0;
1108                 }
1109         } while (chunkptr != chunkend);
1110         return 0;
1111 }
1112
1113 /* Finish processing a blob for writing.  It may not have been completely
1114  * written yet, as the chunk_compressor implementation may still have chunks
1115  * buffered or being compressed.  */
1116 static int
1117 write_blob_end_read(struct blob_descriptor *blob, int status, void *_ctx)
1118 {
1119         struct write_blobs_ctx *ctx = _ctx;
1120
1121         wimlib_assert(ctx->cur_read_blob_offset == ctx->cur_read_blob_size || status);
1122
1123         if (!blob->will_be_in_output_wim) {
1124                 /* The blob was a duplicate.  Now that its data has finished
1125                  * being read, it is being discarded in favor of the duplicate
1126                  * entry.  It therefore is no longer needed, and we can fire the
1127                  * DONE_WITH_FILE callback because the file will not be read
1128                  * again.
1129                  *
1130                  * Note: we can't yet fire DONE_WITH_FILE for non-duplicate
1131                  * blobs, since it needs to be possible to re-read the file if
1132                  * it does not compress to less than its original size.  */
1133                 if (!status)
1134                         status = done_with_blob(blob, ctx);
1135                 free_blob_descriptor(blob);
1136         } else if (!status && blob->unhashed && ctx->blob_table != NULL) {
1137                 /* The blob was not a duplicate and was previously unhashed.
1138                  * Since we passed COMPUTE_MISSING_BLOB_HASHES to
1139                  * read_blob_list(), blob->hash is now computed and valid.  So
1140                  * turn this blob into a "hashed" blob.  */
1141                 list_del(&blob->unhashed_list);
1142                 blob_table_insert(ctx->blob_table, blob);
1143                 blob->unhashed = 0;
1144         }
1145         return status;
1146 }
1147
1148 /* Compute statistics about a list of blobs that will be written.
1149  *
1150  * Assumes the blobs are sorted such that all blobs located in each distinct WIM
1151  * (specified by WIMStruct) are together.  */
1152 static void
1153 compute_blob_list_stats(struct list_head *blob_list,
1154                         struct write_blobs_ctx *ctx)
1155 {
1156         struct blob_descriptor *blob;
1157         u64 total_bytes = 0;
1158         u64 num_blobs = 0;
1159         u64 total_parts = 0;
1160         WIMStruct *prev_wim_part = NULL;
1161
1162         list_for_each_entry(blob, blob_list, write_blobs_list) {
1163                 num_blobs++;
1164                 total_bytes += blob->size;
1165                 if (blob->blob_location == BLOB_IN_WIM) {
1166                         if (prev_wim_part != blob->rdesc->wim) {
1167                                 prev_wim_part = blob->rdesc->wim;
1168                                 total_parts++;
1169                         }
1170                 }
1171         }
1172         ctx->progress_data.progress.write_streams.total_bytes       = total_bytes;
1173         ctx->progress_data.progress.write_streams.total_streams     = num_blobs;
1174         ctx->progress_data.progress.write_streams.completed_bytes   = 0;
1175         ctx->progress_data.progress.write_streams.completed_streams = 0;
1176         ctx->progress_data.progress.write_streams.compression_type  = ctx->out_ctype;
1177         ctx->progress_data.progress.write_streams.total_parts       = total_parts;
1178         ctx->progress_data.progress.write_streams.completed_parts   = 0;
1179         ctx->progress_data.next_progress = 0;
1180 }
1181
1182 /* Find blobs in @blob_list that can be copied to the output WIM in raw form
1183  * rather than compressed.  Delete these blobs from @blob_list and move them to
1184  * @raw_copy_blobs.  Return the total uncompressed size of the blobs that need
1185  * to be compressed.  */
1186 static u64
1187 find_raw_copy_blobs(struct list_head *blob_list,
1188                     int write_resource_flags,
1189                     int out_ctype,
1190                     u32 out_chunk_size,
1191                     struct list_head *raw_copy_blobs)
1192 {
1193         struct blob_descriptor *blob, *tmp;
1194         u64 num_bytes_to_compress = 0;
1195
1196         INIT_LIST_HEAD(raw_copy_blobs);
1197
1198         /* Initialize temporary raw_copy_ok flag.  */
1199         list_for_each_entry(blob, blob_list, write_blobs_list)
1200                 if (blob->blob_location == BLOB_IN_WIM)
1201                         blob->rdesc->raw_copy_ok = 0;
1202
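             /* Move each blob whose resource data can be raw-copied to
              * @raw_copy_blobs; count everything else as data to be compressed.
              * Once a solid resource is accepted, all of its blobs are moved,
              * even though its data will only be copied once.  */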
1203         list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
1204                 if (blob->blob_location == BLOB_IN_WIM &&
1205                     blob->rdesc->raw_copy_ok)
1206                 {
1207                         list_move_tail(&blob->write_blobs_list,
1208                                        raw_copy_blobs);
1209                 } else if (can_raw_copy(blob, write_resource_flags,
1210                                         out_ctype, out_chunk_size))
1211                 {
1212                         blob->rdesc->raw_copy_ok = 1;
1213                         list_move_tail(&blob->write_blobs_list,
1214                                        raw_copy_blobs);
1215                 } else {
1216                         num_bytes_to_compress += blob->size;
1217                 }
1218         }
1219
1220         return num_bytes_to_compress;
1221 }
1222
1223 /* Copy a raw compressed resource located in another WIM file to the WIM file
1224  * being written.  */
1225 static int
1226 write_raw_copy_resource(struct wim_resource_descriptor *in_rdesc,
1227                         struct filedes *out_fd)
1228 {
1229         u64 cur_read_offset;
1230         u64 end_read_offset;
1231         u8 buf[BUFFER_SIZE];
1232         size_t bytes_to_read;
1233         int ret;
1234         struct filedes *in_fd;
1235         struct blob_descriptor *blob;
1236         u64 out_offset_in_wim;
1237
1238         /* Copy the raw data.  */
1239         cur_read_offset = in_rdesc->offset_in_wim;
1240         end_read_offset = cur_read_offset + in_rdesc->size_in_wim;
1241
1242         out_offset_in_wim = out_fd->offset;
1243
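             /* A pipable source resource's offset points past its pipable blob
              * header; back up so that header is copied too, but record output
              * blob offsets relative to the data that follows it.  */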
1244         if (in_rdesc->is_pipable) {
1245                 if (cur_read_offset < sizeof(struct pwm_blob_hdr))
1246                         return WIMLIB_ERR_INVALID_PIPABLE_WIM;
1247                 cur_read_offset -= sizeof(struct pwm_blob_hdr);
1248                 out_offset_in_wim += sizeof(struct pwm_blob_hdr);
1249         }
1250         in_fd = &in_rdesc->wim->in_fd;
1251         wimlib_assert(cur_read_offset != end_read_offset);
1252         do {
1253
1254                 bytes_to_read = min(sizeof(buf), end_read_offset - cur_read_offset);
1255
1256                 ret = full_pread(in_fd, buf, bytes_to_read, cur_read_offset);
1257                 if (ret)
1258                         return ret;
1259
1260                 ret = full_write(out_fd, buf, bytes_to_read);
1261                 if (ret)
1262                         return ret;
1263
1264                 cur_read_offset += bytes_to_read;
1265
1266         } while (cur_read_offset != end_read_offset);
1267
1268         list_for_each_entry(blob, &in_rdesc->blob_list, rdesc_node) {
1269                 if (blob->will_be_in_output_wim) {
1270                         blob_set_out_reshdr_for_reuse(blob);
1271                         if (in_rdesc->flags & WIM_RESHDR_FLAG_SOLID)
1272                                 blob->out_res_offset_in_wim = out_offset_in_wim;
1273                         else
1274                                 blob->out_reshdr.offset_in_wim = out_offset_in_wim;
1275
1276                 }
1277         }
1278         return 0;
1279 }
1280
1281 /* Copy a list of raw compressed resources located in other WIM file(s) to the
1282  * WIM file being written.  */
1283 static int
1284 write_raw_copy_resources(struct list_head *raw_copy_blobs,
1285                          struct filedes *out_fd,
1286                          struct write_blobs_progress_data *progress_data)
1287 {
1288         struct blob_descriptor *blob;
1289         int ret;
1290
1291         list_for_each_entry(blob, raw_copy_blobs, write_blobs_list)
1292                 blob->rdesc->raw_copy_ok = 1;
1293
1294         list_for_each_entry(blob, raw_copy_blobs, write_blobs_list) {
1295                 if (blob->rdesc->raw_copy_ok) {
1296                         /* Write each solid resource only one time.  */
1297                         ret = write_raw_copy_resource(blob->rdesc, out_fd);
1298                         if (ret)
1299                                 return ret;
1300                         blob->rdesc->raw_copy_ok = 0;
1301                 }
1302                 ret = do_write_blobs_progress(progress_data, blob->size,
1303                                               1, false);
1304                 if (ret)
1305                         return ret;
1306         }
1307         return 0;
1308 }
1309
1310 /* Wait for and write all chunks pending in the compressor.  */
1311 static int
1312 finish_remaining_chunks(struct write_blobs_ctx *ctx)
1313 {
1314         const void *cdata;
1315         u32 csize;
1316         u32 usize;
1317         int ret;
1318
1319         if (ctx->compressor == NULL)
1320                 return 0;
1321
1322         if (ctx->cur_chunk_buf_filled != 0) {
1323                 ctx->compressor->signal_chunk_filled(ctx->compressor,
1324                                                      ctx->cur_chunk_buf_filled);
1325         }
1326
1327         while (ctx->compressor->get_compression_result(ctx->compressor, &cdata,
1328                                                        &csize, &usize))
1329         {
1330                 ret = write_chunk(ctx, cdata, csize, usize);
1331                 if (ret)
1332                         return ret;
1333         }
1334         return 0;
1335 }
1336
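/* Remove zero-length blobs from the list of blobs to write.  Empty blobs need
 * no resource data; their output resource headers are filled in here directly
 * so that they still receive blob table entries.  */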
1337 static void
1338 remove_empty_blobs(struct list_head *blob_list)
1339 {
1340         struct blob_descriptor *blob, *tmp;
1341
1342         list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
1343                 wimlib_assert(blob->will_be_in_output_wim);
1344                 if (blob->size == 0) {
1345                         list_del(&blob->write_blobs_list);
1346                         blob->out_reshdr.offset_in_wim = 0;
1347                         blob->out_reshdr.size_in_wim = 0;
1348                         blob->out_reshdr.uncompressed_size = 0;
1349                         blob->out_reshdr.flags = reshdr_flags_for_blob(blob);
1350                 }
1351         }
1352 }
1353
1354 static inline bool
1355 blob_is_in_file(const struct blob_descriptor *blob)
1356 {
1357         return blob->blob_location == BLOB_IN_FILE_ON_DISK
1358 #ifdef __WIN32__
1359             || blob->blob_location == BLOB_IN_WINNT_FILE_ON_DISK
1360             || blob->blob_location == BLOB_WIN32_ENCRYPTED
1361 #endif
1362            ;
1363 }
1364
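/* For each blob that will be read from an external file, count how many of
 * that file's streams remain to be read.  This allows "done with file"
 * progress messages to be sent once the last stream of a file has been
 * consumed.  */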
1365 static void
1366 init_done_with_file_info(struct list_head *blob_list)
1367 {
1368         struct blob_descriptor *blob;
1369
1370         list_for_each_entry(blob, blob_list, write_blobs_list) {
1371                 if (blob_is_in_file(blob)) {
1372                         blob->file_inode->num_remaining_streams = 0;
1373                         blob->may_send_done_with_file = 1;
1374                 } else {
1375                         blob->may_send_done_with_file = 0;
1376                 }
1377         }
1378
1379         list_for_each_entry(blob, blob_list, write_blobs_list)
1380                 if (blob->may_send_done_with_file)
1381                         blob->file_inode->num_remaining_streams++;
1382 }
1383
1384 /*
1385  * Write a list of blobs to the output WIM file.
1386  *
1387  * @blob_list
1388  *      The list of blobs to write, specified by a list of 'struct blob_descriptor' linked
1389  *      by the 'write_blobs_list' member.
1390  *
1391  * @out_fd
1392  *      The file descriptor, opened for writing, to which to write the blobs.
1393  *
1394  * @write_resource_flags
1395  *      Flags to modify how the blobs are written:
1396  *
1397  *      WRITE_RESOURCE_FLAG_RECOMPRESS:
1398  *              Force compression of all resources, even if they could otherwise
1399  *              be re-used by copying the raw data, due to being located in a WIM
1400  *              file with compatible compression parameters.
1401  *
1402  *      WRITE_RESOURCE_FLAG_PIPABLE:
1403  *              Write the resources in the wimlib-specific pipable format, and
1404  *              furthermore do so in such a way that no seeking backwards in
1405  *              @out_fd will be performed (so it may be a pipe).
1406  *
1407  *      WRITE_RESOURCE_FLAG_SOLID:
1408  *              Combine all the blobs into a single resource rather than writing
1409  *              them in separate resources.  This flag is only valid if the WIM
1410  *              version number has been, or will be, set to WIM_VERSION_SOLID.
1411  *              This flag may not be combined with WRITE_RESOURCE_FLAG_PIPABLE.
1412  *
1413  * @out_ctype
1414  *      Compression format to use in the output resources, specified as one of
1415  *      the WIMLIB_COMPRESSION_TYPE_* constants.  WIMLIB_COMPRESSION_TYPE_NONE
1416  *      is allowed.
1417  *
1418  * @out_chunk_size
1419  *      Compression chunk size to use in the output resources.  It must be a
1420  *      valid chunk size for the specified compression format @out_ctype, unless
1421  *      @out_ctype is WIMLIB_COMPRESSION_TYPE_NONE, in which case this parameter
1422  *      is ignored.
1423  *
1424  * @num_threads
1425  *      Number of threads to use to compress data.  If 0, a default number of
1426  *      threads will be chosen.  The number of threads may still be decreased
1427  *      from the specified value if insufficient memory is detected.
1428  *
1429  * @blob_table
1430  *      If on-the-fly deduplication of unhashed blobs is desired, this parameter
1431  *      must be a pointer to the blob table for the WIMStruct on whose behalf the
1432  *      blobs are being written.  Otherwise, this parameter can be NULL.
1433  *
1434  * @filter_ctx
1435  *      If on-the-fly deduplication of unhashed blobs is desired, this parameter
1436  *      can be a pointer to a context for blob filtering used to detect whether
1437  *      the duplicate blob has been hard-filtered or not.  If no blobs are
1438  *      hard-filtered or no blobs are unhashed, this parameter can be NULL.
1439  *
1440  * This function will write the blobs in @blob_list to resources in
1441  * consecutive positions in the output WIM file, or to a single solid resource
1442  * if WRITE_RESOURCE_FLAG_SOLID was specified in @write_resource_flags.  In both
1443  * cases, the @out_reshdr of the `struct blob_descriptor' for each blob written will be
1444  * updated to specify its location, size, and flags in the output WIM.  In the
1445  * solid resource case, WIM_RESHDR_FLAG_SOLID will be set in the @flags field of
1446  * each @out_reshdr, and furthermore @out_res_offset_in_wim and
1447  * @out_res_size_in_wim of each @out_reshdr will be set to the offset and size,
1448  * respectively, in the output WIM of the solid resource containing the
1449  * corresponding blob.
1450  *
1451  * Each of the blobs to write may be in any location supported by the
1452  * resource-handling code (specifically, read_blob_list()), such as the contents
1453  * of an external file that has been logically added to the output WIM, or a blob
1454  * in another WIM file that has been imported, or even a blob in the "same" WIM
1455  * file of which a modified copy is being written.  In the case that a blob is
1456  * already in a WIM file and uses compatible compression parameters, by default
1457  * this function will re-use the raw data instead of decompressing it, then
1458  * recompressing it; however, with WRITE_RESOURCE_FLAG_RECOMPRESS
1459  * specified in @write_resource_flags, this is not done.
1460  *
1461  * In addition, this function requires that the @will_be_in_output_wim member
1462  * be set to 1 on all blobs in @blob_list as well as any other blobs not in
1463  * @blob_list that will be in the output WIM file, but set to 0 on any other
1464  * blobs in the output WIM's blob table or sharing a solid resource with a blob
1465  * in @blob_list.  Furthermore, if on-the-fly
1466  * deduplication of blobs is possible, then all blobs in @blob_list must also be
1467  * linked by @blob_table_list along with any other blobs that have
1468  * @will_be_in_output_wim set.
1469  *
1470  * This function handles on-the-fly deduplication of blobs for which SHA-1
1471  * message digests have not yet been calculated.  Such blobs may or may not need
1472  * to be written.  If @blob_table is non-NULL, then each blob in @blob_list that
1473  * has @unhashed set but not @unique_size set is checksummed immediately before
1474  * it would otherwise be read for writing in order to determine if it is
1475  * identical to another blob already being written or one that would be filtered
1476  * out of the output WIM using blob_filtered() with the context @filter_ctx.
1477  * Each such duplicate blob will be removed from @blob_list, its reference count
1478  * transferred to the pre-existing duplicate blob, its memory freed, and will not
1479  * be written.  Alternatively, if a blob in @blob_list is a duplicate of any
1480  * blob in @blob_table that has not been marked for writing or would not be
1481  * hard-filtered, it is freed and the pre-existing duplicate is written instead,
1482  * taking ownership of the reference count and slot in the @blob_table_list.
1483  *
1484  * Returns 0 if every blob was either written successfully or did not need to be
1485  * written; otherwise returns a non-zero error code.
1486  */
1487 static int
1488 write_blob_list(struct list_head *blob_list,
1489                 struct filedes *out_fd,
1490                 int write_resource_flags,
1491                 int out_ctype,
1492                 u32 out_chunk_size,
1493                 unsigned num_threads,
1494                 struct blob_table *blob_table,
1495                 struct filter_context *filter_ctx,
1496                 wimlib_progress_func_t progfunc,
1497                 void *progctx)
1498 {
1499         int ret;
1500         struct write_blobs_ctx ctx;
1501         struct list_head raw_copy_blobs;
1502
1503         wimlib_assert((write_resource_flags &
1504                        (WRITE_RESOURCE_FLAG_SOLID |
1505                         WRITE_RESOURCE_FLAG_PIPABLE)) !=
1506                                 (WRITE_RESOURCE_FLAG_SOLID |
1507                                  WRITE_RESOURCE_FLAG_PIPABLE));
1508
1509         remove_empty_blobs(blob_list);
1510
1511         if (list_empty(blob_list))
1512                 return 0;
1513
1514         /* If needed, set auxiliary information so that we can detect when the
1515          * library has finished using each external file.  */
1516         if (unlikely(write_resource_flags & WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE))
1517                 init_done_with_file_info(blob_list);
1518
1519         memset(&ctx, 0, sizeof(ctx));
1520
1521         ctx.out_fd = out_fd;
1522         ctx.blob_table = blob_table;
1523         ctx.out_ctype = out_ctype;
1524         ctx.out_chunk_size = out_chunk_size;
1525         ctx.write_resource_flags = write_resource_flags;
1526         ctx.filter_ctx = filter_ctx;
1527
1528         /*
1529          * We normally sort the blobs to write by a "sequential" order that is
1530          * optimized for reading.  But when using solid compression, we instead
1531          * sort the blobs by file extension and file name (when applicable; and
1532          * we don't do this for blobs from solid resources) so that similar
1533          * files are grouped together, which improves the compression ratio.
1534          * This is somewhat of a hack since a blob does not necessarily
1535          * correspond one-to-one with a filename, nor is there any guarantee
1536          * that two files with similar names or extensions are actually similar
1537          * in content.  A potential TODO is to sort the blobs based on some
1538          * measure of similarity of their actual contents.
1539          */
1540
1541         ret = sort_blob_list_by_sequential_order(blob_list,
1542                                                  offsetof(struct blob_descriptor,
1543                                                           write_blobs_list));
1544         if (ret)
1545                 return ret;
1546
1547         compute_blob_list_stats(blob_list, &ctx);
1548
1549         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID_SORT) {
1550                 ret = sort_blob_list_for_solid_compression(blob_list);
1551                 if (unlikely(ret))
1552                         WARNING("Failed to sort blobs for solid compression. Continuing anyway.");
1553         }
1554
1555         ctx.progress_data.progfunc = progfunc;
1556         ctx.progress_data.progctx = progctx;
1557
1558         ctx.num_bytes_to_compress = find_raw_copy_blobs(blob_list,
1559                                                         write_resource_flags,
1560                                                         out_ctype,
1561                                                         out_chunk_size,
1562                                                         &raw_copy_blobs);
1563
1564         if (ctx.num_bytes_to_compress == 0)
1565                 goto out_write_raw_copy_resources;
1566
1567         /* Unless uncompressed output was required, allocate a chunk_compressor
1568          * to do compression.  There are serial and parallel implementations of
1569          * the chunk_compressor interface.  We default to parallel using the
1570          * specified number of threads, unless the upper bound on the number of
1571          * bytes needing to be compressed is less than a heuristic value.  */
1572         if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
1573
1574         #ifdef ENABLE_MULTITHREADED_COMPRESSION
1575                 if (ctx.num_bytes_to_compress > max(2000000, out_chunk_size)) {
1576                         ret = new_parallel_chunk_compressor(out_ctype,
1577                                                             out_chunk_size,
1578                                                             num_threads, 0,
1579                                                             &ctx.compressor);
1580                         if (ret > 0) {
1581                                 WARNING("Couldn't create parallel chunk compressor: %"TS".\n"
1582                                         "          Falling back to single-threaded compression.",
1583                                         wimlib_get_error_string(ret));
1584                         }
1585                 }
1586         #endif
1587
1588                 if (ctx.compressor == NULL) {
1589                         ret = new_serial_chunk_compressor(out_ctype, out_chunk_size,
1590                                                           &ctx.compressor);
1591                         if (ret)
1592                                 goto out_destroy_context;
1593                 }
1594         }
1595
1596         if (ctx.compressor)
1597                 ctx.progress_data.progress.write_streams.num_threads = ctx.compressor->num_threads;
1598         else
1599                 ctx.progress_data.progress.write_streams.num_threads = 1;
1600
1601         INIT_LIST_HEAD(&ctx.blobs_being_compressed);
1602         INIT_LIST_HEAD(&ctx.blobs_in_solid_resource);
1603
1604         ret = call_progress(ctx.progress_data.progfunc,
1605                             WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
1606                             &ctx.progress_data.progress,
1607                             ctx.progress_data.progctx);
1608         if (ret)
1609                 goto out_destroy_context;
1610
1611         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1612                 ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress);
1613                 if (ret)
1614                         goto out_destroy_context;
1615         }
1616
1617         /* Read the list of blobs needing to be compressed, using the specified
1618          * callbacks to execute processing of the data.  */
1619
1620         struct read_blob_list_callbacks cbs = {
1621                 .begin_blob             = write_blob_begin_read,
1622                 .begin_blob_ctx         = &ctx,
1623                 .consume_chunk          = write_blob_process_chunk,
1624                 .consume_chunk_ctx      = &ctx,
1625                 .end_blob               = write_blob_end_read,
1626                 .end_blob_ctx           = &ctx,
1627         };
1628
1629         ret = read_blob_list(blob_list,
1630                              offsetof(struct blob_descriptor, write_blobs_list),
1631                              &cbs,
1632                              BLOB_LIST_ALREADY_SORTED |
1633                                 VERIFY_BLOB_HASHES |
1634                                 COMPUTE_MISSING_BLOB_HASHES);
1635
1636         if (ret)
1637                 goto out_destroy_context;
1638
1639         ret = finish_remaining_chunks(&ctx);
1640         if (ret)
1641                 goto out_destroy_context;
1642
1643         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1644                 struct wim_reshdr reshdr;
1645                 struct blob_descriptor *blob;
1646                 u64 offset_in_res;
1647
1648                 ret = end_write_resource(&ctx, &reshdr);
1649                 if (ret)
1650                         goto out_destroy_context;
1651
1652                 offset_in_res = 0;
1653                 list_for_each_entry(blob, &ctx.blobs_in_solid_resource, write_blobs_list) {
1654                         blob->out_reshdr.size_in_wim = blob->size;
1655                         blob->out_reshdr.flags = reshdr_flags_for_blob(blob) |
1656                                                  WIM_RESHDR_FLAG_SOLID;
1657                         blob->out_reshdr.uncompressed_size = 0;
1658                         blob->out_reshdr.offset_in_wim = offset_in_res;
1659                         blob->out_res_offset_in_wim = reshdr.offset_in_wim;
1660                         blob->out_res_size_in_wim = reshdr.size_in_wim;
1661                         blob->out_res_uncompressed_size = reshdr.uncompressed_size;
1662                         offset_in_res += blob->size;
1663                 }
1664                 wimlib_assert(offset_in_res == reshdr.uncompressed_size);
1665         }
1666
1667 out_write_raw_copy_resources:
1668         /* Copy any compressed resources for which the raw data can be reused
1669          * without decompression.  */
1670         ret = write_raw_copy_resources(&raw_copy_blobs, ctx.out_fd,
1671                                        &ctx.progress_data);
1672
1673 out_destroy_context:
1674         FREE(ctx.chunk_csizes);
1675         if (ctx.compressor)
1676                 ctx.compressor->destroy(ctx.compressor);
1677         return ret;
1678 }
1679
1680
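/* Write the file data blobs in @blob_list to the output WIM, choosing solid or
 * non-solid compression parameters based on the write flags and the output WIM
 * version.  */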
1681 static int
1682 write_file_data_blobs(WIMStruct *wim,
1683                       struct list_head *blob_list,
1684                       int write_flags,
1685                       unsigned num_threads,
1686                       struct filter_context *filter_ctx)
1687 {
1688         int out_ctype;
1689         u32 out_chunk_size;
1690         int write_resource_flags;
1691
1692         write_resource_flags = write_flags_to_resource_flags(write_flags);
1693
1694         /* wimlib v1.7.0: create a solid WIM file by default if the WIM version
1695          * has been set to WIM_VERSION_SOLID and at least one blob in the WIM's
1696          * blob table is located in a solid resource (may be the same WIM, or a
1697          * different one in the case of export).  */
1698         if (wim->out_hdr.wim_version == WIM_VERSION_SOLID &&
1699             wim_has_solid_resources(wim))
1700         {
1701                 write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID;
1702         }
1703
1704         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1705                 out_chunk_size = wim->out_solid_chunk_size;
1706                 out_ctype = wim->out_solid_compression_type;
1707         } else {
1708                 out_chunk_size = wim->out_chunk_size;
1709                 out_ctype = wim->out_compression_type;
1710         }
1711
1712         return write_blob_list(blob_list,
1713                                &wim->out_fd,
1714                                write_resource_flags,
1715                                out_ctype,
1716                                out_chunk_size,
1717                                num_threads,
1718                                wim->blob_table,
1719                                filter_ctx,
1720                                wim->progfunc,
1721                                wim->progctx);
1722 }
1723
1724 /* Write the contents of the specified blob as a WIM resource.  */
1725 static int
1726 write_wim_resource(struct blob_descriptor *blob,
1727                    struct filedes *out_fd,
1728                    int out_ctype,
1729                    u32 out_chunk_size,
1730                    int write_resource_flags)
1731 {
1732         LIST_HEAD(blob_list);
1733         list_add(&blob->write_blobs_list, &blob_list);
1734         blob->will_be_in_output_wim = 1;
1735         return write_blob_list(&blob_list,
1736                                out_fd,
1737                                write_resource_flags & ~WRITE_RESOURCE_FLAG_SOLID,
1738                                out_ctype,
1739                                out_chunk_size,
1740                                1,
1741                                NULL,
1742                                NULL,
1743                                NULL,
1744                                NULL);
1745 }
1746
1747 /* Write the contents of the specified buffer as a WIM resource.  */
1748 int
1749 write_wim_resource_from_buffer(const void *buf,
1750                                size_t buf_size,
1751                                bool is_metadata,
1752                                struct filedes *out_fd,
1753                                int out_ctype,
1754                                u32 out_chunk_size,
1755                                struct wim_reshdr *out_reshdr,
1756                                u8 *hash_ret,
1757                                int write_resource_flags)
1758 {
1759         int ret;
1760         struct blob_descriptor blob;
1761
1762         blob_set_is_located_in_attached_buffer(&blob, (void *)buf, buf_size);
1763         sha1_buffer(buf, buf_size, blob.hash);
1764         blob.unhashed = 0;
1765         blob.is_metadata = is_metadata;
1766
1767         ret = write_wim_resource(&blob, out_fd, out_ctype, out_chunk_size,
1768                                  write_resource_flags);
1769         if (ret)
1770                 return ret;
1771
1772         copy_reshdr(out_reshdr, &blob.out_reshdr);
1773
1774         if (hash_ret)
1775                 copy_hash(hash_ret, blob.hash);
1776         return 0;
1777 }
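/*
 * Illustrative sketch (not part of the library): a hypothetical caller with a
 * small in-memory buffer and an already-open output file descriptor 'out_fd'
 * could write the buffer as an uncompressed, non-metadata resource roughly as
 * follows.  The usual SHA-1 digest length of 20 bytes is assumed for 'hash'.
 *
 *      struct wim_reshdr reshdr;
 *      u8 hash[20];
 *      int ret = write_wim_resource_from_buffer(buf, buf_size, false, &out_fd,
 *                                               WIMLIB_COMPRESSION_TYPE_NONE,
 *                                               0, &reshdr, hash, 0);
 *
 * On success, 'reshdr' describes the location and size of the new resource in
 * the output WIM, and 'hash' holds the buffer's SHA-1 message digest.
 */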
1778
1779 struct blob_size_table {
1780         struct hlist_head *array;
1781         size_t num_entries;
1782         size_t capacity;
1783 };
1784
1785 static int
1786 init_blob_size_table(struct blob_size_table *tab, size_t capacity)
1787 {
1788         tab->array = CALLOC(capacity, sizeof(tab->array[0]));
1789         if (tab->array == NULL)
1790                 return WIMLIB_ERR_NOMEM;
1791         tab->num_entries = 0;
1792         tab->capacity = capacity;
1793         return 0;
1794 }
1795
1796 static void
1797 destroy_blob_size_table(struct blob_size_table *tab)
1798 {
1799         FREE(tab->array);
1800 }
1801
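/* Insert a blob into the size table.  If another blob with the same
 * uncompressed size has already been inserted, clear the @unique_size flag on
 * both blobs; otherwise the new blob is (so far) considered to have a unique
 * size.  */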
1802 static int
1803 blob_size_table_insert(struct blob_descriptor *blob, void *_tab)
1804 {
1805         struct blob_size_table *tab = _tab;
1806         size_t pos;
1807         struct blob_descriptor *same_size_blob;
1808
1809         pos = hash_u64(blob->size) % tab->capacity;
1810         blob->unique_size = 1;
1811         hlist_for_each_entry(same_size_blob, &tab->array[pos], hash_list_2) {
1812                 if (same_size_blob->size == blob->size) {
1813                         blob->unique_size = 0;
1814                         same_size_blob->unique_size = 0;
1815                         break;
1816                 }
1817         }
1818
1819         hlist_add_head(&blob->hash_list_2, &tab->array[pos]);
1820         tab->num_entries++;
1821         return 0;
1822 }
1823
1824 struct find_blobs_ctx {
1825         WIMStruct *wim;
1826         int write_flags;
1827         struct list_head blob_list;
1828         struct blob_size_table blob_size_tab;
1829 };
1830
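/* Add @nref references to @blob for the output WIM, adding the blob to
 * @blob_list and marking it as being in the output WIM if this is its first
 * reference.  */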
1831 static void
1832 reference_blob_for_write(struct blob_descriptor *blob,
1833                          struct list_head *blob_list, u32 nref)
1834 {
1835         if (!blob->will_be_in_output_wim) {
1836                 blob->out_refcnt = 0;
1837                 list_add_tail(&blob->write_blobs_list, blob_list);
1838                 blob->will_be_in_output_wim = 1;
1839         }
1840         blob->out_refcnt += nref;
1841 }
1842
1843 static int
1844 fully_reference_blob_for_write(struct blob_descriptor *blob, void *_blob_list)
1845 {
1846         struct list_head *blob_list = _blob_list;
1847         blob->will_be_in_output_wim = 0;
1848         reference_blob_for_write(blob, blob_list, blob->refcnt);
1849         return 0;
1850 }
1851
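/* Reference, for writing, each blob used by the streams of @inode, counting
 * one reference per link to the inode.  A stream with a nonzero hash but no
 * corresponding blob in @table is treated as a missing blob (error).  */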
1852 static int
1853 inode_find_blobs_to_reference(const struct wim_inode *inode,
1854                               const struct blob_table *table,
1855                               struct list_head *blob_list)
1856 {
1857         wimlib_assert(inode->i_nlink > 0);
1858
1859         for (unsigned i = 0; i < inode->i_num_streams; i++) {
1860                 struct blob_descriptor *blob;
1861                 const u8 *hash;
1862
1863                 blob = stream_blob(&inode->i_streams[i], table);
1864                 if (blob) {
1865                         reference_blob_for_write(blob, blob_list, inode->i_nlink);
1866                 } else {
1867                         hash = stream_hash(&inode->i_streams[i]);
1868                         if (!is_zero_hash(hash))
1869                                 return blob_not_found_error(inode, hash);
1870                 }
1871         }
1872         return 0;
1873 }
1874
1875 static int
1876 do_blob_set_not_in_output_wim(struct blob_descriptor *blob, void *_ignore)
1877 {
1878         blob->will_be_in_output_wim = 0;
1879         return 0;
1880 }
1881
1882 static int
1883 image_find_blobs_to_reference(WIMStruct *wim)
1884 {
1885         struct wim_image_metadata *imd;
1886         struct wim_inode *inode;
1887         struct blob_descriptor *blob;
1888         struct list_head *blob_list;
1889         int ret;
1890
1891         imd = wim_get_current_image_metadata(wim);
1892
1893         image_for_each_unhashed_blob(blob, imd)
1894                 blob->will_be_in_output_wim = 0;
1895
1896         blob_list = wim->private;
1897         image_for_each_inode(inode, imd) {
1898                 ret = inode_find_blobs_to_reference(inode,
1899                                                     wim->blob_table,
1900                                                     blob_list);
1901                 if (ret)
1902                         return ret;
1903         }
1904         return 0;
1905 }
1906
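/* Build the initial, unfiltered list of blobs to include in the output WIM:
 * either take every blob in the blob table (fast case, when all images are
 * being written and the reference counts can be trusted) or walk the metadata
 * of the image(s) being written to find the referenced blobs (slow case).  */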
1907 static int
1908 prepare_unfiltered_list_of_blobs_in_output_wim(WIMStruct *wim,
1909                                                int image,
1910                                                int blobs_ok,
1911                                                struct list_head *blob_list_ret)
1912 {
1913         int ret;
1914
1915         INIT_LIST_HEAD(blob_list_ret);
1916
1917         if (blobs_ok && (image == WIMLIB_ALL_IMAGES ||
1918                          (image == 1 && wim->hdr.image_count == 1)))
1919         {
1920                 /* Fast case:  Assume that all blobs are being written and that
1921                  * the reference counts are correct.  */
1922                 struct blob_descriptor *blob;
1923                 struct wim_image_metadata *imd;
1924                 unsigned i;
1925
1926                 for_blob_in_table(wim->blob_table,
1927                                   fully_reference_blob_for_write,
1928                                   blob_list_ret);
1929
1930                 for (i = 0; i < wim->hdr.image_count; i++) {
1931                         imd = wim->image_metadata[i];
1932                         image_for_each_unhashed_blob(blob, imd)
1933                                 fully_reference_blob_for_write(blob, blob_list_ret);
1934                 }
1935         } else {
1936                 /* Slow case:  Walk through the images being written and
1937                  * determine the blobs referenced.  */
1938                 for_blob_in_table(wim->blob_table,
1939                                   do_blob_set_not_in_output_wim, NULL);
1940                 wim->private = blob_list_ret;
1941                 ret = for_image(wim, image, image_find_blobs_to_reference);
1942                 if (ret)
1943                         return ret;
1944         }
1945
1946         return 0;
1947 }
1948
1949 struct insert_other_if_hard_filtered_ctx {
1950         struct blob_size_table *tab;
1951         struct filter_context *filter_ctx;
1952 };
1953
1954 static int
1955 insert_other_if_hard_filtered(struct blob_descriptor *blob, void *_ctx)
1956 {
1957         struct insert_other_if_hard_filtered_ctx *ctx = _ctx;
1958
1959         if (!blob->will_be_in_output_wim &&
1960             blob_hard_filtered(blob, ctx->filter_ctx))
1961                 blob_size_table_insert(blob, ctx->tab);
1962         return 0;
1963 }
1964
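/* Determine which blobs have a unique uncompressed size among the blobs being
 * written (and among hard-filtered blobs they could duplicate).  A blob with a
 * unique size cannot be a duplicate, so it can skip the early checksumming
 * done for on-the-fly deduplication.  */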
1965 static int
1966 determine_blob_size_uniquity(struct list_head *blob_list,
1967                              struct blob_table *lt,
1968                              struct filter_context *filter_ctx)
1969 {
1970         int ret;
1971         struct blob_size_table tab;
1972         struct blob_descriptor *blob;
1973
1974         ret = init_blob_size_table(&tab, 9001);
1975         if (ret)
1976                 return ret;
1977
1978         if (may_hard_filter_blobs(filter_ctx)) {
1979                 struct insert_other_if_hard_filtered_ctx ctx = {
1980                         .tab = &tab,
1981                         .filter_ctx = filter_ctx,
1982                 };
1983                 for_blob_in_table(lt, insert_other_if_hard_filtered, &ctx);
1984         }
1985
1986         list_for_each_entry(blob, blob_list, write_blobs_list)
1987                 blob_size_table_insert(blob, &tab);
1988
1989         destroy_blob_size_table(&tab);
1990         return 0;
1991 }
1992
1993 static void
1994 filter_blob_list_for_write(struct list_head *blob_list,
1995                            struct filter_context *filter_ctx)
1996 {
1997         struct blob_descriptor *blob, *tmp;
1998
1999         list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
2000                 int status = blob_filtered(blob, filter_ctx);
2001
2002                 if (status == 0) {
2003                         /* Not filtered.  */
2004                         continue;
2005                 } else {
2006                         if (status > 0) {
2007                                 /* Soft filtered.  */
2008                         } else {
2009                                 /* Hard filtered.  */
2010                                 blob->will_be_in_output_wim = 0;
2011                                 list_del(&blob->blob_table_list);
2012                         }
2013                         list_del(&blob->write_blobs_list);
2014                 }
2015         }
2016 }
2017
2018 /*
2019  * prepare_blob_list_for_write() -
2020  *
2021  * Prepare the list of blobs to write for writing a WIM containing the specified
2022  * image(s) with the specified write flags.
2023  *
2024  * @wim
2025  *      The WIMStruct on whose behalf the write is occurring.
2026  *
2027  * @image
2028  *      Image(s) from the WIM to write; may be WIMLIB_ALL_IMAGES.
2029  *
2030  * @write_flags
2031  *      WIMLIB_WRITE_FLAG_* flags for the write operation:
2032  *
2033  *      STREAMS_OK:  For writes of all images, assume that all blobs in the blob
2034  *      table of @wim and the per-image lists of unhashed blobs should be taken
2035  *      as-is, and image metadata should not be searched for references.  This
2036  *      does not exclude filtering with OVERWRITE and SKIP_EXTERNAL_WIMS, below.
2037  *
2038  *      OVERWRITE:  Blobs already present in @wim shall not be returned in
2039  *      @blob_list_ret.
2040  *
2041  *      SKIP_EXTERNAL_WIMS:  Blobs already present in a WIM file, but not @wim,
2042  *      shall be returned in neither @blob_list_ret nor @blob_table_list_ret.
2043  *
2044  * @blob_list_ret
2045  *      List of blobs, linked by write_blobs_list, that need to be written will
2046  *      be returned here.
2047  *
2048  *      Note that this function assumes that unhashed blobs will be written; it
2049  *      does not take into account that they may become duplicates when actually
2050  *      hashed.
2051  *
2052  * @blob_table_list_ret
2053  *      List of blobs, linked by blob_table_list, that need to be included in
2054  *      the WIM's blob table will be returned here.  This will be a superset of
2055  *      the blobs in @blob_list_ret.
2056  *
2057  *      This list will be a proper superset of @blob_list_ret if and only if
2058  *      WIMLIB_WRITE_FLAG_OVERWRITE was specified in @write_flags and some of
2059  *      the blobs that would otherwise need to be written were already located
2060  *      in the WIM file.
2061  *
2062  *      All blobs in this list will have @out_refcnt set to the number of
2063  *      references to the blob in the output WIM.  If
2064  *      WIMLIB_WRITE_FLAG_STREAMS_OK was specified in @write_flags, @out_refcnt
2065  *      may be as low as 0.
2066  *
2067  * @filter_ctx_ret
2068  *      A context that can later be passed to blob_filtered() to query the
2069  *      filter status of blobs is returned in this location.
2070  *
2071  * In addition, @will_be_in_output_wim will be set to 1 in all blobs inserted
2072  * into @blob_table_list_ret and to 0 in all blobs in the blob table of @wim not
2073  * inserted into @blob_table_list_ret.
2074  *
2075  * Finally, @unique_size will be set to 1 on all blobs in
2076  * @blob_list_ret that have unique size among all blobs in @blob_list_ret and
2077  * among all blobs in the blob table of @wim that are ineligible for being
2078  * written due to filtering.
2079  *
2080  * Returns 0 on success; nonzero on read error, memory allocation error, or
2081  * otherwise.
2082  */
2083 static int
2084 prepare_blob_list_for_write(WIMStruct *wim, int image,
2085                             int write_flags,
2086                             struct list_head *blob_list_ret,
2087                             struct list_head *blob_table_list_ret,
2088                             struct filter_context *filter_ctx_ret)
2089 {
2090         int ret;
2091         struct blob_descriptor *blob;
2092
2093         filter_ctx_ret->write_flags = write_flags;
2094         filter_ctx_ret->wim = wim;
2095
2096         ret = prepare_unfiltered_list_of_blobs_in_output_wim(
2097                                 wim,
2098                                 image,
2099                                 write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK,
2100                                 blob_list_ret);
2101         if (ret)
2102                 return ret;
2103
2104         INIT_LIST_HEAD(blob_table_list_ret);
2105         list_for_each_entry(blob, blob_list_ret, write_blobs_list)
2106                 list_add_tail(&blob->blob_table_list, blob_table_list_ret);
2107
2108         ret = determine_blob_size_uniquity(blob_list_ret, wim->blob_table,
2109                                            filter_ctx_ret);
2110         if (ret)
2111                 return ret;
2112
2113         if (may_filter_blobs(filter_ctx_ret))
2114                 filter_blob_list_for_write(blob_list_ret, filter_ctx_ret);
2115
2116         return 0;
2117 }
2118
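/* Write the file data blobs needed by the specified image(s), or by the
 * explicitly provided blob list in the split WIM case, and return the list of
 * blobs to include in the output blob table in @blob_table_list_ret.  */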
2119 static int
2120 write_file_data(WIMStruct *wim, int image, int write_flags,
2121                 unsigned num_threads,
2122                 struct list_head *blob_list_override,
2123                 struct list_head *blob_table_list_ret)
2124 {
2125         int ret;
2126         struct list_head _blob_list;
2127         struct list_head *blob_list;
2128         struct blob_descriptor *blob;
2129         struct filter_context _filter_ctx;
2130         struct filter_context *filter_ctx;
2131
2132         if (blob_list_override == NULL) {
2133                 /* Normal case: prepare blob list from image(s) being written.
2134                  */
2135                 blob_list = &_blob_list;
2136                 filter_ctx = &_filter_ctx;
2137                 ret = prepare_blob_list_for_write(wim, image, write_flags,
2138                                                   blob_list,
2139                                                   blob_table_list_ret,
2140                                                   filter_ctx);
2141                 if (ret)
2142                         return ret;
2143         } else {
2144                 /* Currently only as a result of wimlib_split() being called:
2145                  * use blob list already explicitly provided.  Use existing
2146                  * reference counts.  */
2147                 blob_list = blob_list_override;
2148                 filter_ctx = NULL;
2149                 INIT_LIST_HEAD(blob_table_list_ret);
2150                 list_for_each_entry(blob, blob_list, write_blobs_list) {
2151                         blob->out_refcnt = blob->refcnt;
2152                         blob->will_be_in_output_wim = 1;
2153                         blob->unique_size = 0;
2154                         list_add_tail(&blob->blob_table_list, blob_table_list_ret);
2155                 }
2156         }
2157
2158         return write_file_data_blobs(wim,
2159                                      blob_list,
2160                                      write_flags,
2161                                      num_threads,
2162                                      filter_ctx);
2163 }
2164
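/* Write the metadata resources for the image(s) being included in the output
 * WIM.  Metadata for unmodified images is copied or reused rather than
 * rebuilt.  */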
2165 static int
2166 write_metadata_resources(WIMStruct *wim, int image, int write_flags)
2167 {
2168         int ret;
2169         int start_image;
2170         int end_image;
2171         int write_resource_flags;
2172
2173         if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)
2174                 return 0;
2175
2176         write_resource_flags = write_flags_to_resource_flags(write_flags);
2177
2178         write_resource_flags &= ~WRITE_RESOURCE_FLAG_SOLID;
2179
2180         ret = call_progress(wim->progfunc,
2181                             WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN,
2182                             NULL, wim->progctx);
2183         if (ret)
2184                 return ret;
2185
2186         if (image == WIMLIB_ALL_IMAGES) {
2187                 start_image = 1;
2188                 end_image = wim->hdr.image_count;
2189         } else {
2190                 start_image = image;
2191                 end_image = image;
2192         }
2193
2194         for (int i = start_image; i <= end_image; i++) {
2195                 struct wim_image_metadata *imd;
2196
2197                 imd = wim->image_metadata[i - 1];
2198                 /* Build a new metadata resource only if the image was modified
2199                  * from the original (or was newly added).  Otherwise just copy
2200                  * the existing one.  */
2201                 if (imd->modified) {
2202                         ret = write_metadata_resource(wim, i,
2203                                                       write_resource_flags);
2204                 } else if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
2205                         blob_set_out_reshdr_for_reuse(imd->metadata_blob);
2206                         ret = 0;
2207                 } else {
2208                         ret = write_wim_resource(imd->metadata_blob,
2209                                                  &wim->out_fd,
2210                                                  wim->out_compression_type,
2211                                                  wim->out_chunk_size,
2212                                                  write_resource_flags);
2213                 }
2214                 if (ret)
2215                         return ret;
2216         }
2217
2218         return call_progress(wim->progfunc,
2219                              WIMLIB_PROGRESS_MSG_WRITE_METADATA_END,
2220                              NULL, wim->progctx);
2221 }
2222
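/* Open the file at @path for writing the output WIM and initialize
 * wim->out_fd with the resulting file descriptor.  */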
2223 static int
2224 open_wim_writable(WIMStruct *wim, const tchar *path, int open_flags)
2225 {
2226         int raw_fd = topen(path, open_flags | O_BINARY, 0644);
2227         if (raw_fd < 0) {
2228                 ERROR_WITH_ERRNO("Failed to open \"%"TS"\" for writing", path);
2229                 return WIMLIB_ERR_OPEN;
2230         }
2231         filedes_init(&wim->out_fd, raw_fd);
2232         return 0;
2233 }
2234
2235 static int
2236 close_wim_writable(WIMStruct *wim, int write_flags)
2237 {
2238         int ret = 0;
2239
2240         if (!(write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR))
2241                 if (filedes_valid(&wim->out_fd))
2242                         if (filedes_close(&wim->out_fd))
2243                                 ret = WIMLIB_ERR_WRITE;
2244         filedes_invalidate(&wim->out_fd);
2245         return ret;
2246 }
2247
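/* Comparison function for sorting blob table entries by their location in the
 * output WIM: non-solid resources come first, ordered by offset; blobs in
 * solid resources follow, grouped by their containing solid resource and
 * ordered by offset within it.  */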
2248 static int
2249 cmp_blobs_by_out_rdesc(const void *p1, const void *p2)
2250 {
2251         const struct blob_descriptor *blob1, *blob2;
2252
2253         blob1 = *(const struct blob_descriptor**)p1;
2254         blob2 = *(const struct blob_descriptor**)p2;
2255
2256         if (blob1->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) {
2257                 if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) {
2258                         if (blob1->out_res_offset_in_wim != blob2->out_res_offset_in_wim)
2259                                 return cmp_u64(blob1->out_res_offset_in_wim,
2260                                                blob2->out_res_offset_in_wim);
2261                 } else {
2262                         return 1;
2263                 }
2264         } else {
2265                 if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID)
2266                         return -1;
2267         }
2268         return cmp_u64(blob1->out_reshdr.offset_in_wim,
2269                        blob2->out_reshdr.offset_in_wim);
2270 }
2271
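/* Write the blob table to the output WIM, sorted by resource location and
 * including entries for the metadata resources and for blobs being reused in
 * place from the existing WIM.  */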
2272 static int
2273 write_blob_table(WIMStruct *wim, int image, int write_flags,
2274                  struct list_head *blob_table_list)
2275 {
2276         int ret;
2277
2278         /* Set output resource metadata for blobs already present in WIM.  */
2279         if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
2280                 struct blob_descriptor *blob;
2281                 list_for_each_entry(blob, blob_table_list, blob_table_list) {
2282                         if (blob->blob_location == BLOB_IN_WIM &&
2283                             blob->rdesc->wim == wim)
2284                         {
2285                                 blob_set_out_reshdr_for_reuse(blob);
2286                         }
2287                 }
2288         }
2289
2290         ret = sort_blob_list(blob_table_list,
2291                              offsetof(struct blob_descriptor, blob_table_list),
2292                              cmp_blobs_by_out_rdesc);
2293         if (ret)
2294                 return ret;
2295
2296         /* Add entries for metadata resources.  */
2297         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)) {
2298                 int start_image;
2299                 int end_image;
2300
2301                 if (image == WIMLIB_ALL_IMAGES) {
2302                         start_image = 1;
2303                         end_image = wim->hdr.image_count;
2304                 } else {
2305                         start_image = image;
2306                         end_image = image;
2307                 }
2308
2309                 /* Push metadata blob table entries onto the front of the list
2310                  * in reverse order, so that they're written in order.
2311                  */
2312                 for (int i = end_image; i >= start_image; i--) {
2313                         struct blob_descriptor *metadata_blob;
2314
2315                         metadata_blob = wim->image_metadata[i - 1]->metadata_blob;
2316                         wimlib_assert(metadata_blob->out_reshdr.flags & WIM_RESHDR_FLAG_METADATA);
2317                         metadata_blob->out_refcnt = 1;
2318                         list_add(&metadata_blob->blob_table_list, blob_table_list);
2319                 }
2320         }
2321
2322         return write_blob_table_from_blob_list(blob_table_list,
2323                                                &wim->out_fd,
2324                                                wim->out_hdr.part_number,
2325                                                &wim->out_hdr.blob_table_reshdr,
2326                                                write_flags_to_resource_flags(write_flags));
2327 }
2328
2329 /*
2330  * Finish writing a WIM file: write the blob table, xml data, and integrity
2331  * table, then overwrite the WIM header.
2332  *
2333  * The output file descriptor is closed on success, except when writing to a
2334  * user-specified file descriptor (WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR set).
2335  */
2336 static int
2337 finish_write(WIMStruct *wim, int image, int write_flags,
2338              struct list_head *blob_table_list)
2339 {
2340         int write_resource_flags;
2341         off_t old_blob_table_end = 0;
2342         struct integrity_table *old_integrity_table = NULL;
2343         off_t new_blob_table_end;
2344         u64 xml_totalbytes;
2345         int ret;
2346
2347         write_resource_flags = write_flags_to_resource_flags(write_flags);
2348
2349         /* In the WIM header, there is room for the resource entry for a
2350          * metadata resource labeled as the "boot metadata".  This entry should
2351          * be zeroed out if there is no bootable image (boot_idx 0).  Otherwise,
2352          * it should be a copy of the resource entry for the image that is
2353          * marked as bootable.  */
2354         if (wim->out_hdr.boot_idx == 0) {
2355                 zero_reshdr(&wim->out_hdr.boot_metadata_reshdr);
2356         } else {
2357                 copy_reshdr(&wim->out_hdr.boot_metadata_reshdr,
2358                             &wim->image_metadata[
2359                                 wim->out_hdr.boot_idx - 1]->metadata_blob->out_reshdr);
2360         }
2361
2362         /* If overwriting the WIM file containing an integrity table in-place,
2363          * we'd like to re-use the information in the old integrity table
2364          * instead of recalculating it.  But we might overwrite the old
2365          * integrity table when we expand the XML data.  Read it into memory
2366          * just in case.  */
2367         if ((write_flags & (WIMLIB_WRITE_FLAG_OVERWRITE |
2368                             WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)) ==
2369                 (WIMLIB_WRITE_FLAG_OVERWRITE |
2370                  WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
2371             && wim_has_integrity_table(wim))
2372         {
2373                 old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
2374                                      wim->hdr.blob_table_reshdr.size_in_wim;
2375                 (void)read_integrity_table(wim,
2376                                            old_blob_table_end - WIM_HEADER_DISK_SIZE,
2377                                            &old_integrity_table);
2378                 /* If we couldn't read the old integrity table, we can still
2379                  * re-calculate the full integrity table ourselves.  Hence the
2380                  * ignoring of the return value.  */
2381         }
2382
2383         /* Write blob table if needed.  */
2384         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)) {
2385                 ret = write_blob_table(wim, image, write_flags,
2386                                        blob_table_list);
2387                 if (ret) {
2388                         free_integrity_table(old_integrity_table);
2389                         return ret;
2390                 }
2391         }
2392
2393         /* Write XML data.  */
2394         xml_totalbytes = wim->out_fd.offset;
2395         if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES)
2396                 xml_totalbytes = WIM_TOTALBYTES_USE_EXISTING;
2397         ret = write_wim_xml_data(wim, image, xml_totalbytes,
2398                                  &wim->out_hdr.xml_data_reshdr,
2399                                  write_resource_flags);
2400         if (ret) {
2401                 free_integrity_table(old_integrity_table);
2402                 return ret;
2403         }
2404
2405         /* Write integrity table if needed.  */
2406         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2407                 if (write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS) {
2408                         /* The XML data we wrote may have overwritten part of
2409                          * the old integrity table, so while calculating the new
2410                          * integrity table we should temporarily update the WIM
2411                          * header to remove the integrity table reference.   */
2412                         struct wim_header checkpoint_hdr;
2413                         memcpy(&checkpoint_hdr, &wim->out_hdr, sizeof(struct wim_header));
2414                         zero_reshdr(&checkpoint_hdr.integrity_table_reshdr);
2415                         checkpoint_hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2416                         ret = write_wim_header(&checkpoint_hdr, &wim->out_fd, 0);
2417                         if (ret) {
2418                                 free_integrity_table(old_integrity_table);
2419                                 return ret;
2420                         }
2421                 }
2422
2423                 new_blob_table_end = wim->out_hdr.blob_table_reshdr.offset_in_wim +
2424                                      wim->out_hdr.blob_table_reshdr.size_in_wim;
2425
2426                 ret = write_integrity_table(wim,
2427                                             new_blob_table_end,
2428                                             old_blob_table_end,
2429                                             old_integrity_table);
2430                 free_integrity_table(old_integrity_table);
2431                 if (ret)
2432                         return ret;
2433         } else {
2434                 /* No integrity table.  */
2435                 zero_reshdr(&wim->out_hdr.integrity_table_reshdr);
2436         }
2437
2438         /* Now that all information in the WIM header has been determined, the
2439          * preliminary header written earlier can be overwritten, the header of
2440          * the existing WIM file can be overwritten, or the final header can be
2441          * written to the end of the pipable WIM.  */
2442         wim->out_hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2443         if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2444                 ret = write_wim_header(&wim->out_hdr, &wim->out_fd, wim->out_fd.offset);
2445         else
2446                 ret = write_wim_header(&wim->out_hdr, &wim->out_fd, 0);
2447         if (ret)
2448                 return ret;
2449
2450         /* Possibly sync file data to disk before closing.  On POSIX systems, it
2451          * is necessary to do this before using rename() to overwrite an
2452          * existing file with a new file.  Otherwise, data loss would occur if
2453          * the system is abruptly terminated when the metadata for the rename
2454          * operation has been written to disk, but the new file data has not.
2455          */
2456         if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
2457                 if (fsync(wim->out_fd.fd)) {
2458                         ERROR_WITH_ERRNO("Error syncing data to WIM file");
2459                         return WIMLIB_ERR_WRITE;
2460                 }
2461         }
2462
2463         if (close_wim_writable(wim, write_flags)) {
2464                 ERROR_WITH_ERRNO("Failed to close the output WIM file");
2465                 return WIMLIB_ERR_WRITE;
2466         }
2467
2468         return 0;
2469 }
2470
2471 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
2472
2473 /* Set an advisory lock on the WIM file (if not already done)  */
2474 int
2475 lock_wim_for_append(WIMStruct *wim)
2476 {
2477         if (wim->locked_for_append)
2478                 return 0;
2479         if (!flock(wim->in_fd.fd, LOCK_EX | LOCK_NB)) {
2480                 wim->locked_for_append = 1;
2481                 return 0;
2482         }
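        /* flock() failed.  Errors other than EWOULDBLOCK (e.g. file locking
         * not supported on this filesystem) are not treated as fatal; proceed
         * without the lock.  */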
2483         if (errno != EWOULDBLOCK)
2484                 return 0;
2485         return WIMLIB_ERR_ALREADY_LOCKED;
2486 }
2487
2488 /* Remove advisory lock on WIM file (if present)  */
2489 void
2490 unlock_wim_for_append(WIMStruct *wim)
2491 {
2492         if (wim->locked_for_append) {
2493                 flock(wim->in_fd.fd, LOCK_UN);
2494                 wim->locked_for_append = 0;
2495         }
2496 }
2497 #endif
2498
2499 /*
2500  * write_pipable_wim():
2501  *
2502  * Perform the intermediate stages of creating a "pipable" WIM (i.e. a WIM
2503  * capable of being applied from a pipe).
2504  *
2505  * Pipable WIMs are a wimlib-specific modification of the WIM format such that
2506  * images can be applied from them sequentially when the file data is sent over
2507  * a pipe.  In addition, a pipable WIM can be written sequentially to a pipe.
2508  * The modifications made to the WIM format for pipable WIMs are:
2509  *
2510  * - Magic characters in header are "WLPWM\0\0\0" (wimlib pipable WIM) instead
2511  *   of "MSWIM\0\0\0".  This lets wimlib know that the WIM is pipable and also
2512  *   stops other software from trying to read the file as a normal WIM.
2513  *
2514  * - The header at the beginning of the file does not contain all the normal
2515  *   information; in particular it will have all 0's for the blob table and XML
2516  *   data resource entries.  This is because this information cannot be
2517  *   determined until the blob table and XML data have been written.
2518  *   Consequently, wimlib will write the full header at the very end of the
2519  *   file.  The header at the end, however, is only used when reading the WIM
2520  *   from a seekable file (not a pipe).
2521  *
2522  * - An extra copy of the XML data is placed directly after the header.  This
2523  *   allows image names and sizes to be determined at an appropriate time when
2524  *   reading the WIM from a pipe.  This copy of the XML data is ignored if the
2525  *   WIM is read from a seekable file (not a pipe).
2526  *
2527  * - Solid resources are not allowed.  Each blob is always stored in its own
2528  *   resource.
2529  *
2530  * - The format of resources, or blobs, has been modified to allow them to be
2531  *   used before the "blob table" has been read.  Each blob is prefixed with a
2532  *   `struct pwm_blob_hdr' that is basically an abbreviated form of `struct
2533  *   blob_descriptor_disk' that only contains the SHA-1 message digest,
2534  *   uncompressed blob size, and flags that indicate whether the blob is
2535  *   compressed.  The data of uncompressed blobs then follows literally, while
2536  *   the data of compressed blobs follows in a modified format.  Compressed
2537  *   blobs do not begin with a chunk table, since the chunk table cannot be
2538  *   written until all chunks have been compressed.  Instead, each compressed
2539  *   chunk is prefixed by a `struct pwm_chunk_hdr' that gives its size.
2540  *   Furthermore, the chunk table is written at the end of the resource instead
2541  *   of the start.  Note: chunk offsets are given in the chunk table as if the
2542  *   `struct pwm_chunk_hdr's were not present; also, the chunk table is only
2543  *   used if the WIM is being read from a seekable file (not a pipe).
2544  *
2545  * - Metadata blobs always come before non-metadata blobs.  (This does not by
2546  *   itself constitute an incompatibility with normal WIMs, since this is valid
2547  *   in normal WIMs.)
2548  *
2549  * - At least up to the end of the blobs, all components must be packed as
2550  *   tightly as possible; there cannot be any "holes" in the WIM.  (This does
2551  *   not by itself constitute an incompatibility with normal WIMs, since this
2552  *   is valid in normal WIMs.)
2553  *
2554  * Note: the blob table, XML data, and header at the end are not used when
2555  * applying from a pipe.  They exist to support functionality such as image
2556  * application and export when the WIM is *not* read from a pipe.
2557  *
2558  *   Layout of pipable WIM:
2559  *
2560  * +--------+----------+--------------------+----------------+--------------+-----------+--------+
2561  * | Header | XML data | Metadata resources | File resources |  Blob table  | XML data  | Header |
2562  * +--------+----------+--------------------+----------------+--------------+-----------+--------+
2563  *
2564  *   Layout of normal WIM:
2565  *
2566  * +--------+-----------------------------+--------------+----------+
2567  * | Header | File and metadata resources |  Blob table  | XML data |
2568  * +--------+-----------------------------+--------------+----------+
2569  *
2570  * An optional integrity table can follow the final XML data in both normal and
2571  * pipable WIMs.  However, due to implementation details, wimlib currently can
2572  * only include an integrity table in a pipable WIM when writing it to a
2573  * seekable file (not a pipe).
2574  *
2575  * Note that since pipable WIMs are not supported by Microsoft's software,
2576  * wimlib does not create them unless explicitly requested (with
2577  * WIMLIB_WRITE_FLAG_PIPABLE), and, as noted above, they use different magic
2578  * characters to identify the file.
2579  */
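
/*
 * Illustrative sketch only (the authoritative layouts are the struct
 * definitions in the headers): per the description above, a compressed blob
 * in a pipable WIM is stored roughly as
 *
 *      [struct pwm_blob_hdr]
 *      [struct pwm_chunk_hdr][compressed chunk 0]
 *      [struct pwm_chunk_hdr][compressed chunk 1]
 *      ...
 *      [chunk table]
 *
 * while an uncompressed blob is simply [struct pwm_blob_hdr] followed by the
 * literal blob data.
 */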
2580 static int
2581 write_pipable_wim(WIMStruct *wim, int image, int write_flags,
2582                   unsigned num_threads,
2583                   struct list_head *blob_list_override,
2584                   struct list_head *blob_table_list_ret)
2585 {
2586         int ret;
2587         struct wim_reshdr xml_reshdr;
2588
2589         WARNING("Creating a pipable WIM, which will be incompatible\n"
2590                 "          with Microsoft's software "
2591                 "(WIMGAPI/ImageX/DISM).");
2592
2593         /* At this point, the header at the beginning of the file has already
2594          * been written.  */
2595
2596         /* For efficiency, when wimlib adds an image to the WIM with
2597          * wimlib_add_image(), the SHA-1 message digests of files are not
2598          * calculated; instead, they are calculated while the files are being
2599          * written.  However, this does not work when writing a pipable WIM,
2600          * since when writing a blob to a pipable WIM, its SHA-1 message digest
2601          * needs to be known before the blob data is written.  Therefore, before
2602          * getting much farther, we need to pre-calculate the SHA-1 message
2603          * digests of all blobs that will be written.  */
2604         ret = wim_checksum_unhashed_blobs(wim);
2605         if (ret)
2606                 return ret;
2607
2608         /* Write extra copy of the XML data.  */
2609         ret = write_wim_xml_data(wim, image, WIM_TOTALBYTES_OMIT,
2610                                  &xml_reshdr, WRITE_RESOURCE_FLAG_PIPABLE);
2611         if (ret)
2612                 return ret;
2613
2614         /* Write metadata resources for the image(s) being included in the
2615          * output WIM.  */
2616         ret = write_metadata_resources(wim, image, write_flags);
2617         if (ret)
2618                 return ret;
2619
2620         /* Write file data needed for the image(s) being included in the output
2621          * WIM, or file data needed for the split WIM part.  */
2622         return write_file_data(wim, image, write_flags,
2623                                num_threads, blob_list_override,
2624                                blob_table_list_ret);
2625
2626         /* The blob table, XML data, and header at end are handled by
2627          * finish_write().  */
2628 }
2629
2630 /* Write a standalone WIM or split WIM (SWM) part to a new file or to a file
2631  * descriptor.  */
2632 int
2633 write_wim_part(WIMStruct *wim,
2634                const void *path_or_fd,
2635                int image,
2636                int write_flags,
2637                unsigned num_threads,
2638                unsigned part_number,
2639                unsigned total_parts,
2640                struct list_head *blob_list_override,
2641                const u8 *guid)
2642 {
2643         int ret;
2644         struct list_head blob_table_list;
2645
2646         /* Internally, this is always called with a valid part number and total
2647          * parts.  */
2648         wimlib_assert(total_parts >= 1);
2649         wimlib_assert(part_number >= 1 && part_number <= total_parts);
2650
2651         /* A valid image (or all images) must be specified.  */
2652         if (image != WIMLIB_ALL_IMAGES &&
2653              (image < 1 || image > wim->hdr.image_count))
2654                 return WIMLIB_ERR_INVALID_IMAGE;
2655
2656         /* If we need to write metadata resources, make sure the ::WIMStruct has
2657          * the needed information attached (e.g. is not a resource-only WIM,
2658          * such as a non-first part of a split WIM).  */
2659         if (!wim_has_metadata(wim) &&
2660             !(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA))
2661                 return WIMLIB_ERR_METADATA_NOT_FOUND;
2662
2663         /* Check for contradictory flags.  */
2664         if ((write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2665                             WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2666                                 == (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2667                                     WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2668                 return WIMLIB_ERR_INVALID_PARAM;
2669
2670         if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2671                             WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2672                                 == (WIMLIB_WRITE_FLAG_PIPABLE |
2673                                     WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2674                 return WIMLIB_ERR_INVALID_PARAM;
2675
2676         /* Include an integrity table by default if no preference was given and
2677          * the WIM already had an integrity table.  */
2678         if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2679                              WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))) {
2680                 if (wim_has_integrity_table(wim))
2681                         write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
2682         }
2683
2684         /* Write a pipable WIM by default if no preference was given and the WIM
2685          * was already pipable.  */
2686         if (!(write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2687                              WIMLIB_WRITE_FLAG_NOT_PIPABLE))) {
2688                 if (wim_is_pipable(wim))
2689                         write_flags |= WIMLIB_WRITE_FLAG_PIPABLE;
2690         }
2691
2692         if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2693                             WIMLIB_WRITE_FLAG_SOLID))
2694                                     == (WIMLIB_WRITE_FLAG_PIPABLE |
2695                                         WIMLIB_WRITE_FLAG_SOLID))
2696         {
2697                 ERROR("Solid compression is unsupported in pipable WIMs");
2698                 return WIMLIB_ERR_INVALID_PARAM;
2699         }
2700
2701         /* Start initializing the new file header.  */
2702         memset(&wim->out_hdr, 0, sizeof(wim->out_hdr));
2703
2704         /* Set the magic number.  */
2705         if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2706                 wim->out_hdr.magic = PWM_MAGIC;
2707         else
2708                 wim->out_hdr.magic = WIM_MAGIC;
2709
2710         /* Set the version number.  */
2711         if ((write_flags & WIMLIB_WRITE_FLAG_SOLID) ||
2712             wim->out_compression_type == WIMLIB_COMPRESSION_TYPE_LZMS)
2713                 wim->out_hdr.wim_version = WIM_VERSION_SOLID;
2714         else
2715                 wim->out_hdr.wim_version = WIM_VERSION_DEFAULT;
2716
2717         /* Set the header flags.  */
2718         wim->out_hdr.flags = (wim->hdr.flags & (WIM_HDR_FLAG_RP_FIX |
2719                                                 WIM_HDR_FLAG_READONLY));
2720         if (total_parts != 1)
2721                 wim->out_hdr.flags |= WIM_HDR_FLAG_SPANNED;
2722         if (wim->out_compression_type != WIMLIB_COMPRESSION_TYPE_NONE) {
2723                 wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESSION;
2724                 switch (wim->out_compression_type) {
2725                 case WIMLIB_COMPRESSION_TYPE_XPRESS:
2726                         wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESS_XPRESS;
2727                         break;
2728                 case WIMLIB_COMPRESSION_TYPE_LZX:
2729                         wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESS_LZX;
2730                         break;
2731                 case WIMLIB_COMPRESSION_TYPE_LZMS:
2732                         wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESS_LZMS;
2733                         break;
2734                 }
2735         }
2736
2737         /* Set the chunk size.  */
2738         wim->out_hdr.chunk_size = wim->out_chunk_size;
2739
2740         /* Set the GUID.  */
2741         if (write_flags & WIMLIB_WRITE_FLAG_RETAIN_GUID)
2742                 guid = wim->hdr.guid;
2743         if (guid)
2744                 memcpy(wim->out_hdr.guid, guid, WIMLIB_GUID_LEN);
2745         else
2746                 randomize_byte_array(wim->out_hdr.guid, WIMLIB_GUID_LEN);
2747
2748         /* Set the part number and total parts.  */
2749         wim->out_hdr.part_number = part_number;
2750         wim->out_hdr.total_parts = total_parts;
2751
2752         /* Set the image count.  */
2753         if (image == WIMLIB_ALL_IMAGES)
2754                 wim->out_hdr.image_count = wim->hdr.image_count;
2755         else
2756                 wim->out_hdr.image_count = 1;
2757
2758         /* Set the boot index.  */
2759         wim->out_hdr.boot_idx = 0;
2760         if (total_parts == 1) {
2761                 if (image == WIMLIB_ALL_IMAGES)
2762                         wim->out_hdr.boot_idx = wim->hdr.boot_idx;
2763                 else if (image == wim->hdr.boot_idx)
2764                         wim->out_hdr.boot_idx = 1;
2765         }
2766
2767         /* Set up the output file descriptor.  */
2768         if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) {
2769                 /* File descriptor was explicitly provided.  */
2770                 filedes_init(&wim->out_fd, *(const int *)path_or_fd);
2771                 if (!filedes_is_seekable(&wim->out_fd)) {
2772                         /* The file descriptor is a pipe.  */
2773                         ret = WIMLIB_ERR_INVALID_PARAM;
2774                         if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2775                                 goto out_cleanup;
2776                         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2777                                 ERROR("Can't include integrity check when "
2778                                       "writing pipable WIM to pipe!");
2779                                 goto out_cleanup;
2780                         }
2781                 }
2782         } else {
2783                 /* Filename of WIM to write was provided; open file descriptor
2784                  * to it.  */
2785                 ret = open_wim_writable(wim, (const tchar*)path_or_fd,
2786                                         O_TRUNC | O_CREAT | O_RDWR);
2787                 if (ret)
2788                         goto out_cleanup;
2789         }
2790
2791         /* Write initial header.  This is merely a "dummy" header since it
2792          * doesn't have resource entries filled in yet, so it will be
2793          * overwritten later (unless writing a pipable WIM).  */
2794         if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2795                 wim->out_hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2796         ret = write_wim_header(&wim->out_hdr, &wim->out_fd, wim->out_fd.offset);
2797         wim->out_hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2798         if (ret)
2799                 goto out_cleanup;
2800
2801         /* Write file data and metadata resources.  */
2802         if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE)) {
2803                 /* Default case: create a normal (non-pipable) WIM.  */
2804                 ret = write_file_data(wim, image, write_flags,
2805                                       num_threads,
2806                                       blob_list_override,
2807                                       &blob_table_list);
2808                 if (ret)
2809                         goto out_cleanup;
2810
2811                 ret = write_metadata_resources(wim, image, write_flags);
2812                 if (ret)
2813                         goto out_cleanup;
2814         } else {
2815                 /* Non-default case: create pipable WIM.  */
2816                 ret = write_pipable_wim(wim, image, write_flags, num_threads,
2817                                         blob_list_override,
2818                                         &blob_table_list);
2819                 if (ret)
2820                         goto out_cleanup;
2821         }
2822
2823         /* Write blob table, XML data, and (optional) integrity table.  */
2824         ret = finish_write(wim, image, write_flags, &blob_table_list);
2825 out_cleanup:
2826         (void)close_wim_writable(wim, write_flags);
2827         return ret;
2828 }
2829
2830 /* Write a standalone WIM to a file or file descriptor.  */
2831 static int
2832 write_standalone_wim(WIMStruct *wim, const void *path_or_fd,
2833                      int image, int write_flags, unsigned num_threads)
2834 {
2835         return write_wim_part(wim, path_or_fd, image, write_flags,
2836                               num_threads, 1, 1, NULL, NULL);
2837 }
2838
2839 /* API function documented in wimlib.h  */
2840 WIMLIBAPI int
2841 wimlib_write(WIMStruct *wim, const tchar *path,
2842              int image, int write_flags, unsigned num_threads)
2843 {
2844         if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
2845                 return WIMLIB_ERR_INVALID_PARAM;
2846
2847         if (path == NULL || path[0] == T('\0'))
2848                 return WIMLIB_ERR_INVALID_PARAM;
2849
2850         return write_standalone_wim(wim, path, image, write_flags, num_threads);
2851 }
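
/*
 * Example (illustrative only, not taken from wimlib.h): write all images of
 * an opened WIMStruct to a new file with default settings; passing 0 for
 * num_threads requests the default thread count.
 *
 *      int ret = wimlib_write(wim, T("out.wim"), WIMLIB_ALL_IMAGES, 0, 0);
 */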
2852
2853 /* API function documented in wimlib.h  */
2854 WIMLIBAPI int
2855 wimlib_write_to_fd(WIMStruct *wim, int fd,
2856                    int image, int write_flags, unsigned num_threads)
2857 {
2858         if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
2859                 return WIMLIB_ERR_INVALID_PARAM;
2860
2861         if (fd < 0)
2862                 return WIMLIB_ERR_INVALID_PARAM;
2863
2864         write_flags |= WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR;
2865
2866         return write_standalone_wim(wim, &fd, image, write_flags, num_threads);
2867 }
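
/*
 * Example (illustrative only): stream a pipable WIM to standard output so it
 * can be piped into another process.  STDOUT_FILENO comes from <unistd.h>,
 * which is already included above; the PIPABLE flag is required whenever the
 * descriptor is not seekable (e.g. an actual pipe).
 *
 *      int ret = wimlib_write_to_fd(wim, STDOUT_FILENO, WIMLIB_ALL_IMAGES,
 *                                   WIMLIB_WRITE_FLAG_PIPABLE, 0);
 */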
2868
2869 static bool
2870 any_images_modified(WIMStruct *wim)
2871 {
2872         for (int i = 0; i < wim->hdr.image_count; i++)
2873                 if (wim->image_metadata[i]->modified)
2874                         return true;
2875         return false;
2876 }
2877
2878 static int
2879 check_resource_offset(struct blob_descriptor *blob, void *_wim)
2880 {
2881         const WIMStruct *wim = _wim;
2882         off_t end_offset = *(const off_t*)wim->private;
2883
2884         if (blob->blob_location == BLOB_IN_WIM &&
2885             blob->rdesc->wim == wim &&
2886             blob->rdesc->offset_in_wim + blob->rdesc->size_in_wim > end_offset)
2887                 return WIMLIB_ERR_RESOURCE_ORDER;
2888         return 0;
2889 }
2890
2891 /* Make sure no file or metadata resources are located after the XML data (or
2892  * integrity table if present); otherwise we can't safely overwrite the WIM in
2893  * place, and we return WIMLIB_ERR_RESOURCE_ORDER.  */
2894 static int
2895 check_resource_offsets(WIMStruct *wim, off_t end_offset)
2896 {
2897         int ret;
2898         unsigned i;
2899
2900         wim->private = &end_offset;
2901         ret = for_blob_in_table(wim->blob_table, check_resource_offset, wim);
2902         if (ret)
2903                 return ret;
2904
2905         for (i = 0; i < wim->hdr.image_count; i++) {
2906                 ret = check_resource_offset(wim->image_metadata[i]->metadata_blob, wim);
2907                 if (ret)
2908                         return ret;
2909         }
2910         return 0;
2911 }
2912
2913 /*
2914  * Overwrite a WIM, possibly appending new resources to it.
2915  *
2916  * A WIM looks like (or is supposed to look like) the following:
2917  *
2918  *                   Header (208 bytes)
2919  *                   Resources for metadata and files (variable size)
2920  *                   Blob table (variable size)
2921  *                   XML data (variable size)
2922  *                   Integrity table (optional) (variable size)
2923  *
2924  * If we are not adding any new files or metadata, then the blob table is
2925  * unchanged, so we only need to overwrite the XML data, integrity table, and
2926  * header.  This operation is potentially unsafe if the program is abruptly
2927  * terminated while the XML data or integrity table are being overwritten, but
2928  * before the new header has been written.  To partially alleviate this problem,
2929  * we write a temporary header after the XML data has been written.  This may
2930  * prevent the WIM from becoming corrupted if the program is terminated while
2931  * the integrity table is being calculated (but no guarantees, due to write
2932  * re-ordering...).
2933  *
2934  * If we are adding new blobs, including new file data as well as any metadata
2935  * for any new images, then the blob table needs to be changed, and those blobs
2936  * need to be written.  In this case, we try to perform a safe update of the WIM
2937  * file by writing the blobs *after* the end of the previous WIM, then writing
2938  * the new blob table, XML data, and (optionally) integrity table following the
2939  * new blobs.  This will produce a layout like the following:
2940  *
2941  *                   Header (208 bytes)
2942  *                   (OLD) Resources for metadata and files (variable size)
2943  *                   (OLD) Blob table (variable size)
2944  *                   (OLD) XML data (variable size)
2945  *                   (OLD) Integrity table (optional) (variable size)
2946  *                   (NEW) Resources for metadata and files (variable size)
2947  *                   (NEW) Blob table (variable size)
2948  *                   (NEW) XML data (variable size)
2949  *                   (NEW) Integrity table (optional) (variable size)
2950  *
2951  * At all points, the WIM is valid, since nothing points to the new data yet.  Then,
2952  * the header is overwritten to point to the new blob table, XML data, and
2953  * integrity table, to produce the following layout:
2954  *
2955  *                   Header (208 bytes)
2956  *                   Resources for metadata and files (variable size)
2957  *                   Nothing (variable size)
2958  *                   Resources for metadata and files (variable size)
2959  *                   Blob table (variable size)
2960  *                   XML data (variable size)
2961  *                   Integrity table (optional) (variable size)
2962  *
2963  * This method allows an image to be appended to a large WIM very quickly, and
2964  * is crash-safe except in the case of write re-ordering, but the disadvantage
2965  * is that a small hole is left in the WIM where the old blob table, XML data,
2966  * and integrity table were.  (These usually only take up a small amount of
2967  * space compared to the blobs, however.)
2968  */
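
/*
 * Concrete illustration (offsets invented for the example): if the old WIM's
 * resources end at offset 1000, its blob table occupies [1000, 1200), its XML
 * data occupies [1200, 1500), and there is no integrity table, then appending
 * an image writes the new blobs starting at offset 1500, followed by the new
 * blob table, XML data, and (optional) integrity table.  Only after all of
 * that succeeds is the header at offset 0 rewritten to reference the new
 * structures, leaving [1000, 1500) as the unreferenced hole described above.
 */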
2969 static int
2970 overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads)
2971 {
2972         int ret;
2973         off_t old_wim_end;
2974         u64 old_blob_table_end, old_xml_begin, old_xml_end;
2975         struct list_head blob_list;
2976         struct list_head blob_table_list;
2977         struct filter_context filter_ctx;
2978
2979         /* Include an integrity table by default if no preference was given and
2980          * the WIM already had an integrity table.  */
2981         if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2982                              WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)))
2983                 if (wim_has_integrity_table(wim))
2984                         write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
2985
2986         /* Start preparing the updated file header.  */
2987         memcpy(&wim->out_hdr, &wim->hdr, sizeof(wim->out_hdr));
2988
2989         /* If using solid compression, the version number must be set to
2990          * WIM_VERSION_SOLID.  */
2991         if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
2992                 wim->out_hdr.wim_version = WIM_VERSION_SOLID;
2993
2994         /* Set additional flags for overwrite.  */
2995         write_flags |= WIMLIB_WRITE_FLAG_OVERWRITE |
2996                        WIMLIB_WRITE_FLAG_STREAMS_OK;
2997
2998         /* Make sure there is no data after the XML data, except possibly an
2999          * integrity table.  If this were the case, then this data would be
3000          * overwritten.  */
3001         old_xml_begin = wim->hdr.xml_data_reshdr.offset_in_wim;
3002         old_xml_end = old_xml_begin + wim->hdr.xml_data_reshdr.size_in_wim;
3003         old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
3004                              wim->hdr.blob_table_reshdr.size_in_wim;
3005         if (wim_has_integrity_table(wim) &&
3006             wim->hdr.integrity_table_reshdr.offset_in_wim < old_xml_end) {
3007                 WARNING("Didn't expect the integrity table to be before the XML data");
3008                 ret = WIMLIB_ERR_RESOURCE_ORDER;
3009                 goto out;
3010         }
3011
3012         if (old_blob_table_end > old_xml_begin) {
3013                 WARNING("Didn't expect the blob table to be after the XML data");
3014                 ret = WIMLIB_ERR_RESOURCE_ORDER;
3015                 goto out;
3016         }
3017
3018         /* Set @old_wim_end, which indicates the point beyond which we don't
3019          * allow any file or metadata resources to appear without returning
3020          * WIMLIB_ERR_RESOURCE_ORDER (because we would otherwise overwrite these
3021          * resources). */
3022         if (!wim->image_deletion_occurred && !any_images_modified(wim)) {
3023                 /* If no images have been modified and no images have been
3024                  * deleted, a new blob table does not need to be written.  We
3025                  * shall write the new XML data and optional integrity table
3026                  * immediately after the blob table.  Note that this may
3027                  * overwrite an existing integrity table. */
3028                 old_wim_end = old_blob_table_end;
3029                 write_flags |= WIMLIB_WRITE_FLAG_NO_NEW_BLOBS;
3030         } else if (wim_has_integrity_table(wim)) {
3031                 /* Old WIM has an integrity table; begin writing new blobs after
3032                  * it. */
3033                 old_wim_end = wim->hdr.integrity_table_reshdr.offset_in_wim +
3034                               wim->hdr.integrity_table_reshdr.size_in_wim;
3035         } else {
3036                 /* No existing integrity table; begin writing new blobs after
3037                  * the old XML data. */
3038                 old_wim_end = old_xml_end;
3039         }
3040
3041         ret = check_resource_offsets(wim, old_wim_end);
3042         if (ret)
3043                 goto out;
3044
3045         ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES, write_flags,
3046                                           &blob_list, &blob_table_list,
3047                                           &filter_ctx);
3048         if (ret)
3049                 goto out;
3050
3051         if (write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)
3052                 wimlib_assert(list_empty(&blob_list));
3053
3054         ret = open_wim_writable(wim, wim->filename, O_RDWR);
3055         if (ret)
3056                 goto out;
3057
3058         ret = lock_wim_for_append(wim);
3059         if (ret)
3060                 goto out_close_wim;
3061
3062         /* Set WIM_HDR_FLAG_WRITE_IN_PROGRESS flag in header. */
3063         wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
3064         ret = write_wim_header_flags(wim->hdr.flags, &wim->out_fd);
3065         wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
3066         if (ret) {
3067                 ERROR_WITH_ERRNO("Error updating WIM header flags");
3068                 goto out_unlock_wim;
3069         }
3070
3071         if (filedes_seek(&wim->out_fd, old_wim_end) == -1) {
3072                 ERROR_WITH_ERRNO("Can't seek to end of WIM");
3073                 ret = WIMLIB_ERR_WRITE;
3074                 goto out_restore_hdr;
3075         }
3076
3077         ret = write_file_data_blobs(wim, &blob_list, write_flags,
3078                                     num_threads, &filter_ctx);
3079         if (ret)
3080                 goto out_truncate;
3081
3082         ret = write_metadata_resources(wim, WIMLIB_ALL_IMAGES, write_flags);
3083         if (ret)
3084                 goto out_truncate;
3085
3086         ret = finish_write(wim, WIMLIB_ALL_IMAGES, write_flags,
3087                            &blob_table_list);
3088         if (ret)
3089                 goto out_truncate;
3090
3091         unlock_wim_for_append(wim);
3092         return 0;
3093
3094 out_truncate:
3095         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)) {
3096                 WARNING("Truncating \"%"TS"\" to its original size "
3097                         "(%"PRIu64" bytes)", wim->filename, old_wim_end);
3098                 /* Return value of ftruncate() is ignored because this is
3099                  * already an error path.  */
3100                 (void)ftruncate(wim->out_fd.fd, old_wim_end);
3101         }
3102 out_restore_hdr:
3103         (void)write_wim_header_flags(wim->hdr.flags, &wim->out_fd);
3104 out_unlock_wim:
3105         unlock_wim_for_append(wim);
3106 out_close_wim:
3107         (void)close_wim_writable(wim, write_flags);
3108 out:
3109         return ret;
3110 }
3111
3112 static int
3113 overwrite_wim_via_tmpfile(WIMStruct *wim, int write_flags, unsigned num_threads)
3114 {
3115         size_t wim_name_len;
3116         int ret;
3117
3118         /* Write the WIM to a temporary file in the same directory as the
3119          * original WIM. */
3120         wim_name_len = tstrlen(wim->filename);
3121         tchar tmpfile[wim_name_len + 10];
3122         tmemcpy(tmpfile, wim->filename, wim_name_len);
3123         randomize_char_array_with_alnum(tmpfile + wim_name_len, 9);
3124         tmpfile[wim_name_len + 9] = T('\0');
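        /* For example (suffix invented for illustration): "install.wim" would
         * become a temporary file named something like "install.wimq3Zx81LpA".  */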
3125
3126         ret = wimlib_write(wim, tmpfile, WIMLIB_ALL_IMAGES,
3127                            write_flags |
3128                                 WIMLIB_WRITE_FLAG_FSYNC |
3129                                 WIMLIB_WRITE_FLAG_RETAIN_GUID,
3130                            num_threads);
3131         if (ret) {
3132                 tunlink(tmpfile);
3133                 return ret;
3134         }
3135
3136         if (filedes_valid(&wim->in_fd)) {
3137                 filedes_close(&wim->in_fd);
3138                 filedes_invalidate(&wim->in_fd);
3139         }
3140
3141         /* Rename the new WIM file to the original WIM file.  Note: on Windows
3142          * this actually calls win32_rename_replacement(), not _wrename(), so
3143          * that removing the existing destination file can be handled.  */
3144         ret = trename(tmpfile, wim->filename);
3145         if (ret) {
3146                 ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'",
3147                                  tmpfile, wim->filename);
3148         #ifdef __WIN32__
3149                 if (ret < 0)
3150         #endif
3151                 {
3152                         tunlink(tmpfile);
3153                 }
3154                 return WIMLIB_ERR_RENAME;
3155         }
3156
3157         union wimlib_progress_info progress;
3158         progress.rename.from = tmpfile;
3159         progress.rename.to = wim->filename;
3160         return call_progress(wim->progfunc, WIMLIB_PROGRESS_MSG_RENAME,
3161                              &progress, wim->progctx);
3162 }
3163
3164 /* Determine if the specified WIM file may be updated by appending in-place
3165  * rather than writing and replacing it with an entirely new file.  */
3166 static bool
3167 can_overwrite_wim_inplace(const WIMStruct *wim, int write_flags)
3168 {
3169         /* REBUILD flag forces full rebuild.  */
3170         if (write_flags & WIMLIB_WRITE_FLAG_REBUILD)
3171                 return false;
3172
3173         /* Image deletions cause full rebuild by default.  */
3174         if (wim->image_deletion_occurred &&
3175             !(write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
3176                 return false;
3177
3178         /* Pipable WIMs cannot be updated in place, nor can a non-pipable WIM be
3179          * turned into a pipable WIM in-place.  */
3180         if (wim_is_pipable(wim) || (write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
3181                 return false;
3182
3183         /* The default compression type and compression chunk size selected for
3184          * the output WIM must be the same as those currently used for the WIM.
3185          */
3186         if (wim->compression_type != wim->out_compression_type)
3187                 return false;
3188         if (wim->chunk_size != wim->out_chunk_size)
3189                 return false;
3190
3191         return true;
3192 }
3193
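/*
 * Example (illustrative only): commit in-memory changes back to the on-disk
 * WIM, appending in place when possible and including an integrity table;
 * 0 again requests the default thread count.
 *
 *      int ret = wimlib_overwrite(wim, WIMLIB_WRITE_FLAG_CHECK_INTEGRITY, 0);
 */
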
3194 /* API function documented in wimlib.h  */
3195 WIMLIBAPI int
3196 wimlib_overwrite(WIMStruct *wim, int write_flags, unsigned num_threads)
3197 {
3198         int ret;
3199         u32 orig_hdr_flags;
3200
3201         if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
3202                 return WIMLIB_ERR_INVALID_PARAM;
3203
3204         if (!wim->filename)
3205                 return WIMLIB_ERR_NO_FILENAME;
3206
3207         orig_hdr_flags = wim->hdr.flags;
3208         if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
3209                 wim->hdr.flags &= ~WIM_HDR_FLAG_READONLY;
3210         ret = can_modify_wim(wim);
3211         wim->hdr.flags = orig_hdr_flags;
3212         if (ret)
3213                 return ret;
3214
3215         if (can_overwrite_wim_inplace(wim, write_flags)) {
3216                 ret = overwrite_wim_inplace(wim, write_flags, num_threads);
3217                 if (ret != WIMLIB_ERR_RESOURCE_ORDER)
3218                         return ret;
3219                 WARNING("Falling back to re-building entire WIM");
3220         }
3221         return overwrite_wim_via_tmpfile(wim, write_flags, num_threads);
3222 }