1 /*
2  * write.c
3  *
4  * Support for writing WIM files; write a WIM file, overwrite a WIM file, write
5  * compressed file resources, etc.
6  */
7
8 /*
9  * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
10  *
11  * This file is free software; you can redistribute it and/or modify it under
12  * the terms of the GNU Lesser General Public License as published by the Free
13  * Software Foundation; either version 3 of the License, or (at your option) any
14  * later version.
15  *
16  * This file is distributed in the hope that it will be useful, but WITHOUT
17  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18  * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
19  * details.
20  *
21  * You should have received a copy of the GNU Lesser General Public License
22  * along with this file; if not, see http://www.gnu.org/licenses/.
23  */
24
25 #ifdef HAVE_CONFIG_H
26 #  include "config.h"
27 #endif
28
29 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
30 /* On BSD, <sys/file.h> should be included before "wimlib/list.h" so that
31  * "wimlib/list.h" can override the LIST_HEAD macro.  */
32 #  include <sys/file.h>
33 #endif
34
35 #include <errno.h>
36 #include <fcntl.h>
37 #include <stdlib.h>
38 #include <unistd.h>
39
40 #include "wimlib/alloca.h"
41 #include "wimlib/assert.h"
42 #include "wimlib/blob_table.h"
43 #include "wimlib/chunk_compressor.h"
44 #include "wimlib/endianness.h"
45 #include "wimlib/error.h"
46 #include "wimlib/file_io.h"
47 #include "wimlib/header.h"
48 #include "wimlib/inode.h"
49 #include "wimlib/integrity.h"
50 #include "wimlib/metadata.h"
51 #include "wimlib/paths.h"
52 #include "wimlib/progress.h"
53 #include "wimlib/resource.h"
54 #include "wimlib/solid.h"
55 #include "wimlib/win32.h" /* win32_rename_replacement() */
56 #include "wimlib/write.h"
57 #include "wimlib/xml.h"
58
59
60 /* wimlib internal flags used when writing resources.  */
61 #define WRITE_RESOURCE_FLAG_RECOMPRESS          0x00000001
62 #define WRITE_RESOURCE_FLAG_PIPABLE             0x00000002
63 #define WRITE_RESOURCE_FLAG_SOLID               0x00000004
64 #define WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE 0x00000008
65 #define WRITE_RESOURCE_FLAG_SOLID_SORT          0x00000010
66
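/* Translate the public WIMLIB_WRITE_FLAG_* flags into the internal
 * WRITE_RESOURCE_FLAG_* flags.  Note that solid sorting is enabled whenever
 * WIMLIB_WRITE_FLAG_SOLID is given without WIMLIB_WRITE_FLAG_NO_SOLID_SORT.  */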
67 static int
68 write_flags_to_resource_flags(int write_flags)
69 {
70         int write_resource_flags = 0;
71
72         if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
73                 write_resource_flags |= WRITE_RESOURCE_FLAG_RECOMPRESS;
74
75         if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
76                 write_resource_flags |= WRITE_RESOURCE_FLAG_PIPABLE;
77
78         if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
79                 write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID;
80
81         if (write_flags & WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES)
82                 write_resource_flags |= WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE;
83
84         if ((write_flags & (WIMLIB_WRITE_FLAG_SOLID |
85                             WIMLIB_WRITE_FLAG_NO_SOLID_SORT)) ==
86             WIMLIB_WRITE_FLAG_SOLID)
87                 write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID_SORT;
88
89         return write_resource_flags;
90 }
91
92 struct filter_context {
93         int write_flags;
94         WIMStruct *wim;
95 };
96
97 /*
98  * Determine whether the specified blob should be filtered out from the write.
99  *
100  * Return values:
101  *
102  *  < 0 : The blob should be hard-filtered; that is, not included in the output
103  *        WIM file at all.
104  *    0 : The blob should not be filtered out.
105  *  > 0 : The blob should be soft-filtered; that is, it already exists in the
106  *        WIM file and may not need to be written again.
107  */
108 static int
109 blob_filtered(const struct blob_descriptor *blob,
110               const struct filter_context *ctx)
111 {
112         int write_flags;
113         WIMStruct *wim;
114
115         if (ctx == NULL)
116                 return 0;
117
118         write_flags = ctx->write_flags;
119         wim = ctx->wim;
120
121         if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE &&
122             blob->blob_location == BLOB_IN_WIM &&
123             blob->rdesc->wim == wim)
124                 return 1;
125
126         if (write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS &&
127             blob->blob_location == BLOB_IN_WIM &&
128             blob->rdesc->wim != wim)
129                 return -1;
130
131         return 0;
132 }
133
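/* Return true if the blob should be excluded from the output WIM file
 * entirely (hard-filtered).  */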
134 static bool
135 blob_hard_filtered(const struct blob_descriptor *blob,
136                    struct filter_context *ctx)
137 {
138         return blob_filtered(blob, ctx) < 0;
139 }
140
141 static inline int
142 may_soft_filter_blobs(const struct filter_context *ctx)
143 {
144         if (ctx == NULL)
145                 return 0;
146         return ctx->write_flags & WIMLIB_WRITE_FLAG_OVERWRITE;
147 }
148
149 static inline int
150 may_hard_filter_blobs(const struct filter_context *ctx)
151 {
152         if (ctx == NULL)
153                 return 0;
154         return ctx->write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS;
155 }
156
157 static inline int
158 may_filter_blobs(const struct filter_context *ctx)
159 {
160         return (may_soft_filter_blobs(ctx) || may_hard_filter_blobs(ctx));
161 }
162
163 /* Return true if the specified resource is compressed and the compressed data
164  * can be reused with the specified output parameters.  */
165 static bool
166 can_raw_copy(const struct blob_descriptor *blob,
167              int write_resource_flags, int out_ctype, u32 out_chunk_size)
168 {
169         const struct wim_resource_descriptor *rdesc;
170
171         if (write_resource_flags & WRITE_RESOURCE_FLAG_RECOMPRESS)
172                 return false;
173
174         if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
175                 return false;
176
177         if (blob->blob_location != BLOB_IN_WIM)
178                 return false;
179
180         rdesc = blob->rdesc;
181
182         if (rdesc->is_pipable != !!(write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE))
183                 return false;
184
185         if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) {
186                 /* Normal compressed resource: Must use same compression type
187                  * and chunk size.  */
188                 return (rdesc->compression_type == out_ctype &&
189                         rdesc->chunk_size == out_chunk_size);
190         }
191
192         if ((rdesc->flags & WIM_RESHDR_FLAG_SOLID) &&
193             (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
194         {
195                 /* Solid resource: Such resources may contain multiple blobs,
196                  * and in general only a subset of them need to be written.  As
197                  * a heuristic, re-use the raw data if more than two-thirds the
198                  * uncompressed size is being written.  */
199
200                 /* Note: solid resources contain a header that specifies the
201                  * compression type and chunk size; therefore we don't need to
202                  * check if they are compatible with @out_ctype and
203                  * @out_chunk_size.  */
204
205                 struct blob_descriptor *res_blob;
206                 u64 write_size = 0;
207
208                 list_for_each_entry(res_blob, &rdesc->blob_list, rdesc_node)
209                         if (res_blob->will_be_in_output_wim)
210                                 write_size += res_blob->size;
211
212                 return (write_size > rdesc->uncompressed_size * 2 / 3);
213         }
214
215         return false;
216 }
217
218 static u32
219 reshdr_flags_for_blob(const struct blob_descriptor *blob)
220 {
221         u32 reshdr_flags = 0;
222         if (blob->is_metadata)
223                 reshdr_flags |= WIM_RESHDR_FLAG_METADATA;
224         return reshdr_flags;
225 }
226
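/* Set up the blob's output resource header to reuse the resource in which the
 * blob is already stored.  For blobs in solid resources, the location of the
 * containing resource is recorded separately from the blob's own offset and
 * size within it.  */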
227 static void
228 blob_set_out_reshdr_for_reuse(struct blob_descriptor *blob)
229 {
230         const struct wim_resource_descriptor *rdesc;
231
232         wimlib_assert(blob->blob_location == BLOB_IN_WIM);
233         rdesc = blob->rdesc;
234
235         if (rdesc->flags & WIM_RESHDR_FLAG_SOLID) {
236                 blob->out_reshdr.offset_in_wim = blob->offset_in_res;
237                 blob->out_reshdr.uncompressed_size = 0;
238                 blob->out_reshdr.size_in_wim = blob->size;
239
240                 blob->out_res_offset_in_wim = rdesc->offset_in_wim;
241                 blob->out_res_size_in_wim = rdesc->size_in_wim;
242                 blob->out_res_uncompressed_size = rdesc->uncompressed_size;
243         } else {
244                 blob->out_reshdr.offset_in_wim = rdesc->offset_in_wim;
245                 blob->out_reshdr.uncompressed_size = rdesc->uncompressed_size;
246                 blob->out_reshdr.size_in_wim = rdesc->size_in_wim;
247         }
248         blob->out_reshdr.flags = rdesc->flags;
249 }
250
251
252 /* Write the header for a blob in a pipable WIM.  */
253 static int
254 write_pwm_blob_header(const struct blob_descriptor *blob,
255                       struct filedes *out_fd, bool compressed)
256 {
257         struct pwm_blob_hdr blob_hdr;
258         u32 reshdr_flags;
259         int ret;
260
261         wimlib_assert(!blob->unhashed);
262
263         blob_hdr.magic = cpu_to_le64(PWM_BLOB_MAGIC);
264         blob_hdr.uncompressed_size = cpu_to_le64(blob->size);
265         copy_hash(blob_hdr.hash, blob->hash);
266         reshdr_flags = reshdr_flags_for_blob(blob);
267         if (compressed)
268                 reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED;
269         blob_hdr.flags = cpu_to_le32(reshdr_flags);
270         ret = full_write(out_fd, &blob_hdr, sizeof(blob_hdr));
271         if (ret)
272                 ERROR_WITH_ERRNO("Write error");
273         return ret;
274 }
275
276 struct write_blobs_progress_data {
277         wimlib_progress_func_t progfunc;
278         void *progctx;
279         union wimlib_progress_info progress;
280         u64 next_progress;
281 };
282
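/* Update the WRITE_STREAMS progress counters and, once enough additional data
 * has been written, deliver a progress message.  If @discarded, the blob
 * turned out to be a duplicate, so its size and count are subtracted from the
 * totals rather than added to the completed amounts.  */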
283 static int
284 do_write_blobs_progress(struct write_blobs_progress_data *progress_data,
285                         u64 complete_size, u32 complete_count, bool discarded)
286 {
287         union wimlib_progress_info *progress = &progress_data->progress;
288         int ret;
289
290         if (discarded) {
291                 progress->write_streams.total_bytes -= complete_size;
292                 progress->write_streams.total_streams -= complete_count;
293                 if (progress_data->next_progress != ~(u64)0 &&
294                     progress_data->next_progress > progress->write_streams.total_bytes)
295                 {
296                         progress_data->next_progress = progress->write_streams.total_bytes;
297                 }
298         } else {
299                 progress->write_streams.completed_bytes += complete_size;
300                 progress->write_streams.completed_streams += complete_count;
301         }
302
303         if (progress->write_streams.completed_bytes >= progress_data->next_progress) {
304
305                 ret = call_progress(progress_data->progfunc,
306                                     WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
307                                     progress,
308                                     progress_data->progctx);
309                 if (ret)
310                         return ret;
311
312                 set_next_progress(progress->write_streams.completed_bytes,
313                                   progress->write_streams.total_bytes,
314                                   &progress_data->next_progress);
315         }
316         return 0;
317 }
318
319 struct write_blobs_ctx {
320         /* File descriptor to which the blobs are being written.  */
321         struct filedes *out_fd;
322
323         /* Blob table for the WIMStruct on whose behalf the blobs are being
324          * written.  */
325         struct blob_table *blob_table;
326
327         /* Compression format to use.  */
328         int out_ctype;
329
330         /* Maximum uncompressed chunk size in compressed resources to use.  */
331         u32 out_chunk_size;
332
333         /* Flags that affect how the blobs will be written.  */
334         int write_resource_flags;
335
336         /* Data used for issuing WRITE_STREAMS progress.  */
337         struct write_blobs_progress_data progress_data;
338
339         struct filter_context *filter_ctx;
340
341         /* Upper bound on the total number of bytes that need to be
342          * compressed.  */
343         u64 num_bytes_to_compress;
344
345         /* Pointer to the chunk_compressor implementation being used for
346          * compressing chunks of data, or NULL if chunks are being written
347          * uncompressed.  */
348         struct chunk_compressor *compressor;
349
350         /* A buffer of size @out_chunk_size that has been loaned out from the
351          * chunk compressor and is currently being filled with the uncompressed
352          * data of the next chunk.  */
353         u8 *cur_chunk_buf;
354
355         /* Number of bytes in @cur_chunk_buf that are currently filled.  */
356         size_t cur_chunk_buf_filled;
357
358         /* List of blobs that currently have chunks being compressed.  */
359         struct list_head blobs_being_compressed;
360
361         /* List of blobs in the solid resource.  Blobs are moved here after
362          * @blobs_being_compressed only when writing a solid resource.  */
363         struct list_head blobs_in_solid_resource;
364
365         /* Current uncompressed offset in the blob being read.  */
366         u64 cur_read_blob_offset;
367
368         /* Uncompressed size of the blob currently being read.  */
369         u64 cur_read_blob_size;
370
371         /* Current uncompressed offset in the blob being written.  */
372         u64 cur_write_blob_offset;
373
374         /* Uncompressed size of resource currently being written.  */
375         u64 cur_write_res_size;
376
377         /* Array that is filled in with compressed chunk sizes as a resource is
378          * being written.  */
379         u64 *chunk_csizes;
380
381         /* Index of next entry in @chunk_csizes to fill in.  */
382         size_t chunk_index;
383
384         /* Number of entries in @chunk_csizes currently allocated.  */
385         size_t num_alloc_chunks;
386
387         /* Offset in the output file of the start of the chunks of the resource
388          * currently being written.  */
389         u64 chunks_start_offset;
390 };
391
392 /* Reserve space for the chunk table and prepare to accumulate the chunk table
393  * in memory.  */
394 static int
395 begin_chunk_table(struct write_blobs_ctx *ctx, u64 res_expected_size)
396 {
397         u64 expected_num_chunks;
398         u64 expected_num_chunk_entries;
399         size_t reserve_size;
400         int ret;
401
402         /* Calculate the number of chunks and chunk entries that should be
403          * needed for the resource.  These normally will be the final values,
404          * but in SOLID mode some of the blobs we're planning to write into the
405          * resource may be duplicates, and therefore discarded, potentially
406          * decreasing the number of chunk entries needed.  */
407         expected_num_chunks = DIV_ROUND_UP(res_expected_size, ctx->out_chunk_size);
408         expected_num_chunk_entries = expected_num_chunks;
409         if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
410                 expected_num_chunk_entries--;
411
412         /* Make sure the chunk_csizes array is long enough to store the
413          * compressed size of each chunk.  */
414         if (expected_num_chunks > ctx->num_alloc_chunks) {
415                 u64 new_length = expected_num_chunks + 50;
416
417                 if ((size_t)new_length != new_length) {
418                         ERROR("Resource size too large (%"PRIu64" bytes)!",
419                               res_expected_size);
420                         return WIMLIB_ERR_NOMEM;
421                 }
422
423                 FREE(ctx->chunk_csizes);
424                 ctx->chunk_csizes = MALLOC(new_length * sizeof(ctx->chunk_csizes[0]));
425                 if (ctx->chunk_csizes == NULL) {
426                         ctx->num_alloc_chunks = 0;
427                         return WIMLIB_ERR_NOMEM;
428                 }
429                 ctx->num_alloc_chunks = new_length;
430         }
431
432         ctx->chunk_index = 0;
433
434         if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)) {
435                 /* Reserve space for the chunk table in the output file.  In the
436                  * case of solid resources this reserves the upper bound for the
437                  * needed space, not necessarily the exact space which will
438                  * prove to be needed.  At this point, we just use @chunk_csizes
439                  * for a buffer of 0's because the actual compressed chunk sizes
440                  * are unknown.  */
441                 reserve_size = expected_num_chunk_entries *
442                                get_chunk_entry_size(res_expected_size,
443                                                     0 != (ctx->write_resource_flags &
444                                                           WRITE_RESOURCE_FLAG_SOLID));
445                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)
446                         reserve_size += sizeof(struct alt_chunk_table_header_disk);
447                 memset(ctx->chunk_csizes, 0, reserve_size);
448                 ret = full_write(ctx->out_fd, ctx->chunk_csizes, reserve_size);
449                 if (ret)
450                         return ret;
451         }
452         return 0;
453 }
454
455 static int
456 begin_write_resource(struct write_blobs_ctx *ctx, u64 res_expected_size)
457 {
458         int ret;
459
460         wimlib_assert(res_expected_size != 0);
461
462         if (ctx->compressor != NULL) {
463                 ret = begin_chunk_table(ctx, res_expected_size);
464                 if (ret)
465                         return ret;
466         }
467
468         /* Output file descriptor is now positioned at the offset at which to
469          * write the first chunk of the resource.  */
470         ctx->chunks_start_offset = ctx->out_fd->offset;
471         ctx->cur_write_blob_offset = 0;
472         ctx->cur_write_res_size = res_expected_size;
473         return 0;
474 }
475
476 static int
477 end_chunk_table(struct write_blobs_ctx *ctx, u64 res_actual_size,
478                 u64 *res_start_offset_ret, u64 *res_store_size_ret)
479 {
480         size_t actual_num_chunks;
481         size_t actual_num_chunk_entries;
482         size_t chunk_entry_size;
483         int ret;
484
485         actual_num_chunks = ctx->chunk_index;
486         actual_num_chunk_entries = actual_num_chunks;
487         if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
488                 actual_num_chunk_entries--;
489
490         chunk_entry_size = get_chunk_entry_size(res_actual_size,
491                                                 0 != (ctx->write_resource_flags &
492                                                       WRITE_RESOURCE_FLAG_SOLID));
493
494         typedef le64 _may_alias_attribute aliased_le64_t;
495         typedef le32 _may_alias_attribute aliased_le32_t;
496
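        /* Convert the chunk table entries in place.  In solid resources each
         * entry is the compressed size of the corresponding chunk; in
         * non-solid resources each entry is the offset of the next chunk from
         * the start of the chunk data (a running total of the compressed
         * sizes, with no entry for the first chunk).  */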
497         if (chunk_entry_size == 4) {
498                 aliased_le32_t *entries = (aliased_le32_t*)ctx->chunk_csizes;
499
500                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
501                         for (size_t i = 0; i < actual_num_chunk_entries; i++)
502                                 entries[i] = cpu_to_le32(ctx->chunk_csizes[i]);
503                 } else {
504                         u32 offset = ctx->chunk_csizes[0];
505                         for (size_t i = 0; i < actual_num_chunk_entries; i++) {
506                                 u32 next_size = ctx->chunk_csizes[i + 1];
507                                 entries[i] = cpu_to_le32(offset);
508                                 offset += next_size;
509                         }
510                 }
511         } else {
512                 aliased_le64_t *entries = (aliased_le64_t*)ctx->chunk_csizes;
513
514                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
515                         for (size_t i = 0; i < actual_num_chunk_entries; i++)
516                                 entries[i] = cpu_to_le64(ctx->chunk_csizes[i]);
517                 } else {
518                         u64 offset = ctx->chunk_csizes[0];
519                         for (size_t i = 0; i < actual_num_chunk_entries; i++) {
520                                 u64 next_size = ctx->chunk_csizes[i + 1];
521                                 entries[i] = cpu_to_le64(offset);
522                                 offset += next_size;
523                         }
524                 }
525         }
526
527         size_t chunk_table_size = actual_num_chunk_entries * chunk_entry_size;
528         u64 res_start_offset;
529         u64 res_end_offset;
530
531         if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
532                 ret = full_write(ctx->out_fd, ctx->chunk_csizes, chunk_table_size);
533                 if (ret)
534                         goto write_error;
535                 res_end_offset = ctx->out_fd->offset;
536                 res_start_offset = ctx->chunks_start_offset;
537         } else {
538                 res_end_offset = ctx->out_fd->offset;
539
540                 u64 chunk_table_offset;
541
542                 chunk_table_offset = ctx->chunks_start_offset - chunk_table_size;
543
544                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
545                         struct alt_chunk_table_header_disk hdr;
546
547                         hdr.res_usize = cpu_to_le64(res_actual_size);
548                         hdr.chunk_size = cpu_to_le32(ctx->out_chunk_size);
549                         hdr.compression_format = cpu_to_le32(ctx->out_ctype);
550
551                         BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_XPRESS != 1);
552                         BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZX != 2);
553                         BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZMS != 3);
554
555                         ret = full_pwrite(ctx->out_fd, &hdr, sizeof(hdr),
556                                           chunk_table_offset - sizeof(hdr));
557                         if (ret)
558                                 goto write_error;
559                         res_start_offset = chunk_table_offset - sizeof(hdr);
560                 } else {
561                         res_start_offset = chunk_table_offset;
562                 }
563
564                 ret = full_pwrite(ctx->out_fd, ctx->chunk_csizes,
565                                   chunk_table_size, chunk_table_offset);
566                 if (ret)
567                         goto write_error;
568         }
569
570         *res_start_offset_ret = res_start_offset;
571         *res_store_size_ret = res_end_offset - res_start_offset;
572
573         return 0;
574
575 write_error:
576         ERROR_WITH_ERRNO("Write error");
577         return ret;
578 }
579
580 /* Finish writing a WIM resource by writing or updating the chunk table (if not
581  * writing the data uncompressed) and loading its metadata into @out_reshdr.  */
582 static int
583 end_write_resource(struct write_blobs_ctx *ctx, struct wim_reshdr *out_reshdr)
584 {
585         int ret;
586         u64 res_size_in_wim;
587         u64 res_uncompressed_size;
588         u64 res_offset_in_wim;
589
590         wimlib_assert(ctx->cur_write_blob_offset == ctx->cur_write_res_size ||
591                       (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID));
592         res_uncompressed_size = ctx->cur_write_res_size;
593
594         if (ctx->compressor) {
595                 ret = end_chunk_table(ctx, res_uncompressed_size,
596                                       &res_offset_in_wim, &res_size_in_wim);
597                 if (ret)
598                         return ret;
599         } else {
600                 res_offset_in_wim = ctx->chunks_start_offset;
601                 res_size_in_wim = ctx->out_fd->offset - res_offset_in_wim;
602         }
603         out_reshdr->uncompressed_size = res_uncompressed_size;
604         out_reshdr->size_in_wim = res_size_in_wim;
605         out_reshdr->offset_in_wim = res_offset_in_wim;
606         return 0;
607 }
608
609 /* Call when no more data from the file at @path is needed.  */
610 static int
611 done_with_file(const tchar *path, wimlib_progress_func_t progfunc, void *progctx)
612 {
613         union wimlib_progress_info info;
614
615         info.done_with_file.path_to_file = path;
616
617         return call_progress(progfunc, WIMLIB_PROGRESS_MSG_DONE_WITH_FILE,
618                              &info, progctx);
619 }
620
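/* If permitted for this blob, decrement the count of remaining streams on the
 * underlying file's inode and, once the last stream has been processed, notify
 * the library user via DONE_WITH_FILE that the file will not be read again.  */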
621 static int
622 do_done_with_blob(struct blob_descriptor *blob,
623                   wimlib_progress_func_t progfunc, void *progctx)
624 {
625         int ret;
626         struct wim_inode *inode;
627
628         if (!blob->may_send_done_with_file)
629                 return 0;
630
631         inode = blob->file_inode;
632
633         wimlib_assert(inode != NULL);
634         wimlib_assert(inode->i_num_remaining_streams > 0);
635         if (--inode->i_num_remaining_streams > 0)
636                 return 0;
637
638 #ifdef __WIN32__
639         /* XXX: This logic really should be somewhere else.  */
640
641         /* We want the path to the file, but blob->file_on_disk might actually
642          * refer to a named data stream.  Temporarily strip the named data
643          * stream from the path.  */
644         wchar_t *p_colon = NULL;
645         wchar_t *p_question_mark = NULL;
646         const wchar_t *p_stream_name;
647
648         p_stream_name = path_stream_name(blob->file_on_disk);
649         if (unlikely(p_stream_name)) {
650                 p_colon = (wchar_t *)(p_stream_name - 1);
651                 wimlib_assert(*p_colon == L':');
652                 *p_colon = L'\0';
653         }
654
655         /* We should also use a fake Win32 path instead of an NT path.  */
656         if (!wcsncmp(blob->file_on_disk, L"\\??\\", 4)) {
657                 p_question_mark = &blob->file_on_disk[1];
658                 *p_question_mark = L'\\';
659         }
660 #endif
661
662         ret = done_with_file(blob->file_on_disk, progfunc, progctx);
663
664 #ifdef __WIN32__
665         if (p_colon)
666                 *p_colon = L':';
667         if (p_question_mark)
668                 *p_question_mark = L'?';
669 #endif
670         return ret;
671 }
672
673 /* Handle WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES mode.  */
674 static inline int
675 done_with_blob(struct blob_descriptor *blob, struct write_blobs_ctx *ctx)
676 {
677         if (likely(!(ctx->write_resource_flags &
678                      WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE)))
679                 return 0;
680         return do_done_with_blob(blob, ctx->progress_data.progfunc,
681                                  ctx->progress_data.progctx);
682 }
683
684 /* Begin processing a blob for writing.  */
685 static int
686 write_blob_begin_read(struct blob_descriptor *blob, void *_ctx)
687 {
688         struct write_blobs_ctx *ctx = _ctx;
689         int ret;
690
691         wimlib_assert(blob->size > 0);
692
693         ctx->cur_read_blob_offset = 0;
694         ctx->cur_read_blob_size = blob->size;
695
696         /* As an optimization, we allow some blobs to be "unhashed", meaning
697          * their SHA-1 message digests are unknown.  This is the case with blobs
698          * that are added by scanning a directory tree with wimlib_add_image(),
699          * for example.  Since WIM uses single-instance blobs, we don't know
700          * whether each such blob really needs to be written until it is
701          * actually checksummed, unless it has a unique size.  In such cases we
702          * read and checksum the blob in this function, thereby advancing ahead
703          * of read_blob_list(), which will still provide the data again to
704          * write_blob_process_chunk().  This is okay because an unhashed blob
705          * cannot be in a WIM resource, which might be costly to decompress.  */
706         if (ctx->blob_table != NULL && blob->unhashed && !blob->unique_size) {
707
708                 struct blob_descriptor *new_blob;
709
710                 ret = hash_unhashed_blob(blob, ctx->blob_table, &new_blob);
711                 if (ret)
712                         return ret;
713                 if (new_blob != blob) {
714                         /* Duplicate blob detected.  */
715
716                         if (new_blob->will_be_in_output_wim ||
717                             blob_filtered(new_blob, ctx->filter_ctx))
718                         {
719                                 /* The duplicate blob is already being included
720                                  * in the output WIM, or it would be filtered
721                                  * out if it had been.  Skip writing this blob
722                                  * (and reading it again) entirely, passing its
723                                  * output reference count to the duplicate blob
724                                  * in the former case.  */
725                                 ret = do_write_blobs_progress(&ctx->progress_data,
726                                                               blob->size, 1, true);
727                                 list_del(&blob->write_blobs_list);
728                                 list_del(&blob->blob_table_list);
729                                 if (new_blob->will_be_in_output_wim)
730                                         new_blob->out_refcnt += blob->out_refcnt;
731                                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)
732                                         ctx->cur_write_res_size -= blob->size;
733                                 if (!ret)
734                                         ret = done_with_blob(blob, ctx);
735                                 free_blob_descriptor(blob);
736                                 if (ret)
737                                         return ret;
738                                 return BEGIN_BLOB_STATUS_SKIP_BLOB;
739                         } else {
740                                 /* The duplicate blob can validly be written,
741                                  * but was not marked as such.  Discard the
742                                  * current blob descriptor and use the
743                                  * duplicate, but actually freeing the current
744                                  * blob descriptor must wait until
745                                  * read_blob_list() has finished reading its
746                                  * data.  */
747                                 list_replace(&blob->write_blobs_list,
748                                              &new_blob->write_blobs_list);
749                                 list_replace(&blob->blob_table_list,
750                                              &new_blob->blob_table_list);
751                                 blob->will_be_in_output_wim = 0;
752                                 new_blob->out_refcnt = blob->out_refcnt;
753                                 new_blob->will_be_in_output_wim = 1;
754                                 new_blob->may_send_done_with_file = 0;
755                                 blob = new_blob;
756                         }
757                 }
758         }
759         list_move_tail(&blob->write_blobs_list, &ctx->blobs_being_compressed);
760         return 0;
761 }
762
763 /* Rewrite a blob that was just written compressed (as a non-solid WIM resource)
764  * as uncompressed instead.  */
765 static int
766 write_blob_uncompressed(struct blob_descriptor *blob, struct filedes *out_fd)
767 {
768         int ret;
769         u64 begin_offset = blob->out_reshdr.offset_in_wim;
770         u64 end_offset = out_fd->offset;
771
772         if (filedes_seek(out_fd, begin_offset) == -1)
773                 return 0;
774
775         ret = extract_blob_to_fd(blob, out_fd);
776         if (ret) {
777                 /* Error reading the uncompressed data.  */
778                 if (out_fd->offset == begin_offset &&
779                     filedes_seek(out_fd, end_offset) != -1)
780                 {
781                         /* Nothing was actually written yet, and we successfully
782                          * seeked to the end of the compressed resource, so
783                          * don't issue a hard error; just keep the compressed
784                          * resource instead.  */
785                         WARNING("Recovered compressed resource of "
786                                 "size %"PRIu64", continuing on.", blob->size);
787                         return 0;
788                 }
789                 return ret;
790         }
791
792         wimlib_assert(out_fd->offset - begin_offset == blob->size);
793
794         if (out_fd->offset < end_offset &&
795             0 != ftruncate(out_fd->fd, out_fd->offset))
796         {
797                 ERROR_WITH_ERRNO("Can't truncate output file to "
798                                  "offset %"PRIu64, out_fd->offset);
799                 return WIMLIB_ERR_WRITE;
800         }
801
802         blob->out_reshdr.size_in_wim = blob->size;
803         blob->out_reshdr.flags &= ~(WIM_RESHDR_FLAG_COMPRESSED |
804                                     WIM_RESHDR_FLAG_SOLID);
805         return 0;
806 }
807
808 /* Returns true if the specified blob, which was written as a non-solid
809  * resource, should be truncated from the WIM file and re-written uncompressed.
810  * blob->out_reshdr must be filled in from the initial write of the blob.  */
811 static bool
812 should_rewrite_blob_uncompressed(const struct write_blobs_ctx *ctx,
813                                  const struct blob_descriptor *blob)
814 {
815         /* If the compressed data is smaller than the uncompressed data, prefer
816          * the compressed data.  */
817         if (blob->out_reshdr.size_in_wim < blob->out_reshdr.uncompressed_size)
818                 return false;
819
820         /* If we're not actually writing compressed data, then there's no need
821          * for re-writing.  */
822         if (!ctx->compressor)
823                 return false;
824
825         /* If writing a pipable WIM, everything we write to the output is final
826          * (it might actually be a pipe!).  */
827         if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)
828                 return false;
829
830         /* If the blob that would need to be re-read is located in a solid
831          * resource in another WIM file, then re-reading it would be costly.  So
832          * don't do it.
833          *
834          * Exception: if the compressed size happens to be *exactly* the same as
835          * the uncompressed size, then the blob *must* be written uncompressed
836          * in order to remain compatible with the Windows Overlay Filesystem
837          * Filter Driver (WOF).
838          *
839          * TODO: we are currently assuming that the optimization for
840          * single-chunk resources in maybe_rewrite_blob_uncompressed() prevents
841          * this case from being triggered too often.  To fully prevent excessive
842          * decompressions in degenerate cases, we really should obtain the
843          * uncompressed data by decompressing the compressed data we wrote to
844          * the output file.
845          */
846         if (blob->blob_location == BLOB_IN_WIM &&
847             blob->size != blob->rdesc->uncompressed_size &&
848             blob->size != blob->out_reshdr.size_in_wim)
849                 return false;
850
851         return true;
852 }
853
854 static int
855 maybe_rewrite_blob_uncompressed(struct write_blobs_ctx *ctx,
856                                 struct blob_descriptor *blob)
857 {
858         if (!should_rewrite_blob_uncompressed(ctx, blob))
859                 return 0;
860
861         /* Regular (non-solid) WIM resources with exactly one chunk and
862          * compressed size equal to uncompressed size are exactly the same as
863          * the corresponding compressed data --- since there must be 0 entries
864          * in the chunk table and the only chunk must be stored uncompressed.
865          * In this case, there's no need to rewrite anything.  */
866         if (ctx->chunk_index == 1 &&
867             blob->out_reshdr.size_in_wim == blob->out_reshdr.uncompressed_size)
868         {
869                 blob->out_reshdr.flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
870                 return 0;
871         }
872
873         return write_blob_uncompressed(blob, ctx->out_fd);
874 }
875
876 /* Write the next chunk of (typically compressed) data to the output WIM,
877  * handling the writing of the chunk table.  */
878 static int
879 write_chunk(struct write_blobs_ctx *ctx, const void *cchunk,
880             size_t csize, size_t usize)
881 {
882         int ret;
883         struct blob_descriptor *blob;
884         u32 completed_blob_count;
885         u32 completed_size;
886
887         blob = list_entry(ctx->blobs_being_compressed.next,
888                           struct blob_descriptor, write_blobs_list);
889
890         if (ctx->cur_write_blob_offset == 0 &&
891             !(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
892         {
893                 /* Starting to write a new blob in non-solid mode.  */
894
895                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
896                         ret = write_pwm_blob_header(blob, ctx->out_fd,
897                                                     ctx->compressor != NULL);
898                         if (ret)
899                                 return ret;
900                 }
901
902                 ret = begin_write_resource(ctx, blob->size);
903                 if (ret)
904                         return ret;
905         }
906
907         if (ctx->compressor != NULL) {
908                 /* Record the compressed chunk size.  */
909                 wimlib_assert(ctx->chunk_index < ctx->num_alloc_chunks);
910                 ctx->chunk_csizes[ctx->chunk_index++] = csize;
911
912                 /* If writing a pipable WIM, before the chunk data write a chunk
913                  * header that provides the compressed chunk size.  */
914                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
915                         struct pwm_chunk_hdr chunk_hdr = {
916                                 .compressed_size = cpu_to_le32(csize),
917                         };
918                         ret = full_write(ctx->out_fd, &chunk_hdr,
919                                          sizeof(chunk_hdr));
920                         if (ret)
921                                 goto write_error;
922                 }
923         }
924
925         /* Write the chunk data.  */
926         ret = full_write(ctx->out_fd, cchunk, csize);
927         if (ret)
928                 goto write_error;
929
930         ctx->cur_write_blob_offset += usize;
931
932         completed_size = usize;
933         completed_blob_count = 0;
934         if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
935                 /* Wrote chunk in solid mode.  It may have finished multiple
936                  * blobs.  */
937                 struct blob_descriptor *next_blob;
938
939                 while (blob && ctx->cur_write_blob_offset >= blob->size) {
940
941                         ctx->cur_write_blob_offset -= blob->size;
942
943                         if (ctx->cur_write_blob_offset)
944                                 next_blob = list_entry(blob->write_blobs_list.next,
945                                                       struct blob_descriptor,
946                                                       write_blobs_list);
947                         else
948                                 next_blob = NULL;
949
950                         ret = done_with_blob(blob, ctx);
951                         if (ret)
952                                 return ret;
953                         list_move_tail(&blob->write_blobs_list, &ctx->blobs_in_solid_resource);
954                         completed_blob_count++;
955
956                         blob = next_blob;
957                 }
958         } else {
959                 /* Wrote chunk in non-solid mode.  It may have finished a
960                  * blob.  */
961                 if (ctx->cur_write_blob_offset == blob->size) {
962
963                         wimlib_assert(ctx->cur_write_blob_offset ==
964                                       ctx->cur_write_res_size);
965
966                         ret = end_write_resource(ctx, &blob->out_reshdr);
967                         if (ret)
968                                 return ret;
969
970                         blob->out_reshdr.flags = reshdr_flags_for_blob(blob);
971                         if (ctx->compressor != NULL)
972                                 blob->out_reshdr.flags |= WIM_RESHDR_FLAG_COMPRESSED;
973
974                         ret = maybe_rewrite_blob_uncompressed(ctx, blob);
975                         if (ret)
976                                 return ret;
977
978                         wimlib_assert(blob->out_reshdr.uncompressed_size == blob->size);
979
980                         ctx->cur_write_blob_offset = 0;
981
982                         ret = done_with_blob(blob, ctx);
983                         if (ret)
984                                 return ret;
985                         list_del(&blob->write_blobs_list);
986                         completed_blob_count++;
987                 }
988         }
989
990         return do_write_blobs_progress(&ctx->progress_data, completed_size,
991                                        completed_blob_count, false);
992
993 write_error:
994         ERROR_WITH_ERRNO("Write error");
995         return ret;
996 }
997
998 static int
999 prepare_chunk_buffer(struct write_blobs_ctx *ctx)
1000 {
1001         /* While we are unable to get a new chunk buffer due to too many chunks
1002          * already outstanding, retrieve and write the next compressed chunk. */
1003         while (!(ctx->cur_chunk_buf =
1004                  ctx->compressor->get_chunk_buffer(ctx->compressor)))
1005         {
1006                 const void *cchunk;
1007                 u32 csize;
1008                 u32 usize;
1009                 bool bret;
1010                 int ret;
1011
1012                 bret = ctx->compressor->get_compression_result(ctx->compressor,
1013                                                                &cchunk,
1014                                                                &csize,
1015                                                                &usize);
1016                 wimlib_assert(bret);
1017
1018                 ret = write_chunk(ctx, cchunk, csize, usize);
1019                 if (ret)
1020                         return ret;
1021         }
1022         return 0;
1023 }
1024
1025 /* Process the next chunk of data to be written to a WIM resource.  */
1026 static int
1027 write_blob_process_chunk(const void *chunk, size_t size, void *_ctx)
1028 {
1029         struct write_blobs_ctx *ctx = _ctx;
1030         int ret;
1031         const u8 *chunkptr, *chunkend;
1032
1033         wimlib_assert(size != 0);
1034
1035         if (ctx->compressor == NULL) {
1036                 /* Write chunk uncompressed.  */
1037                 ret = write_chunk(ctx, chunk, size, size);
1038                 if (ret)
1039                         return ret;
1040                 ctx->cur_read_blob_offset += size;
1041                 return 0;
1042         }
1043
1044         /* Submit the chunk for compression, but take into account that the
1045          * @size in which the chunk was provided may not correspond to the
1046          * @out_chunk_size being used for compression.  */
1047         chunkptr = chunk;
1048         chunkend = chunkptr + size;
1049         do {
1050                 size_t needed_chunk_size;
1051                 size_t bytes_consumed;
1052
1053                 if (!ctx->cur_chunk_buf) {
1054                         ret = prepare_chunk_buffer(ctx);
1055                         if (ret)
1056                                 return ret;
1057                 }
1058
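                /* Determine how much uncompressed data the current chunk needs
                 * before it can be submitted for compression.  In solid mode,
                 * chunk boundaries are independent of blob boundaries, so every
                 * chunk is filled to a full @out_chunk_size (a final partial
                 * chunk is flushed later by finish_remaining_chunks()).  In
                 * non-solid mode each blob is its own resource, so the blob's
                 * last chunk may be shorter.  */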
1059                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1060                         needed_chunk_size = ctx->out_chunk_size;
1061                 } else {
1062                         needed_chunk_size = min(ctx->out_chunk_size,
1063                                                 ctx->cur_chunk_buf_filled +
1064                                                         (ctx->cur_read_blob_size -
1065                                                          ctx->cur_read_blob_offset));
1066                 }
1067
1068                 bytes_consumed = min(chunkend - chunkptr,
1069                                      needed_chunk_size - ctx->cur_chunk_buf_filled);
1070
1071                 memcpy(&ctx->cur_chunk_buf[ctx->cur_chunk_buf_filled],
1072                        chunkptr, bytes_consumed);
1073
1074                 chunkptr += bytes_consumed;
1075                 ctx->cur_read_blob_offset += bytes_consumed;
1076                 ctx->cur_chunk_buf_filled += bytes_consumed;
1077
1078                 if (ctx->cur_chunk_buf_filled == needed_chunk_size) {
1079                         ctx->compressor->signal_chunk_filled(ctx->compressor,
1080                                                              ctx->cur_chunk_buf_filled);
1081                         ctx->cur_chunk_buf = NULL;
1082                         ctx->cur_chunk_buf_filled = 0;
1083                 }
1084         } while (chunkptr != chunkend);
1085         return 0;
1086 }
1087
1088 /* Finish processing a blob for writing.  It may not have been completely
1089  * written yet, as the chunk_compressor implementation may still have chunks
1090  * buffered or being compressed.  */
1091 static int
1092 write_blob_end_read(struct blob_descriptor *blob, int status, void *_ctx)
1093 {
1094         struct write_blobs_ctx *ctx = _ctx;
1095
1096         wimlib_assert(ctx->cur_read_blob_offset == ctx->cur_read_blob_size || status);
1097
1098         if (!blob->will_be_in_output_wim) {
1099                 /* The blob was a duplicate.  Now that its data has finished
1100                  * being read, it is being discarded in favor of the duplicate
1101                  * entry.  It therefore is no longer needed, and we can fire the
1102                  * DONE_WITH_FILE callback because the file will not be read
1103                  * again.
1104                  *
1105                  * Note: we can't yet fire DONE_WITH_FILE for non-duplicate
1106                  * blobs, since it needs to be possible to re-read the file if
1107                  * it does not compress to less than its original size.  */
1108                 if (!status)
1109                         status = done_with_blob(blob, ctx);
1110                 free_blob_descriptor(blob);
1111         } else if (!status && blob->unhashed && ctx->blob_table != NULL) {
1112                 /* The blob was not a duplicate and was previously unhashed.
1113                  * Since we passed COMPUTE_MISSING_BLOB_HASHES to
1114                  * read_blob_list(), blob->hash is now computed and valid.  So
1115                  * turn this blob into a "hashed" blob.  */
1116                 list_del(&blob->unhashed_list);
1117                 blob_table_insert(ctx->blob_table, blob);
1118                 blob->unhashed = 0;
1119         }
1120         return status;
1121 }
1122
1123 /* Compute statistics about a list of blobs that will be written.
1124  *
1125  * Assumes the blobs are sorted such that all blobs located in each distinct WIM
1126  * (specified by WIMStruct) are together.  */
1127 static void
1128 compute_blob_list_stats(struct list_head *blob_list,
1129                         struct write_blobs_ctx *ctx)
1130 {
1131         struct blob_descriptor *blob;
1132         u64 total_bytes = 0;
1133         u64 num_blobs = 0;
1134         u64 total_parts = 0;
1135         WIMStruct *prev_wim_part = NULL;
1136
1137         list_for_each_entry(blob, blob_list, write_blobs_list) {
1138                 num_blobs++;
1139                 total_bytes += blob->size;
1140                 if (blob->blob_location == BLOB_IN_WIM) {
1141                         if (prev_wim_part != blob->rdesc->wim) {
1142                                 prev_wim_part = blob->rdesc->wim;
1143                                 total_parts++;
1144                         }
1145                 }
1146         }
1147         ctx->progress_data.progress.write_streams.total_bytes       = total_bytes;
1148         ctx->progress_data.progress.write_streams.total_streams     = num_blobs;
1149         ctx->progress_data.progress.write_streams.completed_bytes   = 0;
1150         ctx->progress_data.progress.write_streams.completed_streams = 0;
1151         ctx->progress_data.progress.write_streams.compression_type  = ctx->out_ctype;
1152         ctx->progress_data.progress.write_streams.total_parts       = total_parts;
1153         ctx->progress_data.progress.write_streams.completed_parts   = 0;
1154         ctx->progress_data.next_progress = 0;
1155 }
1156
1157 /* Find blobs in @blob_list that can be copied to the output WIM in raw form
1158  * rather than compressed.  Delete these blobs from @blob_list and move them to
1159  * @raw_copy_blobs.  Return the total uncompressed size of the blobs that need
1160  * to be compressed.  */
1161 static u64
1162 find_raw_copy_blobs(struct list_head *blob_list,
1163                     int write_resource_flags,
1164                     int out_ctype,
1165                     u32 out_chunk_size,
1166                     struct list_head *raw_copy_blobs)
1167 {
1168         struct blob_descriptor *blob, *tmp;
1169         u64 num_bytes_to_compress = 0;
1170
1171         INIT_LIST_HEAD(raw_copy_blobs);
1172
1173         /* Initialize temporary raw_copy_ok flag.  */
1174         list_for_each_entry(blob, blob_list, write_blobs_list)
1175                 if (blob->blob_location == BLOB_IN_WIM)
1176                         blob->rdesc->raw_copy_ok = 0;
1177
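        /* Classify each blob.  If another blob from the same (solid) WIM
         * resource has already been found raw-copyable, route this blob to the
         * raw-copy list as well, since such a resource is copied as a whole.  */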
1178         list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
1179                 if (blob->blob_location == BLOB_IN_WIM &&
1180                     blob->rdesc->raw_copy_ok)
1181                 {
1182                         list_move_tail(&blob->write_blobs_list,
1183                                        raw_copy_blobs);
1184                 } else if (can_raw_copy(blob, write_resource_flags,
1185                                         out_ctype, out_chunk_size))
1186                 {
1187                         blob->rdesc->raw_copy_ok = 1;
1188                         list_move_tail(&blob->write_blobs_list,
1189                                        raw_copy_blobs);
1190                 } else {
1191                         num_bytes_to_compress += blob->size;
1192                 }
1193         }
1194
1195         return num_bytes_to_compress;
1196 }
1197
1198 /* Copy a raw compressed resource located in another WIM file to the WIM file
1199  * being written.  */
1200 static int
1201 write_raw_copy_resource(struct wim_resource_descriptor *in_rdesc,
1202                         struct filedes *out_fd)
1203 {
1204         u64 cur_read_offset;
1205         u64 end_read_offset;
1206         u8 buf[BUFFER_SIZE];
1207         size_t bytes_to_read;
1208         int ret;
1209         struct filedes *in_fd;
1210         struct blob_descriptor *blob;
1211         u64 out_offset_in_wim;
1212
1213         /* Copy the raw data.  */
1214         cur_read_offset = in_rdesc->offset_in_wim;
1215         end_read_offset = cur_read_offset + in_rdesc->size_in_wim;
1216
1217         out_offset_in_wim = out_fd->offset;
1218
1219         if (in_rdesc->is_pipable) {
1220                 if (cur_read_offset < sizeof(struct pwm_blob_hdr))
1221                         return WIMLIB_ERR_INVALID_PIPABLE_WIM;
1222                 cur_read_offset -= sizeof(struct pwm_blob_hdr);
1223                 out_offset_in_wim += sizeof(struct pwm_blob_hdr);
1224         }
1225         in_fd = &in_rdesc->wim->in_fd;
1226         wimlib_assert(cur_read_offset != end_read_offset);
1227         do {
1228
1229                 bytes_to_read = min(sizeof(buf), end_read_offset - cur_read_offset);
1230
1231                 ret = full_pread(in_fd, buf, bytes_to_read, cur_read_offset);
1232                 if (ret)
1233                         return ret;
1234
1235                 ret = full_write(out_fd, buf, bytes_to_read);
1236                 if (ret)
1237                         return ret;
1238
1239                 cur_read_offset += bytes_to_read;
1240
1241         } while (cur_read_offset != end_read_offset);
1242
1243         list_for_each_entry(blob, &in_rdesc->blob_list, rdesc_node) {
1244                 if (blob->will_be_in_output_wim) {
1245                         blob_set_out_reshdr_for_reuse(blob);
1246                         if (in_rdesc->flags & WIM_RESHDR_FLAG_SOLID)
1247                                 blob->out_res_offset_in_wim = out_offset_in_wim;
1248                         else
1249                                 blob->out_reshdr.offset_in_wim = out_offset_in_wim;
1250
1251                 }
1252         }
1253         return 0;
1254 }
1255
1256 /* Copy a list of raw compressed resources located in other WIM file(s) to the
1257  * WIM file being written.  */
1258 static int
1259 write_raw_copy_resources(struct list_head *raw_copy_blobs,
1260                          struct filedes *out_fd,
1261                          struct write_blobs_progress_data *progress_data)
1262 {
1263         struct blob_descriptor *blob;
1264         int ret;
1265
1266         list_for_each_entry(blob, raw_copy_blobs, write_blobs_list)
1267                 blob->rdesc->raw_copy_ok = 1;
1268
1269         list_for_each_entry(blob, raw_copy_blobs, write_blobs_list) {
1270                 if (blob->rdesc->raw_copy_ok) {
1271                         /* Write each solid resource only one time.  */
1272                         ret = write_raw_copy_resource(blob->rdesc, out_fd);
1273                         if (ret)
1274                                 return ret;
1275                         blob->rdesc->raw_copy_ok = 0;
1276                 }
1277                 ret = do_write_blobs_progress(progress_data, blob->size,
1278                                               1, false);
1279                 if (ret)
1280                         return ret;
1281         }
1282         return 0;
1283 }
1284
1285 /* Wait for and write all chunks pending in the compressor.  */
1286 static int
1287 finish_remaining_chunks(struct write_blobs_ctx *ctx)
1288 {
1289         const void *cdata;
1290         u32 csize;
1291         u32 usize;
1292         int ret;
1293
1294         if (ctx->compressor == NULL)
1295                 return 0;
1296
1297         if (ctx->cur_chunk_buf_filled != 0) {
1298                 ctx->compressor->signal_chunk_filled(ctx->compressor,
1299                                                      ctx->cur_chunk_buf_filled);
1300         }
1301
1302         while (ctx->compressor->get_compression_result(ctx->compressor, &cdata,
1303                                                        &csize, &usize))
1304         {
1305                 ret = write_chunk(ctx, cdata, csize, usize);
1306                 if (ret)
1307                         return ret;
1308         }
1309         return 0;
1310 }
1311
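/* Sanity-check the blob list: every blob queued for writing must be marked for
 * inclusion in the output WIM and must have nonzero size.  */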
1312 static void
1313 validate_blob_list(struct list_head *blob_list)
1314 {
1315         struct blob_descriptor *blob;
1316
1317         list_for_each_entry(blob, blob_list, write_blobs_list) {
1318                 wimlib_assert(blob->will_be_in_output_wim);
1319                 wimlib_assert(blob->size != 0);
1320         }
1321 }
1322
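/* Return true if the blob's data is sourced from a file on disk (as opposed
 * to, for example, a resource in another WIM or an in-memory buffer).  */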
1323 static inline bool
1324 blob_is_in_file(const struct blob_descriptor *blob)
1325 {
1326         return blob->blob_location == BLOB_IN_FILE_ON_DISK
1327 #ifdef __WIN32__
1328             || blob->blob_location == BLOB_IN_WINNT_FILE_ON_DISK
1329             || blob->blob_location == BLOB_WIN32_ENCRYPTED
1330 #endif
1331            ;
1332 }
1333
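/* When "done with file" progress messages have been requested, record how many
 * blobs in the list are backed by each on-disk file, so that the message for a
 * file can be sent once its last such blob has been read.  */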
1334 static void
1335 init_done_with_file_info(struct list_head *blob_list)
1336 {
1337         struct blob_descriptor *blob;
1338
1339         list_for_each_entry(blob, blob_list, write_blobs_list) {
1340                 if (blob_is_in_file(blob)) {
1341                         blob->file_inode->i_num_remaining_streams = 0;
1342                         blob->may_send_done_with_file = 1;
1343                 } else {
1344                         blob->may_send_done_with_file = 0;
1345                 }
1346         }
1347
1348         list_for_each_entry(blob, blob_list, write_blobs_list)
1349                 if (blob->may_send_done_with_file)
1350                         blob->file_inode->i_num_remaining_streams++;
1351 }
1352
1353 /*
1354  * Write a list of blobs to the output WIM file.
1355  *
1356  * @blob_list
1357  *      The list of blobs to write, specified by a list of 'struct blob_descriptor' linked
1358  *      by the 'write_blobs_list' member.
1359  *
1360  * @out_fd
1361  *      The file descriptor, opened for writing, to which to write the blobs.
1362  *
1363  * @write_resource_flags
1364  *      Flags to modify how the blobs are written:
1365  *
1366  *      WRITE_RESOURCE_FLAG_RECOMPRESS:
1367  *              Force compression of all resources, even if they could otherwise
1368  *              be re-used by copying the raw data, due to being located in a WIM
1369  *              file with compatible compression parameters.
1370  *
1371  *      WRITE_RESOURCE_FLAG_PIPABLE:
1372  *              Write the resources in the wimlib-specific pipable format, and
1373  *              furthermore do so in such a way that no seeking backwards in
1374  *              @out_fd will be performed (so it may be a pipe).
1375  *
1376  *      WRITE_RESOURCE_FLAG_SOLID:
1377  *              Combine all the blobs into a single resource rather than writing
1378  *              them in separate resources.  This flag is only valid if the WIM
1379  *              version number has been, or will be, set to WIM_VERSION_SOLID.
1380  *              This flag may not be combined with WRITE_RESOURCE_FLAG_PIPABLE.
1381  *
1382  * @out_ctype
1383  *      Compression format to use in the output resources, specified as one of
1384  *      the WIMLIB_COMPRESSION_TYPE_* constants.  WIMLIB_COMPRESSION_TYPE_NONE
1385  *      is allowed.
1386  *
1387  * @out_chunk_size
1388  *      Compression chunk size to use in the output resources.  It must be a
1389  *      valid chunk size for the specified compression format @out_ctype, unless
1390  *      @out_ctype is WIMLIB_COMPRESSION_TYPE_NONE, in which case this parameter
1391  *      is ignored.
1392  *
1393  * @num_threads
1394  *      Number of threads to use to compress data.  If 0, a default number of
1395  *      threads will be chosen.  The number of threads still may be decreased
1396  *      from the specified value if insufficient memory is detected.
1397  *
1398  * @blob_table
1399  *      If on-the-fly deduplication of unhashed blobs is desired, this parameter
1400  *      must be a pointer to the blob table for the WIMStruct on whose behalf the
1401  *      blobs are being written.  Otherwise, this parameter can be NULL.
1402  *
1403  * @filter_ctx
1404  *      If on-the-fly deduplication of unhashed blobs is desired, this parameter
1405  *      can be a pointer to a context for blob filtering used to detect whether
1406  *      the duplicate blob has been hard-filtered or not.  If no blobs are
1407  *      hard-filtered or no blobs are unhashed, this parameter can be NULL.
1408  *
1409  * This function will write the blobs in @blob_list to resources in
1410  * consecutive positions in the output WIM file, or to a single solid resource
1411  * if WRITE_RESOURCE_FLAG_SOLID was specified in @write_resource_flags.  In both
1412  * cases, the @out_reshdr of the `struct blob_descriptor' for each blob written will be
1413  * updated to specify its location, size, and flags in the output WIM.  In the
1414  * solid resource case, WIM_RESHDR_FLAG_SOLID will be set in the @flags field of
1415  * each @out_reshdr, and furthermore @out_res_offset_in_wim and
1416  * @out_res_size_in_wim of each @out_reshdr will be set to the offset and size,
1417  * respectively, in the output WIM of the solid resource containing the
1418  * corresponding blob.
1419  *
1420  * Each of the blobs to write may be in any location supported by the
1421  * resource-handling code (specifically, read_blob_list()), such as the contents
1422  * of an external file that has been logically added to the output WIM, or a blob
1423  * in another WIM file that has been imported, or even a blob in the "same" WIM
1424  * file of which a modified copy is being written.  In the case that a blob is
1425  * already in a WIM file and uses compatible compression parameters, by default
1426  * this function will re-use the raw data instead of decompressing it, then
1427  * recompressing it; however, with WRITE_RESOURCE_FLAG_RECOMPRESS
1428  * specified in @write_resource_flags, this is not done.
1429  *
1430  * In addition, this function requires that the
1431  * @will_be_in_output_wim member be set to 1 on all blobs in @blob_list as well
1432  * as any other blobs not in @blob_list that will be in the output WIM file, but
1433  * set to 0 on any other blobs in the output WIM's blob table or sharing a solid
1434  * resource with a blob in @blob_list.  Furthermore, if on-the-fly
1435  * deduplication of blobs is possible, then all blobs in @blob_list must also be
1436  * linked by @blob_table_list along with any other blobs that have
1437  * @will_be_in_output_wim set.
1438  *
1439  * This function handles on-the-fly deduplication of blobs for which SHA-1
1440  * message digests have not yet been calculated.  Such blobs may or may not need
1441  * to be written.  If @blob_table is non-NULL, then each blob in @blob_list that
1442  * has @unhashed set but not @unique_size set is checksummed immediately before
1443  * it would otherwise be read for writing in order to determine if it is
1444  * identical to another blob already being written or one that would be filtered
1445  * out of the output WIM using blob_filtered() with the context @filter_ctx.
1446  * Each such duplicate blob will be removed from @blob_list, its reference count
1447  * transferred to the pre-existing duplicate blob, its memory freed, and will not
1448  * be written.  Alternatively, if a blob in @blob_list is a duplicate with any
1449  * blob in @blob_table that has not been marked for writing or would not be
1450  * hard-filtered, it is freed and the pre-existing duplicate is written instead,
1451  * taking ownership of the reference count and slot in the @blob_table_list.
1452  *
1453  * Returns 0 if every blob was either written successfully or did not need to be
1454  * written; otherwise returns a non-zero error code.
1455  */
1456 static int
1457 write_blob_list(struct list_head *blob_list,
1458                 struct filedes *out_fd,
1459                 int write_resource_flags,
1460                 int out_ctype,
1461                 u32 out_chunk_size,
1462                 unsigned num_threads,
1463                 struct blob_table *blob_table,
1464                 struct filter_context *filter_ctx,
1465                 wimlib_progress_func_t progfunc,
1466                 void *progctx)
1467 {
1468         int ret;
1469         struct write_blobs_ctx ctx;
1470         struct list_head raw_copy_blobs;
1471
1472         wimlib_assert((write_resource_flags &
1473                        (WRITE_RESOURCE_FLAG_SOLID |
1474                         WRITE_RESOURCE_FLAG_PIPABLE)) !=
1475                                 (WRITE_RESOURCE_FLAG_SOLID |
1476                                  WRITE_RESOURCE_FLAG_PIPABLE));
1477
1478         validate_blob_list(blob_list);
1479
1480         if (list_empty(blob_list))
1481                 return 0;
1482
1483         /* If needed, set auxiliary information so that we can detect when the
1484          * library has finished using each external file.  */
1485         if (unlikely(write_resource_flags & WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE))
1486                 init_done_with_file_info(blob_list);
1487
1488         memset(&ctx, 0, sizeof(ctx));
1489
1490         ctx.out_fd = out_fd;
1491         ctx.blob_table = blob_table;
1492         ctx.out_ctype = out_ctype;
1493         ctx.out_chunk_size = out_chunk_size;
1494         ctx.write_resource_flags = write_resource_flags;
1495         ctx.filter_ctx = filter_ctx;
1496
1497         /*
1498          * We normally sort the blobs to write by a "sequential" order that is
1499          * optimized for reading.  But when using solid compression, we instead
1500          * sort the blobs by file extension and file name (when applicable; and
1501          * we don't do this for blobs from solid resources) so that similar
1502          * files are grouped together, which improves the compression ratio.
1503          * This is somewhat of a hack since a blob does not necessarily
1504          * correspond one-to-one with a filename, nor is there any guarantee
1505          * that two files with similar names or extensions are actually similar
1506          * in content.  A potential TODO is to sort the blobs based on some
1507          * measure of similarity of their actual contents.
1508          */
1509
1510         ret = sort_blob_list_by_sequential_order(blob_list,
1511                                                  offsetof(struct blob_descriptor,
1512                                                           write_blobs_list));
1513         if (ret)
1514                 return ret;
1515
1516         compute_blob_list_stats(blob_list, &ctx);
1517
1518         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID_SORT) {
1519                 ret = sort_blob_list_for_solid_compression(blob_list);
1520                 if (unlikely(ret))
1521                         WARNING("Failed to sort blobs for solid compression. Continuing anyway.");
1522         }
1523
1524         ctx.progress_data.progfunc = progfunc;
1525         ctx.progress_data.progctx = progctx;
1526
1527         ctx.num_bytes_to_compress = find_raw_copy_blobs(blob_list,
1528                                                         write_resource_flags,
1529                                                         out_ctype,
1530                                                         out_chunk_size,
1531                                                         &raw_copy_blobs);
1532
1533         if (ctx.num_bytes_to_compress == 0)
1534                 goto out_write_raw_copy_resources;
1535
1536         /* Unless uncompressed output was required, allocate a chunk_compressor
1537          * to do compression.  There are serial and parallel implementations of
1538          * the chunk_compressor interface.  We default to parallel using the
1539          * specified number of threads, unless the upper bound on the number
1540          * bytes needing to be compressed is less than a heuristic value.  */
1541         if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
1542
1543         #ifdef ENABLE_MULTITHREADED_COMPRESSION
1544                 if (ctx.num_bytes_to_compress > max(2000000, out_chunk_size)) {
1545                         ret = new_parallel_chunk_compressor(out_ctype,
1546                                                             out_chunk_size,
1547                                                             num_threads, 0,
1548                                                             &ctx.compressor);
1549                         if (ret > 0) {
1550                                 WARNING("Couldn't create parallel chunk compressor: %"TS".\n"
1551                                         "          Falling back to single-threaded compression.",
1552                                         wimlib_get_error_string(ret));
1553                         }
1554                 }
1555         #endif
1556
1557                 if (ctx.compressor == NULL) {
1558                         ret = new_serial_chunk_compressor(out_ctype, out_chunk_size,
1559                                                           &ctx.compressor);
1560                         if (ret)
1561                                 goto out_destroy_context;
1562                 }
1563         }
1564
1565         if (ctx.compressor)
1566                 ctx.progress_data.progress.write_streams.num_threads = ctx.compressor->num_threads;
1567         else
1568                 ctx.progress_data.progress.write_streams.num_threads = 1;
1569
1570         INIT_LIST_HEAD(&ctx.blobs_being_compressed);
1571         INIT_LIST_HEAD(&ctx.blobs_in_solid_resource);
1572
1573         ret = call_progress(ctx.progress_data.progfunc,
1574                             WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
1575                             &ctx.progress_data.progress,
1576                             ctx.progress_data.progctx);
1577         if (ret)
1578                 goto out_destroy_context;
1579
1580         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1581                 ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress);
1582                 if (ret)
1583                         goto out_destroy_context;
1584         }
1585
1586         /* Read the list of blobs needing to be compressed, using the specified
1587          * callbacks to execute processing of the data.  */
1588
1589         struct read_blob_callbacks cbs = {
1590                 .begin_blob     = write_blob_begin_read,
1591                 .consume_chunk  = write_blob_process_chunk,
1592                 .end_blob       = write_blob_end_read,
1593                 .ctx            = &ctx,
1594         };
1595
1596         ret = read_blob_list(blob_list,
1597                              offsetof(struct blob_descriptor, write_blobs_list),
1598                              &cbs,
1599                              BLOB_LIST_ALREADY_SORTED |
1600                                 VERIFY_BLOB_HASHES |
1601                                 COMPUTE_MISSING_BLOB_HASHES);
1602
1603         if (ret)
1604                 goto out_destroy_context;
1605
1606         ret = finish_remaining_chunks(&ctx);
1607         if (ret)
1608                 goto out_destroy_context;
1609
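        /* For solid output, give each blob an offset relative to the start of
         * the solid resource and record the location of the solid resource
         * itself in the out_res_* fields, as described in the comment above
         * this function.  */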
1610         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1611                 struct wim_reshdr reshdr;
1612                 struct blob_descriptor *blob;
1613                 u64 offset_in_res;
1614
1615                 ret = end_write_resource(&ctx, &reshdr);
1616                 if (ret)
1617                         goto out_destroy_context;
1618
1619                 offset_in_res = 0;
1620                 list_for_each_entry(blob, &ctx.blobs_in_solid_resource, write_blobs_list) {
1621                         blob->out_reshdr.size_in_wim = blob->size;
1622                         blob->out_reshdr.flags = reshdr_flags_for_blob(blob) |
1623                                                  WIM_RESHDR_FLAG_SOLID;
1624                         blob->out_reshdr.uncompressed_size = 0;
1625                         blob->out_reshdr.offset_in_wim = offset_in_res;
1626                         blob->out_res_offset_in_wim = reshdr.offset_in_wim;
1627                         blob->out_res_size_in_wim = reshdr.size_in_wim;
1628                         blob->out_res_uncompressed_size = reshdr.uncompressed_size;
1629                         offset_in_res += blob->size;
1630                 }
1631                 wimlib_assert(offset_in_res == reshdr.uncompressed_size);
1632         }
1633
1634 out_write_raw_copy_resources:
1635         /* Copy any compressed resources for which the raw data can be reused
1636          * without decompression.  */
1637         ret = write_raw_copy_resources(&raw_copy_blobs, ctx.out_fd,
1638                                        &ctx.progress_data);
1639
1640 out_destroy_context:
1641         FREE(ctx.chunk_csizes);
1642         if (ctx.compressor)
1643                 ctx.compressor->destroy(ctx.compressor);
1644         return ret;
1645 }
1646
1647
1648 static int
1649 write_file_data_blobs(WIMStruct *wim,
1650                       struct list_head *blob_list,
1651                       int write_flags,
1652                       unsigned num_threads,
1653                       struct filter_context *filter_ctx)
1654 {
1655         int out_ctype;
1656         u32 out_chunk_size;
1657         int write_resource_flags;
1658
1659         write_resource_flags = write_flags_to_resource_flags(write_flags);
1660
1661         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1662                 out_chunk_size = wim->out_solid_chunk_size;
1663                 out_ctype = wim->out_solid_compression_type;
1664         } else {
1665                 out_chunk_size = wim->out_chunk_size;
1666                 out_ctype = wim->out_compression_type;
1667         }
1668
1669         return write_blob_list(blob_list,
1670                                &wim->out_fd,
1671                                write_resource_flags,
1672                                out_ctype,
1673                                out_chunk_size,
1674                                num_threads,
1675                                wim->blob_table,
1676                                filter_ctx,
1677                                wim->progfunc,
1678                                wim->progctx);
1679 }
1680
1681 /* Write the contents of the specified blob as a WIM resource.  */
1682 static int
1683 write_wim_resource(struct blob_descriptor *blob,
1684                    struct filedes *out_fd,
1685                    int out_ctype,
1686                    u32 out_chunk_size,
1687                    int write_resource_flags)
1688 {
1689         LIST_HEAD(blob_list);
1690         list_add(&blob->write_blobs_list, &blob_list);
1691         blob->will_be_in_output_wim = 1;
1692         return write_blob_list(&blob_list,
1693                                out_fd,
1694                                write_resource_flags & ~WRITE_RESOURCE_FLAG_SOLID,
1695                                out_ctype,
1696                                out_chunk_size,
1697                                1,
1698                                NULL,
1699                                NULL,
1700                                NULL,
1701                                NULL);
1702 }
1703
1704 /* Write the contents of the specified buffer as a WIM resource.  */
1705 int
1706 write_wim_resource_from_buffer(const void *buf,
1707                                size_t buf_size,
1708                                bool is_metadata,
1709                                struct filedes *out_fd,
1710                                int out_ctype,
1711                                u32 out_chunk_size,
1712                                struct wim_reshdr *out_reshdr,
1713                                u8 *hash_ret,
1714                                int write_resource_flags)
1715 {
1716         int ret;
1717         struct blob_descriptor blob;
1718
1719         if (unlikely(buf_size == 0)) {
1720                 zero_reshdr(out_reshdr);
1721                 if (hash_ret)
1722                         copy_hash(hash_ret, zero_hash);
1723                 return 0;
1724         }
1725
1726         blob_set_is_located_in_attached_buffer(&blob, (void *)buf, buf_size);
1727         sha1_buffer(buf, buf_size, blob.hash);
1728         blob.unhashed = 0;
1729         blob.is_metadata = is_metadata;
1730
1731         ret = write_wim_resource(&blob, out_fd, out_ctype, out_chunk_size,
1732                                  write_resource_flags);
1733         if (ret)
1734                 return ret;
1735
1736         copy_reshdr(out_reshdr, &blob.out_reshdr);
1737
1738         if (hash_ret)
1739                 copy_hash(hash_ret, blob.hash);
1740         return 0;
1741 }
1742
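/*
 * Illustrative sketch (kept out of the build with #if 0) of how a caller might
 * use write_wim_resource_from_buffer() above.  The helper name and buffer
 * contents are hypothetical; a real caller would normally pass the output
 * WIM's compression type and chunk size rather than
 * WIMLIB_COMPRESSION_TYPE_NONE.
 */
#if 0
static int
example_write_small_resource(struct filedes *out_fd)
{
        static const char data[] = "example resource data";
        struct wim_reshdr reshdr;
        u8 hash[SHA1_HASH_SIZE];  /* SHA-1 digest of the written resource  */

        /* out_chunk_size is ignored when the compression type is NONE.  */
        return write_wim_resource_from_buffer(data, sizeof(data) - 1,
                                              false,     /* is_metadata */
                                              out_fd,
                                              WIMLIB_COMPRESSION_TYPE_NONE,
                                              0,         /* out_chunk_size */
                                              &reshdr, hash,
                                              0);        /* write_resource_flags */
}
#endif
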
1743 struct blob_size_table {
1744         struct hlist_head *array;
1745         size_t num_entries;
1746         size_t capacity;
1747 };
1748
1749 static int
1750 init_blob_size_table(struct blob_size_table *tab, size_t capacity)
1751 {
1752         tab->array = CALLOC(capacity, sizeof(tab->array[0]));
1753         if (tab->array == NULL)
1754                 return WIMLIB_ERR_NOMEM;
1755         tab->num_entries = 0;
1756         tab->capacity = capacity;
1757         return 0;
1758 }
1759
1760 static void
1761 destroy_blob_size_table(struct blob_size_table *tab)
1762 {
1763         FREE(tab->array);
1764 }
1765
1766 static int
1767 blob_size_table_insert(struct blob_descriptor *blob, void *_tab)
1768 {
1769         struct blob_size_table *tab = _tab;
1770         size_t pos;
1771         struct blob_descriptor *same_size_blob;
1772
1773         pos = hash_u64(blob->size) % tab->capacity;
1774         blob->unique_size = 1;
1775         hlist_for_each_entry(same_size_blob, &tab->array[pos], hash_list_2) {
1776                 if (same_size_blob->size == blob->size) {
1777                         blob->unique_size = 0;
1778                         same_size_blob->unique_size = 0;
1779                         break;
1780                 }
1781         }
1782
1783         hlist_add_head(&blob->hash_list_2, &tab->array[pos]);
1784         tab->num_entries++;
1785         return 0;
1786 }
1787
1788 struct find_blobs_ctx {
1789         WIMStruct *wim;
1790         int write_flags;
1791         struct list_head blob_list;
1792         struct blob_size_table blob_size_tab;
1793 };
1794
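/* Add @blob to @blob_list if it is not already marked for inclusion in the
 * output WIM, and account for @nref additional references to it.  */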
1795 static void
1796 reference_blob_for_write(struct blob_descriptor *blob,
1797                          struct list_head *blob_list, u32 nref)
1798 {
1799         if (!blob->will_be_in_output_wim) {
1800                 blob->out_refcnt = 0;
1801                 list_add_tail(&blob->write_blobs_list, blob_list);
1802                 blob->will_be_in_output_wim = 1;
1803         }
1804         blob->out_refcnt += nref;
1805 }
1806
1807 static int
1808 fully_reference_blob_for_write(struct blob_descriptor *blob, void *_blob_list)
1809 {
1810         struct list_head *blob_list = _blob_list;
1811         blob->will_be_in_output_wim = 0;
1812         reference_blob_for_write(blob, blob_list, blob->refcnt);
1813         return 0;
1814 }
1815
1816 static int
1817 inode_find_blobs_to_reference(const struct wim_inode *inode,
1818                               const struct blob_table *table,
1819                               struct list_head *blob_list)
1820 {
1821         wimlib_assert(inode->i_nlink > 0);
1822
1823         for (unsigned i = 0; i < inode->i_num_streams; i++) {
1824                 struct blob_descriptor *blob;
1825                 const u8 *hash;
1826
1827                 blob = stream_blob(&inode->i_streams[i], table);
1828                 if (blob) {
1829                         reference_blob_for_write(blob, blob_list, inode->i_nlink);
1830                 } else {
1831                         hash = stream_hash(&inode->i_streams[i]);
1832                         if (!is_zero_hash(hash))
1833                                 return blob_not_found_error(inode, hash);
1834                 }
1835         }
1836         return 0;
1837 }
1838
1839 static int
1840 do_blob_set_not_in_output_wim(struct blob_descriptor *blob, void *_ignore)
1841 {
1842         blob->will_be_in_output_wim = 0;
1843         return 0;
1844 }
1845
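/* for_image() callback: find the blobs referenced by the current image and add
 * them to the list passed via wim->private.  */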
1846 static int
1847 image_find_blobs_to_reference(WIMStruct *wim)
1848 {
1849         struct wim_image_metadata *imd;
1850         struct wim_inode *inode;
1851         struct blob_descriptor *blob;
1852         struct list_head *blob_list;
1853         int ret;
1854
1855         imd = wim_get_current_image_metadata(wim);
1856
1857         image_for_each_unhashed_blob(blob, imd)
1858                 blob->will_be_in_output_wim = 0;
1859
1860         blob_list = wim->private;
1861         image_for_each_inode(inode, imd) {
1862                 ret = inode_find_blobs_to_reference(inode,
1863                                                     wim->blob_table,
1864                                                     blob_list);
1865                 if (ret)
1866                         return ret;
1867         }
1868         return 0;
1869 }
1870
1871 static int
1872 prepare_unfiltered_list_of_blobs_in_output_wim(WIMStruct *wim,
1873                                                int image,
1874                                                int blobs_ok,
1875                                                struct list_head *blob_list_ret)
1876 {
1877         int ret;
1878
1879         INIT_LIST_HEAD(blob_list_ret);
1880
1881         if (blobs_ok && (image == WIMLIB_ALL_IMAGES ||
1882                          (image == 1 && wim->hdr.image_count == 1)))
1883         {
1884                 /* Fast case:  Assume that all blobs are being written and that
1885                  * the reference counts are correct.  */
1886                 struct blob_descriptor *blob;
1887                 struct wim_image_metadata *imd;
1888                 unsigned i;
1889
1890                 for_blob_in_table(wim->blob_table,
1891                                   fully_reference_blob_for_write,
1892                                   blob_list_ret);
1893
1894                 for (i = 0; i < wim->hdr.image_count; i++) {
1895                         imd = wim->image_metadata[i];
1896                         image_for_each_unhashed_blob(blob, imd)
1897                                 fully_reference_blob_for_write(blob, blob_list_ret);
1898                 }
1899         } else {
1900                 /* Slow case:  Walk through the images being written and
1901                  * determine the blobs referenced.  */
1902                 for_blob_in_table(wim->blob_table,
1903                                   do_blob_set_not_in_output_wim, NULL);
1904                 wim->private = blob_list_ret;
1905                 ret = for_image(wim, image, image_find_blobs_to_reference);
1906                 if (ret)
1907                         return ret;
1908         }
1909
1910         return 0;
1911 }
1912
1913 struct insert_other_if_hard_filtered_ctx {
1914         struct blob_size_table *tab;
1915         struct filter_context *filter_ctx;
1916 };
1917
1918 static int
1919 insert_other_if_hard_filtered(struct blob_descriptor *blob, void *_ctx)
1920 {
1921         struct insert_other_if_hard_filtered_ctx *ctx = _ctx;
1922
1923         if (!blob->will_be_in_output_wim &&
1924             blob_hard_filtered(blob, ctx->filter_ctx))
1925                 blob_size_table_insert(blob, ctx->tab);
1926         return 0;
1927 }
1928
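/* Mark which blobs have a size that is unique among the blobs to be written
 * and the hard-filtered blobs in the blob table.  A blob with a unique size
 * cannot be a duplicate, so it can skip the early checksumming done for
 * on-the-fly deduplication (see write_blob_list()).  */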
1929 static int
1930 determine_blob_size_uniquity(struct list_head *blob_list,
1931                              struct blob_table *lt,
1932                              struct filter_context *filter_ctx)
1933 {
1934         int ret;
1935         struct blob_size_table tab;
1936         struct blob_descriptor *blob;
1937
1938         ret = init_blob_size_table(&tab, 9001);
1939         if (ret)
1940                 return ret;
1941
1942         if (may_hard_filter_blobs(filter_ctx)) {
1943                 struct insert_other_if_hard_filtered_ctx ctx = {
1944                         .tab = &tab,
1945                         .filter_ctx = filter_ctx,
1946                 };
1947                 for_blob_in_table(lt, insert_other_if_hard_filtered, &ctx);
1948         }
1949
1950         list_for_each_entry(blob, blob_list, write_blobs_list)
1951                 blob_size_table_insert(blob, &tab);
1952
1953         destroy_blob_size_table(&tab);
1954         return 0;
1955 }
1956
1957 static void
1958 filter_blob_list_for_write(struct list_head *blob_list,
1959                            struct filter_context *filter_ctx)
1960 {
1961         struct blob_descriptor *blob, *tmp;
1962
1963         list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
1964                 int status = blob_filtered(blob, filter_ctx);
1965
1966                 if (status == 0) {
1967                         /* Not filtered.  */
1968                         continue;
1969                 } else {
1970                         if (status > 0) {
1971                                 /* Soft filtered.  */
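                                /* The blob keeps its entry in the output
                                 * blob table; only its data does not need
                                 * to be rewritten.  */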
1972                         } else {
1973                                 /* Hard filtered.  */
1974                                 blob->will_be_in_output_wim = 0;
1975                                 list_del(&blob->blob_table_list);
1976                         }
1977                         list_del(&blob->write_blobs_list);
1978                 }
1979         }
1980 }
1981
1982 /*
1983  * prepare_blob_list_for_write() -
1984  *
1985  * Prepare the list of blobs to write for writing a WIM containing the specified
1986  * image(s) with the specified write flags.
1987  *
1988  * @wim
1989  *      The WIMStruct on whose behalf the write is occurring.
1990  *
1991  * @image
1992  *      Image(s) from the WIM to write; may be WIMLIB_ALL_IMAGES.
1993  *
1994  * @write_flags
1995  *      WIMLIB_WRITE_FLAG_* flags for the write operation:
1996  *
1997  *      STREAMS_OK:  For writes of all images, assume that all blobs in the blob
1998  *      table of @wim and the per-image lists of unhashed blobs should be taken
1999  *      as-is, and image metadata should not be searched for references.  This
2000  *      does not exclude filtering with OVERWRITE and SKIP_EXTERNAL_WIMS, below.
2001  *
2002  *      OVERWRITE:  Blobs already present in @wim shall not be returned in
2003  *      @blob_list_ret.
2004  *
2005  *      SKIP_EXTERNAL_WIMS:  Blobs already present in a WIM file, but not @wim,
2006  *      shall be returned in neither @blob_list_ret nor @blob_table_list_ret.
2007  *
2008  * @blob_list_ret
2009  *      List of blobs, linked by write_blobs_list, that need to be written will
2010  *      be returned here.
2011  *
2012  *      Note that this function assumes that unhashed blobs will be written; it
2013  *      does not take into account that they may become duplicates when actually
2014  *      hashed.
2015  *
2016  * @blob_table_list_ret
2017  *      List of blobs, linked by blob_table_list, that need to be included in
2018  *      the WIM's blob table will be returned here.  This will be a superset of
2019  *      the blobs in @blob_list_ret.
2020  *
2021  *      This list will be a proper superset of @blob_list_ret if and only if
2022  *      WIMLIB_WRITE_FLAG_OVERWRITE was specified in @write_flags and some of
2023  *      the blobs that would otherwise need to be written were already located
2024  *      in the WIM file.
2025  *
2026  *      All blobs in this list will have @out_refcnt set to the number of
2027  *      references to the blob in the output WIM.  If
2028  *      WIMLIB_WRITE_FLAG_STREAMS_OK was specified in @write_flags, @out_refcnt
2029  *      may be as low as 0.
2030  *
2031  * @filter_ctx_ret
2032  *      A context for queries of blob filter status with blob_filtered() is
2033  *      returned in this location.
2034  *
2035  * In addition, @will_be_in_output_wim will be set to 1 in all blobs inserted
2036  * into @blob_table_list_ret and to 0 in all blobs in the blob table of @wim not
2037  * inserted into @blob_table_list_ret.
2038  *
2039  * Furthermore, @unique_size will be set to 1 on all blobs in
2040  * @blob_list_ret that have unique size among all blobs in @blob_list_ret and
2041  * among all blobs in the blob table of @wim that are ineligible for being
2042  * written due to filtering.
2043  *
2044  * Returns 0 on success; nonzero on read error, memory allocation error, or
2045  * otherwise.
2046  */
2047 static int
2048 prepare_blob_list_for_write(WIMStruct *wim, int image,
2049                             int write_flags,
2050                             struct list_head *blob_list_ret,
2051                             struct list_head *blob_table_list_ret,
2052                             struct filter_context *filter_ctx_ret)
2053 {
2054         int ret;
2055         struct blob_descriptor *blob;
2056
2057         filter_ctx_ret->write_flags = write_flags;
2058         filter_ctx_ret->wim = wim;
2059
2060         ret = prepare_unfiltered_list_of_blobs_in_output_wim(
2061                                 wim,
2062                                 image,
2063                                 write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK,
2064                                 blob_list_ret);
2065         if (ret)
2066                 return ret;
2067
2068         INIT_LIST_HEAD(blob_table_list_ret);
2069         list_for_each_entry(blob, blob_list_ret, write_blobs_list)
2070                 list_add_tail(&blob->blob_table_list, blob_table_list_ret);
2071
2072         ret = determine_blob_size_uniquity(blob_list_ret, wim->blob_table,
2073                                            filter_ctx_ret);
2074         if (ret)
2075                 return ret;
2076
2077         if (may_filter_blobs(filter_ctx_ret))
2078                 filter_blob_list_for_write(blob_list_ret, filter_ctx_ret);
2079
2080         return 0;
2081 }
2082
2083 static int
2084 write_file_data(WIMStruct *wim, int image, int write_flags,
2085                 unsigned num_threads,
2086                 struct list_head *blob_list_override,
2087                 struct list_head *blob_table_list_ret)
2088 {
2089         int ret;
2090         struct list_head _blob_list;
2091         struct list_head *blob_list;
2092         struct blob_descriptor *blob;
2093         struct filter_context _filter_ctx;
2094         struct filter_context *filter_ctx;
2095
2096         if (blob_list_override == NULL) {
2097                 /* Normal case: prepare blob list from image(s) being written.
2098                  */
2099                 blob_list = &_blob_list;
2100                 filter_ctx = &_filter_ctx;
2101                 ret = prepare_blob_list_for_write(wim, image, write_flags,
2102                                                   blob_list,
2103                                                   blob_table_list_ret,
2104                                                   filter_ctx);
2105                 if (ret)
2106                         return ret;
2107         } else {
2108                 /* Currently only as a result of wimlib_split() being called:
2109                  * use the blob list already explicitly provided.  Use existing
2110                  * reference counts.  */
2111                 blob_list = blob_list_override;
2112                 filter_ctx = NULL;
2113                 INIT_LIST_HEAD(blob_table_list_ret);
2114                 list_for_each_entry(blob, blob_list, write_blobs_list) {
2115                         blob->out_refcnt = blob->refcnt;
2116                         blob->will_be_in_output_wim = 1;
2117                         blob->unique_size = 0;
2118                         list_add_tail(&blob->blob_table_list, blob_table_list_ret);
2119                 }
2120         }
2121
2122         return write_file_data_blobs(wim,
2123                                      blob_list,
2124                                      write_flags,
2125                                      num_threads,
2126                                      filter_ctx);
2127 }
2128
2129 static int
2130 write_metadata_resources(WIMStruct *wim, int image, int write_flags)
2131 {
2132         int ret;
2133         int start_image;
2134         int end_image;
2135         int write_resource_flags;
2136
2137         if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)
2138                 return 0;
2139
2140         write_resource_flags = write_flags_to_resource_flags(write_flags);
2141
2142         write_resource_flags &= ~WRITE_RESOURCE_FLAG_SOLID;
2143
2144         ret = call_progress(wim->progfunc,
2145                             WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN,
2146                             NULL, wim->progctx);
2147         if (ret)
2148                 return ret;
2149
2150         if (image == WIMLIB_ALL_IMAGES) {
2151                 start_image = 1;
2152                 end_image = wim->hdr.image_count;
2153         } else {
2154                 start_image = image;
2155                 end_image = image;
2156         }
2157
2158         for (int i = start_image; i <= end_image; i++) {
2159                 struct wim_image_metadata *imd;
2160
2161                 imd = wim->image_metadata[i - 1];
2162                 /* Build a new metadata resource only if image was modified from
2163                  * the original (or was newly added).  Otherwise just copy the
2164                  * existing one.  */
2165                 if (imd->modified) {
2166                         ret = write_metadata_resource(wim, i,
2167                                                       write_resource_flags);
2168                 } else if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
2169                         blob_set_out_reshdr_for_reuse(imd->metadata_blob);
2170                         ret = 0;
2171                 } else {
2172                         ret = write_wim_resource(imd->metadata_blob,
2173                                                  &wim->out_fd,
2174                                                  wim->out_compression_type,
2175                                                  wim->out_chunk_size,
2176                                                  write_resource_flags);
2177                 }
2178                 if (ret)
2179                         return ret;
2180         }
2181
2182         return call_progress(wim->progfunc,
2183                              WIMLIB_PROGRESS_MSG_WRITE_METADATA_END,
2184                              NULL, wim->progctx);
2185 }
2186
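/* Open the file at @path for writing with the given flags (plus O_BINARY) and
 * initialize wim->out_fd with the resulting file descriptor.  */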
2187 static int
2188 open_wim_writable(WIMStruct *wim, const tchar *path, int open_flags)
2189 {
2190         int raw_fd = topen(path, open_flags | O_BINARY, 0644);
2191         if (raw_fd < 0) {
2192                 ERROR_WITH_ERRNO("Failed to open \"%"TS"\" for writing", path);
2193                 return WIMLIB_ERR_OPEN;
2194         }
2195         filedes_init(&wim->out_fd, raw_fd);
2196         return 0;
2197 }
2198
2199 static int
2200 close_wim_writable(WIMStruct *wim, int write_flags)
2201 {
2202         int ret = 0;
2203
2204         if (!(write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR))
2205                 if (filedes_valid(&wim->out_fd))
2206                         if (filedes_close(&wim->out_fd))
2207                                 ret = WIMLIB_ERR_WRITE;
2208         filedes_invalidate(&wim->out_fd);
2209         return ret;
2210 }
2211
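/* Comparator that orders blob table entries by their location in the output
 * WIM: non-solid blobs come first, sorted by offset; blobs stored in solid
 * resources follow, grouped by the offset of their solid resource and then
 * sorted by their offset within it.  */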
2212 static int
2213 cmp_blobs_by_out_rdesc(const void *p1, const void *p2)
2214 {
2215         const struct blob_descriptor *blob1, *blob2;
2216
2217         blob1 = *(const struct blob_descriptor**)p1;
2218         blob2 = *(const struct blob_descriptor**)p2;
2219
2220         if (blob1->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) {
2221                 if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) {
2222                         if (blob1->out_res_offset_in_wim != blob2->out_res_offset_in_wim)
2223                                 return cmp_u64(blob1->out_res_offset_in_wim,
2224                                                blob2->out_res_offset_in_wim);
2225                 } else {
2226                         return 1;
2227                 }
2228         } else {
2229                 if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID)
2230                         return -1;
2231         }
2232         return cmp_u64(blob1->out_reshdr.offset_in_wim,
2233                        blob2->out_reshdr.offset_in_wim);
2234 }
2235
2236 static int
2237 write_blob_table(WIMStruct *wim, int image, int write_flags,
2238                  struct list_head *blob_table_list)
2239 {
2240         int ret;
2241
2242         /* Set output resource metadata for blobs already present in WIM.  */
2243         if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
2244                 struct blob_descriptor *blob;
2245                 list_for_each_entry(blob, blob_table_list, blob_table_list) {
2246                         if (blob->blob_location == BLOB_IN_WIM &&
2247                             blob->rdesc->wim == wim)
2248                         {
2249                                 blob_set_out_reshdr_for_reuse(blob);
2250                         }
2251                 }
2252         }
2253
2254         ret = sort_blob_list(blob_table_list,
2255                              offsetof(struct blob_descriptor, blob_table_list),
2256                              cmp_blobs_by_out_rdesc);
2257         if (ret)
2258                 return ret;
2259
2260         /* Add entries for metadata resources.  */
2261         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)) {
2262                 int start_image;
2263                 int end_image;
2264
2265                 if (image == WIMLIB_ALL_IMAGES) {
2266                         start_image = 1;
2267                         end_image = wim->hdr.image_count;
2268                 } else {
2269                         start_image = image;
2270                         end_image = image;
2271                 }
2272
2273                 /* Push metadata blob table entries onto the front of the list
2274                  * in reverse order, so that they're written in order.
2275                  */
2276                 for (int i = end_image; i >= start_image; i--) {
2277                         struct blob_descriptor *metadata_blob;
2278
2279                         metadata_blob = wim->image_metadata[i - 1]->metadata_blob;
2280                         wimlib_assert(metadata_blob->out_reshdr.flags & WIM_RESHDR_FLAG_METADATA);
2281                         metadata_blob->out_refcnt = 1;
2282                         list_add(&metadata_blob->blob_table_list, blob_table_list);
2283                 }
2284         }
2285
2286         return write_blob_table_from_blob_list(blob_table_list,
2287                                                &wim->out_fd,
2288                                                wim->out_hdr.part_number,
2289                                                &wim->out_hdr.blob_table_reshdr,
2290                                                write_flags_to_resource_flags(write_flags));
2291 }
2292
2293 /*
2294  * Finish writing a WIM file: write the blob table, xml data, and integrity
2295  * table, then overwrite the WIM header.
2296  *
2297  * The output file descriptor is closed on success, except when writing to a
2298  * user-specified file descriptor (WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR set).
2299  */
2300 static int
2301 finish_write(WIMStruct *wim, int image, int write_flags,
2302              struct list_head *blob_table_list)
2303 {
2304         int write_resource_flags;
2305         off_t old_blob_table_end = 0;
2306         struct integrity_table *old_integrity_table = NULL;
2307         off_t new_blob_table_end;
2308         u64 xml_totalbytes;
2309         int ret;
2310
2311         write_resource_flags = write_flags_to_resource_flags(write_flags);
2312
2313         /* In the WIM header, there is room for the resource entry for a
2314          * metadata resource labeled as the "boot metadata".  This entry should
2315          * be zeroed out if there is no bootable image (boot_idx 0).  Otherwise,
2316          * it should be a copy of the resource entry for the image that is
2317          * marked as bootable.  */
2318         if (wim->out_hdr.boot_idx == 0) {
2319                 zero_reshdr(&wim->out_hdr.boot_metadata_reshdr);
2320         } else {
2321                 copy_reshdr(&wim->out_hdr.boot_metadata_reshdr,
2322                             &wim->image_metadata[
2323                                 wim->out_hdr.boot_idx - 1]->metadata_blob->out_reshdr);
2324         }
2325
2326         /* If overwriting the WIM file containing an integrity table in-place,
2327          * we'd like to re-use the information in the old integrity table
2328          * instead of recalculating it.  But we might overwrite the old
2329          * integrity table when we expand the XML data.  Read it into memory
2330          * just in case.  */
2331         if ((write_flags & (WIMLIB_WRITE_FLAG_OVERWRITE |
2332                             WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)) ==
2333                 (WIMLIB_WRITE_FLAG_OVERWRITE |
2334                  WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
2335             && wim_has_integrity_table(wim))
2336         {
2337                 old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
2338                                      wim->hdr.blob_table_reshdr.size_in_wim;
2339                 (void)read_integrity_table(wim,
2340                                            old_blob_table_end - WIM_HEADER_DISK_SIZE,
2341                                            &old_integrity_table);
2342                 /* If we couldn't read the old integrity table, we can still
2343                  * re-calculate the full integrity table ourselves.  Hence the
2344                  * ignoring of the return value.  */
2345         }
2346
2347         /* Write blob table if needed.  */
2348         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)) {
2349                 ret = write_blob_table(wim, image, write_flags,
2350                                        blob_table_list);
2351                 if (ret) {
2352                         free_integrity_table(old_integrity_table);
2353                         return ret;
2354                 }
2355         }
2356
2357         /* Write XML data.  */
2358         xml_totalbytes = wim->out_fd.offset;
2359         if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES)
2360                 xml_totalbytes = WIM_TOTALBYTES_USE_EXISTING;
2361         ret = write_wim_xml_data(wim, image, xml_totalbytes,
2362                                  &wim->out_hdr.xml_data_reshdr,
2363                                  write_resource_flags);
2364         if (ret) {
2365                 free_integrity_table(old_integrity_table);
2366                 return ret;
2367         }
2368
2369         /* Write integrity table if needed.  */
2370         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2371                 if (write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS) {
2372                         /* The XML data we wrote may have overwritten part of
2373                          * the old integrity table, so while calculating the new
2374                          * integrity table we should temporarily update the WIM
2375                          * header to remove the integrity table reference.   */
2376                         struct wim_header checkpoint_hdr;
2377                         memcpy(&checkpoint_hdr, &wim->out_hdr, sizeof(struct wim_header));
2378                         zero_reshdr(&checkpoint_hdr.integrity_table_reshdr);
2379                         checkpoint_hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2380                         ret = write_wim_header(&checkpoint_hdr, &wim->out_fd, 0);
2381                         if (ret) {
2382                                 free_integrity_table(old_integrity_table);
2383                                 return ret;
2384                         }
2385                 }
2386
2387                 new_blob_table_end = wim->out_hdr.blob_table_reshdr.offset_in_wim +
2388                                      wim->out_hdr.blob_table_reshdr.size_in_wim;
2389
2390                 ret = write_integrity_table(wim,
2391                                             new_blob_table_end,
2392                                             old_blob_table_end,
2393                                             old_integrity_table);
2394                 free_integrity_table(old_integrity_table);
2395                 if (ret)
2396                         return ret;
2397         } else {
2398                 /* No integrity table.  */
2399                 zero_reshdr(&wim->out_hdr.integrity_table_reshdr);
2400         }
2401
2402         /* Now that all information in the WIM header has been determined, the
2403          * preliminary header written earlier can be overwritten, the header of
2404          * the existing WIM file can be overwritten, or the final header can be
2405          * written to the end of the pipable WIM.  */
2406         wim->out_hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2407         if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2408                 ret = write_wim_header(&wim->out_hdr, &wim->out_fd, wim->out_fd.offset);
2409         else
2410                 ret = write_wim_header(&wim->out_hdr, &wim->out_fd, 0);
2411         if (ret)
2412                 return ret;
2413
2414         /* Possibly sync file data to disk before closing.  On POSIX systems, it
2415          * is necessary to do this before using rename() to overwrite an
2416          * existing file with a new file.  Otherwise, data loss would occur if
2417          * the system is abruptly terminated when the metadata for the rename
2418          * operation has been written to disk, but the new file data has not.
2419          */
2420         if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
2421                 if (fsync(wim->out_fd.fd)) {
2422                         ERROR_WITH_ERRNO("Error syncing data to WIM file");
2423                         return WIMLIB_ERR_WRITE;
2424                 }
2425         }
2426
2427         if (close_wim_writable(wim, write_flags)) {
2428                 ERROR_WITH_ERRNO("Failed to close the output WIM file");
2429                 return WIMLIB_ERR_WRITE;
2430         }
2431
2432         return 0;
2433 }
2434
2435 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
2436
2437 /* Set advisory lock on WIM file (if not already done so)  */
2438 int
2439 lock_wim_for_append(WIMStruct *wim)
2440 {
2441         if (wim->locked_for_append)
2442                 return 0;
2443         if (!flock(wim->in_fd.fd, LOCK_EX | LOCK_NB)) {
2444                 wim->locked_for_append = 1;
2445                 return 0;
2446         }
2447         if (errno != EWOULDBLOCK)
2448                 return 0;
2449         return WIMLIB_ERR_ALREADY_LOCKED;
2450 }
2451
2452 /* Remove advisory lock on WIM file (if present)  */
2453 void
2454 unlock_wim_for_append(WIMStruct *wim)
2455 {
2456         if (wim->locked_for_append) {
2457                 flock(wim->in_fd.fd, LOCK_UN);
2458                 wim->locked_for_append = 0;
2459         }
2460 }
2461 #endif
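
/*
 * Hypothetical usage sketch (kept out of the build with #if 0) for the locking
 * helpers above: take the advisory lock before appending to a WIM file that
 * another process might also be modifying, and drop it afterwards.  The helper
 * name and the elided body are illustrative only.
 */
#if 0
static int
example_locked_append(WIMStruct *wim)
{
        int ret = lock_wim_for_append(wim);
        if (ret)
                return ret;  /* WIMLIB_ERR_ALREADY_LOCKED */

        /* ... append new resources and rewrite the blob table here ... */

        unlock_wim_for_append(wim);
        return 0;
}
#endif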
2462
2463 /*
2464  * write_pipable_wim():
2465  *
2466  * Perform the intermediate stages of creating a "pipable" WIM (i.e. a WIM
2467  * capable of being applied from a pipe).
2468  *
2469  * Pipable WIMs are a wimlib-specific modification of the WIM format such that
2470  * images can be applied from them sequentially when the file data is sent over
2471  * a pipe.  In addition, a pipable WIM can be written sequentially to a pipe.
2472  * The modifications made to the WIM format for pipable WIMs are:
2473  *
2474  * - Magic characters in header are "WLPWM\0\0\0" (wimlib pipable WIM) instead
2475  *   of "MSWIM\0\0\0".  This lets wimlib know that the WIM is pipable and also
2476  *   stops other software from trying to read the file as a normal WIM.
2477  *
2478  * - The header at the beginning of the file does not contain all the normal
2479  *   information; in particular it will have all 0's for the blob table and XML
2480  *   data resource entries.  This is because this information cannot be
2481  *   determined until the blob table and XML data have been written.
2482  *   Consequently, wimlib will write the full header at the very end of the
2483  *   file.  The header at the end, however, is only used when reading the WIM
2484  *   from a seekable file (not a pipe).
2485  *
2486  * - An extra copy of the XML data is placed directly after the header.  This
2487  *   allows image names and sizes to be determined at an appropriate time when
2488  *   reading the WIM from a pipe.  This copy of the XML data is ignored if the
2489  *   WIM is read from a seekable file (not a pipe).
2490  *
2491  * - Solid resources are not allowed.  Each blob is always stored in its own
2492  *   resource.
2493  *
2494  * - The format of resources, or blobs, has been modified to allow them to be
2495  *   used before the "blob table" has been read.  Each blob is prefixed with a
2496  *   `struct pwm_blob_hdr' that is basically an abbreviated form of `struct
2497  *   blob_descriptor_disk' that only contains the SHA-1 message digest,
2498  *   uncompressed blob size, and flags that indicate whether the blob is
2499  *   compressed.  The data of uncompressed blobs then follows literally, while
2500  *   the data of compressed blobs follows in a modified format.  Compressed
2501  *   blobs do not begin with a chunk table, since the chunk table cannot be
2502  *   written until all chunks have been compressed.  Instead, each compressed
2503  *   chunk is prefixed by a `struct pwm_chunk_hdr' that gives its size.
2504  *   Furthermore, the chunk table is written at the end of the resource instead
2505  *   of the start.  Note: chunk offsets are given in the chunk table as if the
2506  *   `struct pwm_chunk_hdr's were not present; also, the chunk table is only
2507  *   used if the WIM is being read from a seekable file (not a pipe).
2508  *
2509  * - Metadata blobs always come before non-metadata blobs.  (This does not by
2510  *   itself constitute an incompatibility with normal WIMs, since this is valid
2511  *   in normal WIMs.)
2512  *
2513  * - At least up to the end of the blobs, all components must be packed as
2514  *   tightly as possible; there cannot be any "holes" in the WIM.  (This does
2515  *   not by itself constitute an incompatibility with normal WIMs, since this
2516  *   is valid in normal WIMs.)
2517  *
2518  * Note: the blob table, XML data, and header at the end are not used when
2519  * applying from a pipe.  They exist to support functionality such as image
2520  * application and export when the WIM is *not* read from a pipe.
2521  *
2522  *   Layout of pipable WIM:
2523  *
2524  * +--------+----------+--------------------+----------------+--------------+-----------+--------+
2525  * | Header | XML data | Metadata resources | File resources |  Blob table  | XML data  | Header |
2526  * +--------+----------+--------------------+----------------+--------------+-----------+--------+
2527  *
2528  *   Layout of normal WIM:
2529  *
2530  * +--------+-----------------------------+--------------+----------+
2531  * | Header | File and metadata resources |  Blob table  | XML data |
2532  * +--------+-----------------------------+--------------+----------+
2533  *
2534  * An optional integrity table can follow the final XML data in both normal and
2535  * pipable WIMs.  However, due to implementation details, wimlib currently can
2536  * only include an integrity table in a pipable WIM when writing it to a
2537  * seekable file (not a pipe).
2538  *
2539  * Do note that since pipable WIMs are not supported by Microsoft's software,
2540  * wimlib does not create them unless explicitly requested (with
2541  * WIMLIB_WRITE_FLAG_PIPABLE) and as stated above they use different magic
2542  * characters to identify the file.
2543  */
2544 static int
2545 write_pipable_wim(WIMStruct *wim, int image, int write_flags,
2546                   unsigned num_threads,
2547                   struct list_head *blob_list_override,
2548                   struct list_head *blob_table_list_ret)
2549 {
2550         int ret;
2551         struct wim_reshdr xml_reshdr;
2552
2553         WARNING("Creating a pipable WIM, which will "
2554                 "be incompatible\n"
2555                 "          with Microsoft's software (WIMGAPI/ImageX/DISM).");
2556
2557         /* At this point, the header at the beginning of the file has already
2558          * been written.  */
2559
2560         /* For efficiency, when wimlib adds an image to the WIM with
2561          * wimlib_add_image(), the SHA-1 message digests of files are not
2562          * calculated; instead, they are calculated while the files are being
2563          * written.  However, this does not work when writing a pipable WIM,
2564          * since when writing a blob to a pipable WIM, its SHA-1 message digest
2565          * needs to be known before the blob data is written.  Therefore, before
2566          * getting much farther, we need to pre-calculate the SHA-1 message
2567          * digests of all blobs that will be written.  */
2568         ret = wim_checksum_unhashed_blobs(wim);
2569         if (ret)
2570                 return ret;
2571
2572         /* Write extra copy of the XML data.  */
2573         ret = write_wim_xml_data(wim, image, WIM_TOTALBYTES_OMIT,
2574                                  &xml_reshdr, WRITE_RESOURCE_FLAG_PIPABLE);
2575         if (ret)
2576                 return ret;
2577
2578         /* Write metadata resources for the image(s) being included in the
2579          * output WIM.  */
2580         ret = write_metadata_resources(wim, image, write_flags);
2581         if (ret)
2582                 return ret;
2583
2584         /* Write file data needed for the image(s) being included in the output
2585          * WIM, or file data needed for the split WIM part.  */
2586         return write_file_data(wim, image, write_flags,
2587                                num_threads, blob_list_override,
2588                                blob_table_list_ret);
2589
2590         /* The blob table, XML data, and header at end are handled by
2591          * finish_write().  */
2592 }
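
/*
 * Illustrative sketch (not compiled into wimlib): how a client program would
 * request the pipable layout described above.  It assumes an already-opened
 * WIMStruct `wim' and an output path `out_path'; see wimlib.h for the
 * authoritative API documentation.
 */
#if 0
#include <wimlib.h>

static int
example_write_pipable(WIMStruct *wim, const wimlib_tchar *out_path)
{
	/* WIMLIB_WRITE_FLAG_PIPABLE selects the pipable layout, so the
	 * resulting file can later be streamed sequentially (e.g. piped to
	 * `wimlib-imagex apply - ...', which reads the WIM from standard
	 * input).  num_threads == 0 selects the default thread count.  */
	return wimlib_write(wim, out_path, WIMLIB_ALL_IMAGES,
			    WIMLIB_WRITE_FLAG_PIPABLE, 0);
}
#endif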
2593
2594 static bool
2595 should_default_to_solid_compression(WIMStruct *wim, int write_flags)
2596 {
2597         return wim->out_hdr.wim_version == WIM_VERSION_SOLID &&
2598                 !(write_flags & (WIMLIB_WRITE_FLAG_SOLID |
2599                                  WIMLIB_WRITE_FLAG_PIPABLE)) &&
2600                 wim_has_solid_resources(wim);
2601 }
2602
2603 /* Write a standalone WIM or split WIM (SWM) part to a new file or to a file
2604  * descriptor.  */
2605 int
2606 write_wim_part(WIMStruct *wim,
2607                const void *path_or_fd,
2608                int image,
2609                int write_flags,
2610                unsigned num_threads,
2611                unsigned part_number,
2612                unsigned total_parts,
2613                struct list_head *blob_list_override,
2614                const u8 *guid)
2615 {
2616         int ret;
2617         struct list_head blob_table_list;
2618
2619         /* Internally, this is always called with a valid part number and total
2620          * parts.  */
2621         wimlib_assert(total_parts >= 1);
2622         wimlib_assert(part_number >= 1 && part_number <= total_parts);
2623
2624         /* A valid image (or all images) must be specified.  */
2625         if (image != WIMLIB_ALL_IMAGES &&
2626              (image < 1 || image > wim->hdr.image_count))
2627                 return WIMLIB_ERR_INVALID_IMAGE;
2628
2629         /* If we need to write metadata resources, make sure the ::WIMStruct has
2630          * the needed information attached (e.g. is not a resource-only WIM,
2631          * such as a non-first part of a split WIM).  */
2632         if (!wim_has_metadata(wim) &&
2633             !(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA))
2634                 return WIMLIB_ERR_METADATA_NOT_FOUND;
2635
2636         /* Check for contradictory flags.  */
2637         if ((write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2638                             WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2639                                 == (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2640                                     WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2641                 return WIMLIB_ERR_INVALID_PARAM;
2642
2643         if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2644                             WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2645                                 == (WIMLIB_WRITE_FLAG_PIPABLE |
2646                                     WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2647                 return WIMLIB_ERR_INVALID_PARAM;
2648
2649         /* Include an integrity table by default if no preference was given and
2650          * the WIM already had an integrity table.  */
2651         if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2652                              WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))) {
2653                 if (wim_has_integrity_table(wim))
2654                         write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
2655         }
2656
2657         /* Write a pipable WIM by default if no preference was given and the WIM
2658          * was already pipable.  */
2659         if (!(write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2660                              WIMLIB_WRITE_FLAG_NOT_PIPABLE))) {
2661                 if (wim_is_pipable(wim))
2662                         write_flags |= WIMLIB_WRITE_FLAG_PIPABLE;
2663         }
2664
2665         if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2666                             WIMLIB_WRITE_FLAG_SOLID))
2667                                     == (WIMLIB_WRITE_FLAG_PIPABLE |
2668                                         WIMLIB_WRITE_FLAG_SOLID))
2669         {
2670                 ERROR("Solid compression is unsupported in pipable WIMs");
2671                 return WIMLIB_ERR_INVALID_PARAM;
2672         }
2673
2674         /* Start initializing the new file header.  */
2675         memset(&wim->out_hdr, 0, sizeof(wim->out_hdr));
2676
2677         /* Set the magic number.  */
2678         if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2679                 wim->out_hdr.magic = PWM_MAGIC;
2680         else
2681                 wim->out_hdr.magic = WIM_MAGIC;
2682
2683         /* Set the version number.  */
2684         if ((write_flags & WIMLIB_WRITE_FLAG_SOLID) ||
2685             wim->out_compression_type == WIMLIB_COMPRESSION_TYPE_LZMS)
2686                 wim->out_hdr.wim_version = WIM_VERSION_SOLID;
2687         else
2688                 wim->out_hdr.wim_version = WIM_VERSION_DEFAULT;
2689
2690         /* Default to solid compression if it is valid in the chosen WIM file
2691          * format and the WIMStruct references any solid resources.  This is
2692          * useful when exporting an image from a solid WIM.  */
2693         if (should_default_to_solid_compression(wim, write_flags))
2694                 write_flags |= WIMLIB_WRITE_FLAG_SOLID;
2695
2696         /* Set the header flags.  */
2697         wim->out_hdr.flags = (wim->hdr.flags & (WIM_HDR_FLAG_RP_FIX |
2698                                                 WIM_HDR_FLAG_READONLY));
2699         if (total_parts != 1)
2700                 wim->out_hdr.flags |= WIM_HDR_FLAG_SPANNED;
2701         if (wim->out_compression_type != WIMLIB_COMPRESSION_TYPE_NONE) {
2702                 wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESSION;
2703                 switch (wim->out_compression_type) {
2704                 case WIMLIB_COMPRESSION_TYPE_XPRESS:
2705                         wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESS_XPRESS;
2706                         break;
2707                 case WIMLIB_COMPRESSION_TYPE_LZX:
2708                         wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESS_LZX;
2709                         break;
2710                 case WIMLIB_COMPRESSION_TYPE_LZMS:
2711                         wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESS_LZMS;
2712                         break;
2713                 }
2714         }
2715
2716         /* Set the chunk size.  */
2717         wim->out_hdr.chunk_size = wim->out_chunk_size;
2718
2719         /* Set the GUID.  */
2720         if (write_flags & WIMLIB_WRITE_FLAG_RETAIN_GUID)
2721                 guid = wim->hdr.guid;
2722         if (guid)
2723                 copy_guid(wim->out_hdr.guid, guid);
2724         else
2725                 generate_guid(wim->out_hdr.guid);
2726
2727         /* Set the part number and total parts.  */
2728         wim->out_hdr.part_number = part_number;
2729         wim->out_hdr.total_parts = total_parts;
2730
2731         /* Set the image count.  */
2732         if (image == WIMLIB_ALL_IMAGES)
2733                 wim->out_hdr.image_count = wim->hdr.image_count;
2734         else
2735                 wim->out_hdr.image_count = 1;
2736
2737         /* Set the boot index.  */
2738         wim->out_hdr.boot_idx = 0;
2739         if (total_parts == 1) {
2740                 if (image == WIMLIB_ALL_IMAGES)
2741                         wim->out_hdr.boot_idx = wim->hdr.boot_idx;
2742                 else if (image == wim->hdr.boot_idx)
2743                         wim->out_hdr.boot_idx = 1;
2744         }
2745
2746         /* Set up the output file descriptor.  */
2747         if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) {
2748                 /* File descriptor was explicitly provided.  */
2749                 filedes_init(&wim->out_fd, *(const int *)path_or_fd);
2750                 if (!filedes_is_seekable(&wim->out_fd)) {
2751                         /* The file descriptor is a pipe.  */
2752                         ret = WIMLIB_ERR_INVALID_PARAM;
2753                         if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2754                                 goto out_cleanup;
2755                         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2756                                 ERROR("Can't include integrity check when "
2757                                       "writing pipable WIM to pipe!");
2758                                 goto out_cleanup;
2759                         }
2760                 }
2761         } else {
2762                 /* Filename of WIM to write was provided; open file descriptor
2763                  * to it.  */
2764                 ret = open_wim_writable(wim, (const tchar*)path_or_fd,
2765                                         O_TRUNC | O_CREAT | O_RDWR);
2766                 if (ret)
2767                         goto out_cleanup;
2768         }
2769
2770         /* Write initial header.  This is merely a "dummy" header since it
2771          * doesn't have resource entries filled in yet, so it will be
2772          * overwritten later (unless writing a pipable WIM).  */
2773         if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2774                 wim->out_hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2775         ret = write_wim_header(&wim->out_hdr, &wim->out_fd, wim->out_fd.offset);
2776         wim->out_hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2777         if (ret)
2778                 goto out_cleanup;
2779
2780         /* Write file data and metadata resources.  */
2781         if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE)) {
2782                 /* Default case: create a normal (non-pipable) WIM.  */
2783                 ret = write_file_data(wim, image, write_flags,
2784                                       num_threads,
2785                                       blob_list_override,
2786                                       &blob_table_list);
2787                 if (ret)
2788                         goto out_cleanup;
2789
2790                 ret = write_metadata_resources(wim, image, write_flags);
2791                 if (ret)
2792                         goto out_cleanup;
2793         } else {
2794                 /* Non-default case: create pipable WIM.  */
2795                 ret = write_pipable_wim(wim, image, write_flags, num_threads,
2796                                         blob_list_override,
2797                                         &blob_table_list);
2798                 if (ret)
2799                         goto out_cleanup;
2800         }
2801
2802         /* Write blob table, XML data, and (optional) integrity table.  */
2803         ret = finish_write(wim, image, write_flags, &blob_table_list);
2804 out_cleanup:
2805         (void)close_wim_writable(wim, write_flags);
2806         return ret;
2807 }
2808
2809 /* Write a standalone WIM to a file or file descriptor.  */
2810 static int
2811 write_standalone_wim(WIMStruct *wim, const void *path_or_fd,
2812                      int image, int write_flags, unsigned num_threads)
2813 {
2814         return write_wim_part(wim, path_or_fd, image, write_flags,
2815                               num_threads, 1, 1, NULL, NULL);
2816 }
2817
2818 /* API function documented in wimlib.h  */
2819 WIMLIBAPI int
2820 wimlib_write(WIMStruct *wim, const tchar *path,
2821              int image, int write_flags, unsigned num_threads)
2822 {
2823         if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
2824                 return WIMLIB_ERR_INVALID_PARAM;
2825
2826         if (path == NULL || path[0] == T('\0'))
2827                 return WIMLIB_ERR_INVALID_PARAM;
2828
2829         return write_standalone_wim(wim, path, image, write_flags, num_threads);
2830 }
2831
2832 /* API function documented in wimlib.h  */
2833 WIMLIBAPI int
2834 wimlib_write_to_fd(WIMStruct *wim, int fd,
2835                    int image, int write_flags, unsigned num_threads)
2836 {
2837         if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
2838                 return WIMLIB_ERR_INVALID_PARAM;
2839
2840         if (fd < 0)
2841                 return WIMLIB_ERR_INVALID_PARAM;
2842
2843         write_flags |= WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR;
2844
2845         return write_standalone_wim(wim, &fd, image, write_flags, num_threads);
2846 }
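
/*
 * Illustrative sketch (not compiled into wimlib): writing a WIM directly to a
 * pipe such as standard output.  A non-seekable file descriptor requires
 * WIMLIB_WRITE_FLAG_PIPABLE and cannot be combined with
 * WIMLIB_WRITE_FLAG_CHECK_INTEGRITY, as enforced in write_wim_part() above.
 * `wim' is assumed to be an already-opened WIMStruct.
 */
#if 0
#include <unistd.h>	/* STDOUT_FILENO */
#include <wimlib.h>

static int
example_stream_to_stdout(WIMStruct *wim)
{
	return wimlib_write_to_fd(wim, STDOUT_FILENO, WIMLIB_ALL_IMAGES,
				  WIMLIB_WRITE_FLAG_PIPABLE, 0);
}
#endif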
2847
2848 static bool
2849 any_images_modified(WIMStruct *wim)
2850 {
2851         for (int i = 0; i < wim->hdr.image_count; i++)
2852                 if (wim->image_metadata[i]->modified)
2853                         return true;
2854         return false;
2855 }
2856
2857 static int
2858 check_resource_offset(struct blob_descriptor *blob, void *_wim)
2859 {
2860         const WIMStruct *wim = _wim;
2861         off_t end_offset = *(const off_t*)wim->private;
2862
2863         if (blob->blob_location == BLOB_IN_WIM &&
2864             blob->rdesc->wim == wim &&
2865             blob->rdesc->offset_in_wim + blob->rdesc->size_in_wim > end_offset)
2866                 return WIMLIB_ERR_RESOURCE_ORDER;
2867         return 0;
2868 }
2869
2870 /* Make sure no file or metadata resources are located after the XML data (or
2871  * the integrity table if present); otherwise we can't safely overwrite the WIM
2872  * in place, so we return WIMLIB_ERR_RESOURCE_ORDER.  */
2873 static int
2874 check_resource_offsets(WIMStruct *wim, off_t end_offset)
2875 {
2876         int ret;
2877         unsigned i;
2878
2879         wim->private = &end_offset;
2880         ret = for_blob_in_table(wim->blob_table, check_resource_offset, wim);
2881         if (ret)
2882                 return ret;
2883
2884         for (i = 0; i < wim->hdr.image_count; i++) {
2885                 ret = check_resource_offset(wim->image_metadata[i]->metadata_blob, wim);
2886                 if (ret)
2887                         return ret;
2888         }
2889         return 0;
2890 }
2891
2892 /*
2893  * Overwrite a WIM, possibly appending new resources to it.
2894  *
2895  * A WIM looks like (or is supposed to look like) the following:
2896  *
2897  *                   Header (208 bytes)
2898  *                   Resources for metadata and files (variable size)
2899  *                   Blob table (variable size)
2900  *                   XML data (variable size)
2901  *                   Integrity table (optional) (variable size)
2902  *
2903  * If we are not adding any new files or metadata, then the blob table is
2904  * unchanged--- so we only need to overwrite the XML data, integrity table, and
2905  * header.  This operation is potentially unsafe if the program is abruptly
2906  * terminated while the XML data or integrity table are being overwritten, but
2907  * before the new header has been written.  To partially alleviate this problem,
2908  * we write a temporary header after the XML data has been written.  This may
2909  * prevent the WIM from becoming corrupted if the program is terminated while
2910  * the integrity table is being calculated (but no guarantees, due to write
2911  * re-ordering...).
2912  *
2913  * If we are adding new blobs, including new file data as well as any metadata
2914  * for any new images, then the blob table needs to be changed, and those blobs
2915  * need to be written.  In this case, we try to perform a safe update of the WIM
2916  * file by writing the blobs *after* the end of the previous WIM, then writing
2917  * the new blob table, XML data, and (optionally) integrity table following the
2918  * new blobs.  This will produce a layout like the following:
2919  *
2920  *                   Header (208 bytes)
2921  *                   (OLD) Resources for metadata and files (variable size)
2922  *                   (OLD) Blob table (variable size)
2923  *                   (OLD) XML data (variable size)
2924  *                   (OLD) Integrity table (optional) (variable size)
2925  *                   (NEW) Resources for metadata and files (variable size)
2926  *                   (NEW) Blob table (variable size)
2927  *                   (NEW) XML data (variable size)
2928  *                   (NEW) Integrity table (optional) (variable size)
2929  *
2930  * At all points, the WIM is valid as nothing points to the new data yet.  Then,
2931  * the header is overwritten to point to the new blob table, XML data, and
2932  * integrity table, to produce the following layout:
2933  *
2934  *                   Header (208 bytes)
2935  *                   Resources for metadata and files (variable size)
2936  *                   Nothing (variable size)
2937  *                   Resources for metadata and files (variable size)
2938  *                   Blob table (variable size)
2939  *                   XML data (variable size)
2940  *                   Integrity table (optional) (variable size)
2941  *
2942  * This method allows an image to be appended to a large WIM very quickly, and
2943  * is crash-safe except in the case of write re-ordering, but the disadvantage
2944  * is that a small hole is left in the WIM where the old blob table, XML data,
2945  * and integrity table were.  (These usually only take up a small amount of
2946  * space compared to the blobs, however.)
2947  */
2948 static int
2949 overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads)
2950 {
2951         int ret;
2952         off_t old_wim_end;
2953         u64 old_blob_table_end, old_xml_begin, old_xml_end;
2954         struct list_head blob_list;
2955         struct list_head blob_table_list;
2956         struct filter_context filter_ctx;
2957
2958         /* Include an integrity table by default if no preference was given and
2959          * the WIM already had an integrity table.  */
2960         if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2961                              WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)))
2962                 if (wim_has_integrity_table(wim))
2963                         write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
2964
2965         /* Start preparing the updated file header.  */
2966         memcpy(&wim->out_hdr, &wim->hdr, sizeof(wim->out_hdr));
2967
2968         /* If using solid compression, the version number must be set to
2969          * WIM_VERSION_SOLID.  */
2970         if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
2971                 wim->out_hdr.wim_version = WIM_VERSION_SOLID;
2972
2973         /* Default to solid compression if it is valid in the chosen WIM file
2974          * format and the WIMStruct references any solid resources.  This is
2975          * useful when updating a solid WIM.  */
2976         if (should_default_to_solid_compression(wim, write_flags))
2977                 write_flags |= WIMLIB_WRITE_FLAG_SOLID;
2978
2979         /* Set additional flags for overwrite.  */
2980         write_flags |= WIMLIB_WRITE_FLAG_OVERWRITE |
2981                        WIMLIB_WRITE_FLAG_STREAMS_OK;
2982
2983         /* Make sure there is no data after the XML data, except possibly an
2984          * integrity table.  Any other data located there would be
2985          * overwritten.  */
2986         old_xml_begin = wim->hdr.xml_data_reshdr.offset_in_wim;
2987         old_xml_end = old_xml_begin + wim->hdr.xml_data_reshdr.size_in_wim;
2988         old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
2989                              wim->hdr.blob_table_reshdr.size_in_wim;
2990         if (wim_has_integrity_table(wim) &&
2991             wim->hdr.integrity_table_reshdr.offset_in_wim < old_xml_end) {
2992                 WARNING("Didn't expect the integrity table to be before the XML data");
2993                 ret = WIMLIB_ERR_RESOURCE_ORDER;
2994                 goto out;
2995         }
2996
2997         if (old_blob_table_end > old_xml_begin) {
2998                 WARNING("Didn't expect the blob table to be after the XML data");
2999                 ret = WIMLIB_ERR_RESOURCE_ORDER;
3000                 goto out;
3001         }
3002
3003         /* Set @old_wim_end, which indicates the point beyond which we don't
3004          * allow any file or metadata resources to appear without returning
3005          * WIMLIB_ERR_RESOURCE_ORDER (because we would otherwise overwrite
3006          * those resources). */
3007         if (!wim->image_deletion_occurred && !any_images_modified(wim)) {
3008                 /* If no images have been modified and no images have been
3009                  * deleted, a new blob table does not need to be written.  We
3010                  * shall write the new XML data and optional integrity table
3011                  * immediately after the blob table.  Note that this may
3012                  * overwrite an existing integrity table. */
3013                 old_wim_end = old_blob_table_end;
3014                 write_flags |= WIMLIB_WRITE_FLAG_NO_NEW_BLOBS;
3015         } else if (wim_has_integrity_table(wim)) {
3016                 /* Old WIM has an integrity table; begin writing new blobs after
3017                  * it. */
3018                 old_wim_end = wim->hdr.integrity_table_reshdr.offset_in_wim +
3019                               wim->hdr.integrity_table_reshdr.size_in_wim;
3020         } else {
3021                 /* No existing integrity table; begin writing new blobs after
3022                  * the old XML data. */
3023                 old_wim_end = old_xml_end;
3024         }
3025
3026         ret = check_resource_offsets(wim, old_wim_end);
3027         if (ret)
3028                 goto out;
3029
3030         ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES, write_flags,
3031                                           &blob_list, &blob_table_list,
3032                                           &filter_ctx);
3033         if (ret)
3034                 goto out;
3035
3036         if (write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)
3037                 wimlib_assert(list_empty(&blob_list));
3038
3039         ret = open_wim_writable(wim, wim->filename, O_RDWR);
3040         if (ret)
3041                 goto out;
3042
3043         ret = lock_wim_for_append(wim);
3044         if (ret)
3045                 goto out_close_wim;
3046
3047         /* Set WIM_HDR_FLAG_WRITE_IN_PROGRESS flag in header. */
3048         wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
3049         ret = write_wim_header_flags(wim->hdr.flags, &wim->out_fd);
3050         wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
3051         if (ret) {
3052                 ERROR_WITH_ERRNO("Error updating WIM header flags");
3053                 goto out_unlock_wim;
3054         }
3055
3056         if (filedes_seek(&wim->out_fd, old_wim_end) == -1) {
3057                 ERROR_WITH_ERRNO("Can't seek to end of WIM");
3058                 ret = WIMLIB_ERR_WRITE;
3059                 goto out_restore_hdr;
3060         }
3061
3062         ret = write_file_data_blobs(wim, &blob_list, write_flags,
3063                                     num_threads, &filter_ctx);
3064         if (ret)
3065                 goto out_truncate;
3066
3067         ret = write_metadata_resources(wim, WIMLIB_ALL_IMAGES, write_flags);
3068         if (ret)
3069                 goto out_truncate;
3070
3071         ret = finish_write(wim, WIMLIB_ALL_IMAGES, write_flags,
3072                            &blob_table_list);
3073         if (ret)
3074                 goto out_truncate;
3075
3076         unlock_wim_for_append(wim);
3077         return 0;
3078
3079 out_truncate:
3080         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)) {
3081                 WARNING("Truncating \"%"TS"\" to its original size "
3082                         "(%"PRIu64" bytes)", wim->filename, old_wim_end);
3083                 /* Return value of ftruncate() is ignored because this is
3084                  * already an error path.  */
3085                 (void)ftruncate(wim->out_fd.fd, old_wim_end);
3086         }
3087 out_restore_hdr:
3088         (void)write_wim_header_flags(wim->hdr.flags, &wim->out_fd);
3089 out_unlock_wim:
3090         unlock_wim_for_append(wim);
3091 out_close_wim:
3092         (void)close_wim_writable(wim, write_flags);
3093 out:
3094         return ret;
3095 }
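
/*
 * Minimal sketch (not compiled into wimlib) of the generic "append new data,
 * then commit by rewriting the fixed-size header" pattern described above,
 * expressed with plain POSIX calls.  The function name and parameters are
 * hypothetical; wimlib's real implementation goes through its filedes helpers
 * and write_wim_header*().
 */
#if 0
#include <unistd.h>

static int
append_then_commit(int fd, const void *new_data, size_t new_size,
		   const void *new_header, size_t header_size)
{
	/* 1. Find the current end of the file and append the new data there;
	 *    nothing references it yet, so a crash leaves the old file valid.  */
	off_t old_end = lseek(fd, 0, SEEK_END);
	if (old_end < 0)
		return -1;
	if (pwrite(fd, new_data, new_size, old_end) != (ssize_t)new_size)
		goto rollback;

	/* 2. Flush the appended data before writing the header that points to
	 *    it (guards against write re-ordering).  */
	if (fsync(fd) != 0)
		goto rollback;

	/* 3. Commit by overwriting the fixed-size header at offset 0.  */
	if (pwrite(fd, new_header, header_size, 0) != (ssize_t)header_size)
		goto rollback;
	return fsync(fd);

rollback:
	/* Best-effort rollback: truncate back to the original size.  */
	(void)ftruncate(fd, old_end);
	return -1;
}
#endif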
3096
3097 static int
3098 overwrite_wim_via_tmpfile(WIMStruct *wim, int write_flags, unsigned num_threads)
3099 {
3100         size_t wim_name_len;
3101         int ret;
3102
3103         /* Write the WIM to a temporary file in the same directory as the
3104          * original WIM. */
3105         wim_name_len = tstrlen(wim->filename);
3106         tchar tmpfile[wim_name_len + 10];
3107         tmemcpy(tmpfile, wim->filename, wim_name_len);
3108         randomize_char_array_with_alnum(tmpfile + wim_name_len, 9);
3109         tmpfile[wim_name_len + 9] = T('\0');
3110
3111         ret = wimlib_write(wim, tmpfile, WIMLIB_ALL_IMAGES,
3112                            write_flags |
3113                                 WIMLIB_WRITE_FLAG_FSYNC |
3114                                 WIMLIB_WRITE_FLAG_RETAIN_GUID,
3115                            num_threads);
3116         if (ret) {
3117                 tunlink(tmpfile);
3118                 return ret;
3119         }
3120
3121         if (filedes_valid(&wim->in_fd)) {
3122                 filedes_close(&wim->in_fd);
3123                 filedes_invalidate(&wim->in_fd);
3124         }
3125
3126         /* Rename the new WIM file to the original WIM file.  Note: on Windows
3127          * this actually calls win32_rename_replacement(), not _wrename(), so
3128          * that an existing destination file can be replaced.  */
3129         ret = trename(tmpfile, wim->filename);
3130         if (ret) {
3131                 ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'",
3132                                  tmpfile, wim->filename);
3133         #ifdef __WIN32__
3134                 if (ret < 0)
3135         #endif
3136                 {
3137                         tunlink(tmpfile);
3138                 }
3139                 return WIMLIB_ERR_RENAME;
3140         }
3141
3142         union wimlib_progress_info progress;
3143         progress.rename.from = tmpfile;
3144         progress.rename.to = wim->filename;
3145         return call_progress(wim->progfunc, WIMLIB_PROGRESS_MSG_RENAME,
3146                              &progress, wim->progctx);
3147 }
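
/*
 * Minimal sketch (not compiled into wimlib) of the write-temporary-then-rename
 * replacement strategy used above, with plain POSIX calls and a hypothetical
 * write_contents() callback.  rename() within one directory is atomic on POSIX
 * filesystems, so a reader sees either the complete old file or the complete
 * new one; on Windows wimlib instead relies on win32_rename_replacement(), as
 * noted above.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int
replace_via_tmpfile(const char *path,
		    int (*write_contents)(int fd, void *ctx), void *ctx)
{
	char tmp[4096];
	if (snprintf(tmp, sizeof(tmp), "%s.XXXXXX", path) >= (int)sizeof(tmp))
		return -1;

	int fd = mkstemp(tmp);		/* temporary file in the same directory */
	if (fd < 0)
		return -1;

	if (write_contents(fd, ctx) != 0 || fsync(fd) != 0) {
		close(fd);
		unlink(tmp);		/* clean up on failure */
		return -1;
	}
	close(fd);

	if (rename(tmp, path) != 0) {	/* atomic replacement */
		unlink(tmp);
		return -1;
	}
	return 0;
}
#endif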
3148
3149 /* Determine if the specified WIM file may be updated by appending in-place
3150  * rather than writing and replacing it with an entirely new file.  */
3151 static bool
3152 can_overwrite_wim_inplace(const WIMStruct *wim, int write_flags)
3153 {
3154         /* REBUILD flag forces full rebuild.  */
3155         if (write_flags & WIMLIB_WRITE_FLAG_REBUILD)
3156                 return false;
3157
3158         /* Image deletions cause full rebuild by default.  */
3159         if (wim->image_deletion_occurred &&
3160             !(write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
3161                 return false;
3162
3163         /* Pipable WIMs cannot be updated in place, nor can a non-pipable WIM be
3164          * turned into a pipable WIM in-place.  */
3165         if (wim_is_pipable(wim) || (write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
3166                 return false;
3167
3168         /* The default compression type and compression chunk size selected for
3169          * the output WIM must be the same as those currently used for the WIM.
3170          */
3171         if (wim->compression_type != wim->out_compression_type)
3172                 return false;
3173         if (wim->chunk_size != wim->out_chunk_size)
3174                 return false;
3175
3176         return true;
3177 }
3178
3179 /* API function documented in wimlib.h  */
3180 WIMLIBAPI int
3181 wimlib_overwrite(WIMStruct *wim, int write_flags, unsigned num_threads)
3182 {
3183         int ret;
3184         u32 orig_hdr_flags;
3185
3186         if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
3187                 return WIMLIB_ERR_INVALID_PARAM;
3188
3189         if (!wim->filename)
3190                 return WIMLIB_ERR_NO_FILENAME;
3191
3192         orig_hdr_flags = wim->hdr.flags;
3193         if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
3194                 wim->hdr.flags &= ~WIM_HDR_FLAG_READONLY;
3195         ret = can_modify_wim(wim);
3196         wim->hdr.flags = orig_hdr_flags;
3197         if (ret)
3198                 return ret;
3199
3200         if (can_overwrite_wim_inplace(wim, write_flags)) {
3201                 ret = overwrite_wim_inplace(wim, write_flags, num_threads);
3202                 if (ret != WIMLIB_ERR_RESOURCE_ORDER)
3203                         return ret;
3204                 WARNING("Falling back to re-building entire WIM");
3205         }
3206         return overwrite_wim_via_tmpfile(wim, write_flags, num_threads);
3207 }
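
/*
 * Illustrative sketch (not compiled into wimlib): committing changes to an
 * already-opened, writable WIMStruct `wim'.  With no special flags,
 * wimlib_overwrite() appends in place when possible and otherwise rebuilds the
 * file via a temporary; WIMLIB_WRITE_FLAG_REBUILD forces the rebuild path, and
 * WIMLIB_WRITE_FLAG_CHECK_INTEGRITY includes an integrity table.
 */
#if 0
#include <wimlib.h>

static int
example_commit_changes(WIMStruct *wim)
{
	return wimlib_overwrite(wim,
				WIMLIB_WRITE_FLAG_CHECK_INTEGRITY,
				0 /* default thread count */);
}
#endif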