1 /*
2  * write.c
3  *
4  * Support for writing WIM files; write a WIM file, overwrite a WIM file, write
5  * compressed file resources, etc.
6  */
7
8 /*
9  * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
10  *
11  * This file is free software; you can redistribute it and/or modify it under
12  * the terms of the GNU Lesser General Public License as published by the Free
13  * Software Foundation; either version 3 of the License, or (at your option) any
14  * later version.
15  *
16  * This file is distributed in the hope that it will be useful, but WITHOUT
17  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18  * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
19  * details.
20  *
21  * You should have received a copy of the GNU Lesser General Public License
22  * along with this file; if not, see http://www.gnu.org/licenses/.
23  */
24
25 #ifdef HAVE_CONFIG_H
26 #  include "config.h"
27 #endif
28
29 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
30 /* On BSD, this should be included before "wimlib/list.h" so that "wimlib/list.h" can
31  * override the LIST_HEAD macro. */
32 #  include <sys/file.h>
33 #endif
34
35 #include <errno.h>
36 #include <fcntl.h>
37 #include <stdlib.h>
38 #include <unistd.h>
39
40 #include "wimlib/alloca.h"
41 #include "wimlib/assert.h"
42 #include "wimlib/blob_table.h"
43 #include "wimlib/chunk_compressor.h"
44 #include "wimlib/endianness.h"
45 #include "wimlib/error.h"
46 #include "wimlib/file_io.h"
47 #include "wimlib/header.h"
48 #include "wimlib/inode.h"
49 #include "wimlib/integrity.h"
50 #include "wimlib/metadata.h"
51 #include "wimlib/paths.h"
52 #include "wimlib/progress.h"
53 #include "wimlib/resource.h"
54 #include "wimlib/solid.h"
55 #include "wimlib/win32.h" /* win32_rename_replacement() */
56 #include "wimlib/write.h"
57 #include "wimlib/xml.h"
58
59
60 /* wimlib internal flags used when writing resources.  */
61 #define WRITE_RESOURCE_FLAG_RECOMPRESS          0x00000001
62 #define WRITE_RESOURCE_FLAG_PIPABLE             0x00000002
63 #define WRITE_RESOURCE_FLAG_SOLID               0x00000004
64 #define WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE 0x00000008
65 #define WRITE_RESOURCE_FLAG_SOLID_SORT          0x00000010
66
67 static int
68 write_flags_to_resource_flags(int write_flags)
69 {
70         int write_resource_flags = 0;
71
72         if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
73                 write_resource_flags |= WRITE_RESOURCE_FLAG_RECOMPRESS;
74
75         if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
76                 write_resource_flags |= WRITE_RESOURCE_FLAG_PIPABLE;
77
78         if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
79                 write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID;
80
81         if (write_flags & WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES)
82                 write_resource_flags |= WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE;
83
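        /* Added note: with the masked test below, solid sorting is enabled by
         * default for solid writes --- passing WIMLIB_WRITE_FLAG_SOLID alone
         * sets WRITE_RESOURCE_FLAG_SOLID_SORT, while also passing
         * WIMLIB_WRITE_FLAG_NO_SOLID_SORT leaves it unset.  */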
84         if ((write_flags & (WIMLIB_WRITE_FLAG_SOLID |
85                             WIMLIB_WRITE_FLAG_NO_SOLID_SORT)) ==
86             WIMLIB_WRITE_FLAG_SOLID)
87                 write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID_SORT;
88
89         return write_resource_flags;
90 }
91
92 struct filter_context {
93         int write_flags;
94         WIMStruct *wim;
95 };
96
97 /*
98  * Determine whether the specified blob should be filtered out from the write.
99  *
100  * Return values:
101  *
102  *  < 0 : The blob should be hard-filtered; that is, not included in the output
103  *        WIM file at all.
104  *    0 : The blob should not be filtered out.
105  *  > 0 : The blob should be soft-filtered; that is, it already exists in the
106  *        WIM file and may not need to be written again.
107  */
108 static int
109 blob_filtered(const struct blob_descriptor *blob,
110               const struct filter_context *ctx)
111 {
112         int write_flags;
113         WIMStruct *wim;
114
115         if (ctx == NULL)
116                 return 0;
117
118         write_flags = ctx->write_flags;
119         wim = ctx->wim;
120
121         if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE &&
122             blob->blob_location == BLOB_IN_WIM &&
123             blob->rdesc->wim == wim)
124                 return 1;
125
126         if (write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS &&
127             blob->blob_location == BLOB_IN_WIM &&
128             blob->rdesc->wim != wim)
129                 return -1;
130
131         return 0;
132 }
133
134 static bool
135 blob_hard_filtered(const struct blob_descriptor *blob,
136                    struct filter_context *ctx)
137 {
138         return blob_filtered(blob, ctx) < 0;
139 }
140
141 static inline int
142 may_soft_filter_blobs(const struct filter_context *ctx)
143 {
144         if (ctx == NULL)
145                 return 0;
146         return ctx->write_flags & WIMLIB_WRITE_FLAG_OVERWRITE;
147 }
148
149 static inline int
150 may_hard_filter_blobs(const struct filter_context *ctx)
151 {
152         if (ctx == NULL)
153                 return 0;
154         return ctx->write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS;
155 }
156
157 static inline int
158 may_filter_blobs(const struct filter_context *ctx)
159 {
160         return (may_soft_filter_blobs(ctx) || may_hard_filter_blobs(ctx));
161 }
162
163 /* Return true if the specified resource is compressed and the compressed data
164  * can be reused with the specified output parameters.  */
165 static bool
166 can_raw_copy(const struct blob_descriptor *blob,
167              int write_resource_flags, int out_ctype, u32 out_chunk_size)
168 {
169         const struct wim_resource_descriptor *rdesc;
170
171         if (write_resource_flags & WRITE_RESOURCE_FLAG_RECOMPRESS)
172                 return false;
173
174         if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
175                 return false;
176
177         if (blob->blob_location != BLOB_IN_WIM)
178                 return false;
179
180         rdesc = blob->rdesc;
181
182         if (rdesc->is_pipable != !!(write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE))
183                 return false;
184
185         if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) {
186                 /* Normal compressed resource: Must use same compression type
187                  * and chunk size.  */
188                 return (rdesc->compression_type == out_ctype &&
189                         rdesc->chunk_size == out_chunk_size);
190         }
191
192         if ((rdesc->flags & WIM_RESHDR_FLAG_SOLID) &&
193             (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
194         {
195                 /* Solid resource: Such resources may contain multiple blobs,
196                  * and in general only a subset of them needs to be written.  As
197                  * a heuristic, re-use the raw data if more than two-thirds of
198                  * the uncompressed size is being written.  */
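                /* Illustrative example (added, not from the original
                 * comments): for a solid resource whose uncompressed_size is
                 * 300 MiB, the raw data is reused only if the blobs marked
                 * will_be_in_output_wim total more than 200 MiB; otherwise the
                 * needed blobs are read and recompressed.  */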
199
200                 /* Note: solid resources contain a header that specifies the
201                  * compression type and chunk size; therefore we don't need to
202                  * check if they are compatible with @out_ctype and
203                  * @out_chunk_size.  */
204
205                 struct blob_descriptor *res_blob;
206                 u64 write_size = 0;
207
208                 list_for_each_entry(res_blob, &rdesc->blob_list, rdesc_node)
209                         if (res_blob->will_be_in_output_wim)
210                                 write_size += res_blob->size;
211
212                 return (write_size > rdesc->uncompressed_size * 2 / 3);
213         }
214
215         return false;
216 }
217
218 static u32
219 reshdr_flags_for_blob(const struct blob_descriptor *blob)
220 {
221         u32 reshdr_flags = 0;
222         if (blob->is_metadata)
223                 reshdr_flags |= WIM_RESHDR_FLAG_METADATA;
224         return reshdr_flags;
225 }
226
227 static void
228 blob_set_out_reshdr_for_reuse(struct blob_descriptor *blob)
229 {
230         const struct wim_resource_descriptor *rdesc;
231
232         wimlib_assert(blob->blob_location == BLOB_IN_WIM);
233         rdesc = blob->rdesc;
234
235         if (rdesc->flags & WIM_RESHDR_FLAG_SOLID) {
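                /* Descriptive note (added): the assignments below repurpose
                 * the reshdr fields for a blob inside a solid resource ---
                 * offset_in_wim becomes the blob's offset within the
                 * uncompressed solid resource, size_in_wim becomes the blob's
                 * uncompressed size, and uncompressed_size is zeroed to mark
                 * the entry as a reference into a solid resource, whose own
                 * location is recorded in the out_res_* fields.  */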
236                 blob->out_reshdr.offset_in_wim = blob->offset_in_res;
237                 blob->out_reshdr.uncompressed_size = 0;
238                 blob->out_reshdr.size_in_wim = blob->size;
239
240                 blob->out_res_offset_in_wim = rdesc->offset_in_wim;
241                 blob->out_res_size_in_wim = rdesc->size_in_wim;
242                 blob->out_res_uncompressed_size = rdesc->uncompressed_size;
243         } else {
244                 blob->out_reshdr.offset_in_wim = rdesc->offset_in_wim;
245                 blob->out_reshdr.uncompressed_size = rdesc->uncompressed_size;
246                 blob->out_reshdr.size_in_wim = rdesc->size_in_wim;
247         }
248         blob->out_reshdr.flags = rdesc->flags;
249 }
250
251
252 /* Write the header for a blob in a pipable WIM.  */
253 static int
254 write_pwm_blob_header(const struct blob_descriptor *blob,
255                       struct filedes *out_fd, bool compressed)
256 {
257         struct pwm_blob_hdr blob_hdr;
258         u32 reshdr_flags;
259         int ret;
260
261         wimlib_assert(!blob->unhashed);
262
263         blob_hdr.magic = cpu_to_le64(PWM_BLOB_MAGIC);
264         blob_hdr.uncompressed_size = cpu_to_le64(blob->size);
265         copy_hash(blob_hdr.hash, blob->hash);
266         reshdr_flags = reshdr_flags_for_blob(blob);
267         if (compressed)
268                 reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED;
269         blob_hdr.flags = cpu_to_le32(reshdr_flags);
270         ret = full_write(out_fd, &blob_hdr, sizeof(blob_hdr));
271         if (ret)
272                 ERROR_WITH_ERRNO("Write error");
273         return ret;
274 }
275
276 struct write_blobs_progress_data {
277         wimlib_progress_func_t progfunc;
278         void *progctx;
279         union wimlib_progress_info progress;
280         u64 next_progress;
281 };
282
283 static int
284 do_write_blobs_progress(struct write_blobs_progress_data *progress_data,
285                         u64 complete_size, u32 complete_count, bool discarded)
286 {
287         union wimlib_progress_info *progress = &progress_data->progress;
288         int ret;
289
290         if (discarded) {
291                 progress->write_streams.total_bytes -= complete_size;
292                 progress->write_streams.total_streams -= complete_count;
293                 if (progress_data->next_progress != ~(u64)0 &&
294                     progress_data->next_progress > progress->write_streams.total_bytes)
295                 {
296                         progress_data->next_progress = progress->write_streams.total_bytes;
297                 }
298         } else {
299                 progress->write_streams.completed_bytes += complete_size;
300                 progress->write_streams.completed_streams += complete_count;
301         }
302
303         if (progress->write_streams.completed_bytes >= progress_data->next_progress)
304         {
305                 ret = call_progress(progress_data->progfunc,
306                                     WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
307                                     progress,
308                                     progress_data->progctx);
309                 if (ret)
310                         return ret;
311
312                 if (progress_data->next_progress == progress->write_streams.total_bytes) {
313                         progress_data->next_progress = ~(u64)0;
314                 } else {
315                         /* Handle rate-limiting of messages  */
316
317                         /* Send new message as soon as another 1/128 of the
318                          * total has been written.  (Arbitrary number.)  */
319                         progress_data->next_progress =
320                                 progress->write_streams.completed_bytes +
321                                         progress->write_streams.total_bytes / 128;
322
323                         /* ... Unless that would be more than 5000000 bytes, in
324                          * which case send the next after the next 5000000
325                          * bytes.  (Another arbitrary number.)  */
326                         if (progress->write_streams.completed_bytes + 5000000 <
327                             progress_data->next_progress)
328                                 progress_data->next_progress =
329                                         progress->write_streams.completed_bytes + 5000000;
330
331                         /* ... But always send a message as soon as we're
332                          * completely done.  */
333                         if (progress->write_streams.total_bytes <
334                             progress_data->next_progress)
335                                 progress_data->next_progress =
336                                         progress->write_streams.total_bytes;
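                        /* Worked example (illustrative figures, added): with
                         * total_bytes = 4 GiB, total_bytes / 128 = 32 MiB
                         * exceeds 5000000, so messages fire roughly every
                         * 5000000 bytes; with total_bytes = 128 MiB they fire
                         * every 1 MiB instead.  */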
337                 }
338         }
339         return 0;
340 }
341
342 struct write_blobs_ctx {
343         /* File descriptor to which the blobs are being written.  */
344         struct filedes *out_fd;
345
346         /* Blob table for the WIMStruct on whose behalf the blobs are being
347          * written.  */
348         struct blob_table *blob_table;
349
350         /* Compression format to use.  */
351         int out_ctype;
352
353         /* Maximum uncompressed chunk size in compressed resources to use.  */
354         u32 out_chunk_size;
355
356         /* Flags that affect how the blobs will be written.  */
357         int write_resource_flags;
358
359         /* Data used for issuing WRITE_STREAMS progress.  */
360         struct write_blobs_progress_data progress_data;
361
362         struct filter_context *filter_ctx;
363
364         /* Upper bound on the total number of bytes that need to be
365          * compressed.  */
366         u64 num_bytes_to_compress;
367
368         /* Pointer to the chunk_compressor implementation being used for
369          * compressing chunks of data, or NULL if chunks are being written
370          * uncompressed.  */
371         struct chunk_compressor *compressor;
372
373         /* A buffer of size @out_chunk_size that has been loaned out from the
374          * chunk compressor and is currently being filled with the uncompressed
375          * data of the next chunk.  */
376         u8 *cur_chunk_buf;
377
378         /* Number of bytes in @cur_chunk_buf that are currently filled.  */
379         size_t cur_chunk_buf_filled;
380
381         /* List of blobs that currently have chunks being compressed.  */
382         struct list_head blobs_being_compressed;
383
384         /* List of blobs in the solid resource.  Blobs are moved here after
385          * @blobs_being_compressed only when writing a solid resource.  */
386         struct list_head blobs_in_solid_resource;
387
388         /* Current uncompressed offset in the blob being read.  */
389         u64 cur_read_blob_offset;
390
391         /* Uncompressed size of the blob currently being read.  */
392         u64 cur_read_blob_size;
393
394         /* Current uncompressed offset in the blob being written.  */
395         u64 cur_write_blob_offset;
396
397         /* Uncompressed size of resource currently being written.  */
398         u64 cur_write_res_size;
399
400         /* Array that is filled in with compressed chunk sizes as a resource is
401          * being written.  */
402         u64 *chunk_csizes;
403
404         /* Index of next entry in @chunk_csizes to fill in.  */
405         size_t chunk_index;
406
407         /* Number of entries in @chunk_csizes currently allocated.  */
408         size_t num_alloc_chunks;
409
410         /* Offset in the output file of the start of the chunks of the resource
411          * currently being written.  */
412         u64 chunks_start_offset;
413 };
414
415 /* Reserve space for the chunk table and prepare to accumulate the chunk table
416  * in memory.  */
417 static int
418 begin_chunk_table(struct write_blobs_ctx *ctx, u64 res_expected_size)
419 {
420         u64 expected_num_chunks;
421         u64 expected_num_chunk_entries;
422         size_t reserve_size;
423         int ret;
424
425         /* Calculate the number of chunks and chunk entries that should be
426          * needed for the resource.  These normally will be the final values,
427          * but in SOLID mode some of the blobs we're planning to write into the
428          * resource may be duplicates, and therefore discarded, potentially
429          * decreasing the number of chunk entries needed.  */
430         expected_num_chunks = DIV_ROUND_UP(res_expected_size, ctx->out_chunk_size);
431         expected_num_chunk_entries = expected_num_chunks;
432         if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
433                 expected_num_chunk_entries--;
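        /* Worked example (illustrative, added): a 10 MiB non-solid resource
         * with a 32 KiB output chunk size needs DIV_ROUND_UP(10 MiB, 32 KiB) =
         * 320 chunks but only 319 chunk table entries, since the first chunk's
         * offset of 0 is implicit and is not stored.  */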
434
435         /* Make sure the chunk_csizes array is long enough to store the
436          * compressed size of each chunk.  */
437         if (expected_num_chunks > ctx->num_alloc_chunks) {
438                 u64 new_length = expected_num_chunks + 50;
439
440                 if ((size_t)new_length != new_length) {
441                         ERROR("Resource size too large (%"PRIu64" bytes!",
442                               res_expected_size);
443                         return WIMLIB_ERR_NOMEM;
444                 }
445
446                 FREE(ctx->chunk_csizes);
447                 ctx->chunk_csizes = MALLOC(new_length * sizeof(ctx->chunk_csizes[0]));
448                 if (ctx->chunk_csizes == NULL) {
449                         ctx->num_alloc_chunks = 0;
450                         return WIMLIB_ERR_NOMEM;
451                 }
452                 ctx->num_alloc_chunks = new_length;
453         }
454
455         ctx->chunk_index = 0;
456
457         if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)) {
458                 /* Reserve space for the chunk table in the output file.  In the
459                  * case of solid resources this reserves the upper bound for the
460                  * needed space, not necessarily the exact space which will
461                  * prove to be needed.  At this point, we just use @chunk_csizes
462                  * for a buffer of 0's because the actual compressed chunk sizes
463                  * are unknown.  */
464                 reserve_size = expected_num_chunk_entries *
465                                get_chunk_entry_size(res_expected_size,
466                                                     0 != (ctx->write_resource_flags &
467                                                           WRITE_RESOURCE_FLAG_SOLID));
468                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)
469                         reserve_size += sizeof(struct alt_chunk_table_header_disk);
470                 memset(ctx->chunk_csizes, 0, reserve_size);
471                 ret = full_write(ctx->out_fd, ctx->chunk_csizes, reserve_size);
472                 if (ret)
473                         return ret;
474         }
475         return 0;
476 }
477
478 static int
479 begin_write_resource(struct write_blobs_ctx *ctx, u64 res_expected_size)
480 {
481         int ret;
482
483         wimlib_assert(res_expected_size != 0);
484
485         if (ctx->compressor != NULL) {
486                 ret = begin_chunk_table(ctx, res_expected_size);
487                 if (ret)
488                         return ret;
489         }
490
491         /* Output file descriptor is now positioned at the offset at which to
492          * write the first chunk of the resource.  */
493         ctx->chunks_start_offset = ctx->out_fd->offset;
494         ctx->cur_write_blob_offset = 0;
495         ctx->cur_write_res_size = res_expected_size;
496         return 0;
497 }
498
499 static int
500 end_chunk_table(struct write_blobs_ctx *ctx, u64 res_actual_size,
501                 u64 *res_start_offset_ret, u64 *res_store_size_ret)
502 {
503         size_t actual_num_chunks;
504         size_t actual_num_chunk_entries;
505         size_t chunk_entry_size;
506         int ret;
507
508         actual_num_chunks = ctx->chunk_index;
509         actual_num_chunk_entries = actual_num_chunks;
510         if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
511                 actual_num_chunk_entries--;
512
513         chunk_entry_size = get_chunk_entry_size(res_actual_size,
514                                                 0 != (ctx->write_resource_flags &
515                                                       WRITE_RESOURCE_FLAG_SOLID));
516
517         typedef le64 _may_alias_attribute aliased_le64_t;
518         typedef le32 _may_alias_attribute aliased_le32_t;
519
520         if (chunk_entry_size == 4) {
521                 aliased_le32_t *entries = (aliased_le32_t*)ctx->chunk_csizes;
522
523                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
524                         for (size_t i = 0; i < actual_num_chunk_entries; i++)
525                                 entries[i] = cpu_to_le32(ctx->chunk_csizes[i]);
526                 } else {
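                        /* Descriptive comment (added): non-solid chunk tables
                         * store, for each chunk after the first, that chunk's
                         * cumulative offset from the start of the chunk data;
                         * the first chunk's offset of 0 is implicit, which is
                         * why there is one fewer entry than chunks.  */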
527                         u32 offset = ctx->chunk_csizes[0];
528                         for (size_t i = 0; i < actual_num_chunk_entries; i++) {
529                                 u32 next_size = ctx->chunk_csizes[i + 1];
530                                 entries[i] = cpu_to_le32(offset);
531                                 offset += next_size;
532                         }
533                 }
534         } else {
535                 aliased_le64_t *entries = (aliased_le64_t*)ctx->chunk_csizes;
536
537                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
538                         for (size_t i = 0; i < actual_num_chunk_entries; i++)
539                                 entries[i] = cpu_to_le64(ctx->chunk_csizes[i]);
540                 } else {
541                         u64 offset = ctx->chunk_csizes[0];
542                         for (size_t i = 0; i < actual_num_chunk_entries; i++) {
543                                 u64 next_size = ctx->chunk_csizes[i + 1];
544                                 entries[i] = cpu_to_le64(offset);
545                                 offset += next_size;
546                         }
547                 }
548         }
549
550         size_t chunk_table_size = actual_num_chunk_entries * chunk_entry_size;
551         u64 res_start_offset;
552         u64 res_end_offset;
553
554         if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
555                 ret = full_write(ctx->out_fd, ctx->chunk_csizes, chunk_table_size);
556                 if (ret)
557                         goto write_error;
558                 res_end_offset = ctx->out_fd->offset;
559                 res_start_offset = ctx->chunks_start_offset;
560         } else {
561                 res_end_offset = ctx->out_fd->offset;
562
563                 u64 chunk_table_offset;
564
565                 chunk_table_offset = ctx->chunks_start_offset - chunk_table_size;
566
567                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
568                         struct alt_chunk_table_header_disk hdr;
569
570                         hdr.res_usize = cpu_to_le64(res_actual_size);
571                         hdr.chunk_size = cpu_to_le32(ctx->out_chunk_size);
572                         hdr.compression_format = cpu_to_le32(ctx->out_ctype);
573
574                         BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_XPRESS != 1);
575                         BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZX != 2);
576                         BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZMS != 3);
577
578                         ret = full_pwrite(ctx->out_fd, &hdr, sizeof(hdr),
579                                           chunk_table_offset - sizeof(hdr));
580                         if (ret)
581                                 goto write_error;
582                         res_start_offset = chunk_table_offset - sizeof(hdr);
583                 } else {
584                         res_start_offset = chunk_table_offset;
585                 }
586
587                 ret = full_pwrite(ctx->out_fd, ctx->chunk_csizes,
588                                   chunk_table_size, chunk_table_offset);
589                 if (ret)
590                         goto write_error;
591         }
592
593         *res_start_offset_ret = res_start_offset;
594         *res_store_size_ret = res_end_offset - res_start_offset;
595
596         return 0;
597
598 write_error:
599         ERROR_WITH_ERRNO("Write error");
600         return ret;
601 }
602
603 /* Finish writing a WIM resource by writing or updating the chunk table (if not
604  * writing the data uncompressed) and loading its metadata into @out_reshdr.  */
605 static int
606 end_write_resource(struct write_blobs_ctx *ctx, struct wim_reshdr *out_reshdr)
607 {
608         int ret;
609         u64 res_size_in_wim;
610         u64 res_uncompressed_size;
611         u64 res_offset_in_wim;
612
613         wimlib_assert(ctx->cur_write_blob_offset == ctx->cur_write_res_size ||
614                       (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID));
615         res_uncompressed_size = ctx->cur_write_res_size;
616
617         if (ctx->compressor) {
618                 ret = end_chunk_table(ctx, res_uncompressed_size,
619                                       &res_offset_in_wim, &res_size_in_wim);
620                 if (ret)
621                         return ret;
622         } else {
623                 res_offset_in_wim = ctx->chunks_start_offset;
624                 res_size_in_wim = ctx->out_fd->offset - res_offset_in_wim;
625         }
626         out_reshdr->uncompressed_size = res_uncompressed_size;
627         out_reshdr->size_in_wim = res_size_in_wim;
628         out_reshdr->offset_in_wim = res_offset_in_wim;
629         return 0;
630 }
631
632 /* Call when no more data from the file at @path is needed.  */
633 static int
634 done_with_file(const tchar *path, wimlib_progress_func_t progfunc, void *progctx)
635 {
636         union wimlib_progress_info info;
637
638         info.done_with_file.path_to_file = path;
639
640         return call_progress(progfunc, WIMLIB_PROGRESS_MSG_DONE_WITH_FILE,
641                              &info, progctx);
642 }
643
644 static int
645 do_done_with_blob(struct blob_descriptor *blob,
646                   wimlib_progress_func_t progfunc, void *progctx)
647 {
648         int ret;
649         struct wim_inode *inode;
650
651         if (!blob->may_send_done_with_file)
652                 return 0;
653
654         inode = blob->file_inode;
655
656         wimlib_assert(inode != NULL);
657         wimlib_assert(inode->num_remaining_streams > 0);
658         if (--inode->num_remaining_streams > 0)
659                 return 0;
660
661 #ifdef __WIN32__
662         /* XXX: This logic really should be somewhere else.  */
663
664         /* We want the path to the file, but blob->file_on_disk might actually
665          * refer to a named data stream.  Temporarily strip the named data
666          * stream from the path.  */
667         wchar_t *p_colon = NULL;
668         wchar_t *p_question_mark = NULL;
669         const wchar_t *p_stream_name;
670
671         p_stream_name = path_stream_name(blob->file_on_disk);
672         if (unlikely(p_stream_name)) {
673                 p_colon = (wchar_t *)(p_stream_name - 1);
674                 wimlib_assert(*p_colon == L':');
675                 *p_colon = L'\0';
676         }
677
678         /* We should also use a fake Win32 path instead of an NT path.  */
679         if (!wcsncmp(blob->file_on_disk, L"\\??\\", 4)) {
680                 p_question_mark = &blob->file_on_disk[1];
681                 *p_question_mark = L'\\';
682         }
683 #endif
684
685         ret = done_with_file(blob->file_on_disk, progfunc, progctx);
686
687 #ifdef __WIN32__
688         if (p_colon)
689                 *p_colon = L':';
690         if (p_question_mark)
691                 *p_question_mark = L'?';
692 #endif
693         return ret;
694 }
695
696 /* Handle WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES mode.  */
697 static inline int
698 done_with_blob(struct blob_descriptor *blob, struct write_blobs_ctx *ctx)
699 {
700         if (likely(!(ctx->write_resource_flags &
701                      WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE)))
702                 return 0;
703         return do_done_with_blob(blob, ctx->progress_data.progfunc,
704                                  ctx->progress_data.progctx);
705 }
706
707 /* Begin processing a blob for writing.  */
708 static int
709 write_blob_begin_read(struct blob_descriptor *blob, void *_ctx)
710 {
711         struct write_blobs_ctx *ctx = _ctx;
712         int ret;
713
714         wimlib_assert(blob->size > 0);
715
716         ctx->cur_read_blob_offset = 0;
717         ctx->cur_read_blob_size = blob->size;
718
719         /* As an optimization, we allow some blobs to be "unhashed", meaning
720          * their SHA-1 message digests are unknown.  This is the case with blobs
721          * that are added by scanning a directory tree with wimlib_add_image(),
722          * for example.  Since WIM uses single-instance blobs, we don't know
723          * whether each such blob really needs to be written until it is
724          * actually checksummed, unless it has a unique size.  In such cases we
725          * read and checksum the blob in this function, thereby advancing ahead
726          * of read_blob_list(), which will still provide the data again to
727          * write_blob_process_chunk().  This is okay because an unhashed blob
728          * cannot be in a WIM resource, which might be costly to decompress.  */
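        /* Example (illustrative, added): if two identical files are scanned
         * from disk, both arrive here as unhashed blobs; checksumming the
         * second one reveals the duplicate, and the code below either skips it
         * entirely or swaps in the already-known descriptor rather than
         * writing the same data twice.  */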
729         if (ctx->blob_table != NULL && blob->unhashed && !blob->unique_size) {
730
731                 struct blob_descriptor *new_blob;
732
733                 ret = hash_unhashed_blob(blob, ctx->blob_table, &new_blob);
734                 if (ret)
735                         return ret;
736                 if (new_blob != blob) {
737                         /* Duplicate blob detected.  */
738
739                         if (new_blob->will_be_in_output_wim ||
740                             blob_filtered(new_blob, ctx->filter_ctx))
741                         {
742                                 /* The duplicate blob is already being included
743                                  * in the output WIM, or it would be filtered
744                                  * out if it had been included.  Skip writing this blob
745                                  * (and reading it again) entirely, passing its
746                                  * output reference count to the duplicate blob
747                                  * in the former case.  */
748                                 ret = do_write_blobs_progress(&ctx->progress_data,
749                                                               blob->size, 1, true);
750                                 list_del(&blob->write_blobs_list);
751                                 list_del(&blob->blob_table_list);
752                                 if (new_blob->will_be_in_output_wim)
753                                         new_blob->out_refcnt += blob->out_refcnt;
754                                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)
755                                         ctx->cur_write_res_size -= blob->size;
756                                 if (!ret)
757                                         ret = done_with_blob(blob, ctx);
758                                 free_blob_descriptor(blob);
759                                 if (ret)
760                                         return ret;
761                                 return BEGIN_BLOB_STATUS_SKIP_BLOB;
762                         } else {
763                                 /* The duplicate blob can validly be written,
764                                  * but was not marked as such.  Discard the
765                                  * current blob descriptor and use the
766                                  * duplicate, but actually freeing the current
767                                  * blob descriptor must wait until
768                                  * read_blob_list() has finished reading its
769                                  * data.  */
770                                 list_replace(&blob->write_blobs_list,
771                                              &new_blob->write_blobs_list);
772                                 list_replace(&blob->blob_table_list,
773                                              &new_blob->blob_table_list);
774                                 blob->will_be_in_output_wim = 0;
775                                 new_blob->out_refcnt = blob->out_refcnt;
776                                 new_blob->will_be_in_output_wim = 1;
777                                 new_blob->may_send_done_with_file = 0;
778                                 blob = new_blob;
779                         }
780                 }
781         }
782         list_move_tail(&blob->write_blobs_list, &ctx->blobs_being_compressed);
783         return 0;
784 }
785
786 /* Rewrite a blob that was just written compressed (as a non-solid WIM resource)
787  * as uncompressed instead.  */
788 static int
789 write_blob_uncompressed(struct blob_descriptor *blob, struct filedes *out_fd)
790 {
791         int ret;
792         u64 begin_offset = blob->out_reshdr.offset_in_wim;
793         u64 end_offset = out_fd->offset;
794
795         if (filedes_seek(out_fd, begin_offset) == -1)
796                 return 0;
797
798         ret = extract_full_blob_to_fd(blob, out_fd);
799         if (ret) {
800                 /* Error reading the uncompressed data.  */
801                 if (out_fd->offset == begin_offset &&
802                     filedes_seek(out_fd, end_offset) != -1)
803                 {
804                         /* Nothing was actually written yet, and we successfully
805                          * seeked to the end of the compressed resource, so
806                          * don't issue a hard error; just keep the compressed
807                          * resource instead.  */
808                         WARNING("Recovered compressed resource of "
809                                 "size %"PRIu64", continuing on.", blob->size);
810                         return 0;
811                 }
812                 return ret;
813         }
814
815         wimlib_assert(out_fd->offset - begin_offset == blob->size);
816
817         if (out_fd->offset < end_offset &&
818             0 != ftruncate(out_fd->fd, out_fd->offset))
819         {
820                 ERROR_WITH_ERRNO("Can't truncate output file to "
821                                  "offset %"PRIu64, out_fd->offset);
822                 return WIMLIB_ERR_WRITE;
823         }
824
825         blob->out_reshdr.size_in_wim = blob->size;
826         blob->out_reshdr.flags &= ~(WIM_RESHDR_FLAG_COMPRESSED |
827                                     WIM_RESHDR_FLAG_SOLID);
828         return 0;
829 }
830
831 /* Returns true if the specified blob, which was written as a non-solid
832  * resource, should be truncated from the WIM file and re-written uncompressed.
833  * blob->out_reshdr must be filled in from the initial write of the blob.  */
834 static bool
835 should_rewrite_blob_uncompressed(const struct write_blobs_ctx *ctx,
836                                  const struct blob_descriptor *blob)
837 {
838         /* If the compressed data is smaller than the uncompressed data, prefer
839          * the compressed data.  */
840         if (blob->out_reshdr.size_in_wim < blob->out_reshdr.uncompressed_size)
841                 return false;
842
843         /* If we're not actually writing compressed data, then there's no need
844          * for re-writing.  */
845         if (!ctx->compressor)
846                 return false;
847
848         /* If writing a pipable WIM, everything we write to the output is final
849          * (it might actually be a pipe!).  */
850         if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)
851                 return false;
852
853         /* If the blob that would need to be re-read is located in a solid
854          * resource in another WIM file, then re-reading it would be costly.  So
855          * don't do it.
856          *
857          * Exception: if the compressed size happens to be *exactly* the same as
858          * the uncompressed size, then the blob *must* be written uncompressed
859          * in order to remain compatible with the Windows Overlay Filesystem
860          * Filter Driver (WOF).
861          *
862          * TODO: we are currently assuming that the optimization for
863          * single-chunk resources in maybe_rewrite_blob_uncompressed() prevents
864          * this case from being triggered too often.  To fully prevent excessive
865          * decompressions in degenerate cases, we really should obtain the
866          * uncompressed data by decompressing the compressed data we wrote to
867          * the output file.
868          */
869         if (blob->blob_location == BLOB_IN_WIM &&
870             blob->size != blob->rdesc->uncompressed_size &&
871             blob->size != blob->out_reshdr.size_in_wim)
872                 return false;
873
874         return true;
875 }
876
877 static int
878 maybe_rewrite_blob_uncompressed(struct write_blobs_ctx *ctx,
879                                 struct blob_descriptor *blob)
880 {
881         if (!should_rewrite_blob_uncompressed(ctx, blob))
882                 return 0;
883
884         /* Regular (non-solid) WIM resources with exactly one chunk and
885          * compressed size equal to uncompressed size are stored as exactly the
886          * same bytes as the corresponding uncompressed data --- since there must
887          * be 0 entries in the chunk table and the only chunk must be stored
888          * uncompressed.  In this case, there's no need to rewrite anything.  */
889         if (ctx->chunk_index == 1 &&
890             blob->out_reshdr.size_in_wim == blob->out_reshdr.uncompressed_size)
891         {
892                 blob->out_reshdr.flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
893                 return 0;
894         }
895
896         return write_blob_uncompressed(blob, ctx->out_fd);
897 }
898
899 /* Write the next chunk of (typically compressed) data to the output WIM,
900  * handling the writing of the chunk table.  */
901 static int
902 write_chunk(struct write_blobs_ctx *ctx, const void *cchunk,
903             size_t csize, size_t usize)
904 {
905         int ret;
906         struct blob_descriptor *blob;
907         u32 completed_blob_count;
908         u32 completed_size;
909
910         blob = list_entry(ctx->blobs_being_compressed.next,
911                           struct blob_descriptor, write_blobs_list);
912
913         if (ctx->cur_write_blob_offset == 0 &&
914             !(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
915         {
916                 /* Starting to write a new blob in non-solid mode.  */
917
918                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
919                         ret = write_pwm_blob_header(blob, ctx->out_fd,
920                                                     ctx->compressor != NULL);
921                         if (ret)
922                                 return ret;
923                 }
924
925                 ret = begin_write_resource(ctx, blob->size);
926                 if (ret)
927                         return ret;
928         }
929
930         if (ctx->compressor != NULL) {
931                 /* Record the compressed chunk size.  */
932                 wimlib_assert(ctx->chunk_index < ctx->num_alloc_chunks);
933                 ctx->chunk_csizes[ctx->chunk_index++] = csize;
934
935                 /* If writing a pipable WIM, precede the chunk data with a
936                  * chunk header that gives the compressed chunk size.  */
937                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
938                         struct pwm_chunk_hdr chunk_hdr = {
939                                 .compressed_size = cpu_to_le32(csize),
940                         };
941                         ret = full_write(ctx->out_fd, &chunk_hdr,
942                                          sizeof(chunk_hdr));
943                         if (ret)
944                                 goto write_error;
945                 }
946         }
947
948         /* Write the chunk data.  */
949         ret = full_write(ctx->out_fd, cchunk, csize);
950         if (ret)
951                 goto write_error;
952
953         ctx->cur_write_blob_offset += usize;
954
955         completed_size = usize;
956         completed_blob_count = 0;
957         if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
958                 /* Wrote chunk in solid mode.  It may have finished multiple
959                  * blobs.  */
960                 struct blob_descriptor *next_blob;
961
962                 while (blob && ctx->cur_write_blob_offset >= blob->size) {
963
964                         ctx->cur_write_blob_offset -= blob->size;
965
966                         if (ctx->cur_write_blob_offset)
967                                 next_blob = list_entry(blob->write_blobs_list.next,
968                                                       struct blob_descriptor,
969                                                       write_blobs_list);
970                         else
971                                 next_blob = NULL;
972
973                         ret = done_with_blob(blob, ctx);
974                         if (ret)
975                                 return ret;
976                         list_move_tail(&blob->write_blobs_list, &ctx->blobs_in_solid_resource);
977                         completed_blob_count++;
978
979                         blob = next_blob;
980                 }
981         } else {
982                 /* Wrote chunk in non-solid mode.  It may have finished a
983                  * blob.  */
984                 if (ctx->cur_write_blob_offset == blob->size) {
985
986                         wimlib_assert(ctx->cur_write_blob_offset ==
987                                       ctx->cur_write_res_size);
988
989                         ret = end_write_resource(ctx, &blob->out_reshdr);
990                         if (ret)
991                                 return ret;
992
993                         blob->out_reshdr.flags = reshdr_flags_for_blob(blob);
994                         if (ctx->compressor != NULL)
995                                 blob->out_reshdr.flags |= WIM_RESHDR_FLAG_COMPRESSED;
996
997                         ret = maybe_rewrite_blob_uncompressed(ctx, blob);
998                         if (ret)
999                                 return ret;
1000
1001                         wimlib_assert(blob->out_reshdr.uncompressed_size == blob->size);
1002
1003                         ctx->cur_write_blob_offset = 0;
1004
1005                         ret = done_with_blob(blob, ctx);
1006                         if (ret)
1007                                 return ret;
1008                         list_del(&blob->write_blobs_list);
1009                         completed_blob_count++;
1010                 }
1011         }
1012
1013         return do_write_blobs_progress(&ctx->progress_data, completed_size,
1014                                        completed_blob_count, false);
1015
1016 write_error:
1017         ERROR_WITH_ERRNO("Write error");
1018         return ret;
1019 }
1020
1021 static int
1022 prepare_chunk_buffer(struct write_blobs_ctx *ctx)
1023 {
1024         /* While we are unable to get a new chunk buffer due to too many chunks
1025          * already outstanding, retrieve and write the next compressed chunk. */
1026         while (!(ctx->cur_chunk_buf =
1027                  ctx->compressor->get_chunk_buffer(ctx->compressor)))
1028         {
1029                 const void *cchunk;
1030                 u32 csize;
1031                 u32 usize;
1032                 bool bret;
1033                 int ret;
1034
1035                 bret = ctx->compressor->get_compression_result(ctx->compressor,
1036                                                                &cchunk,
1037                                                                &csize,
1038                                                                &usize);
1039                 wimlib_assert(bret);
1040
1041                 ret = write_chunk(ctx, cchunk, csize, usize);
1042                 if (ret)
1043                         return ret;
1044         }
1045         return 0;
1046 }
1047
1048 /* Process the next chunk of data to be written to a WIM resource.  */
1049 static int
1050 write_blob_process_chunk(const void *chunk, size_t size, void *_ctx)
1051 {
1052         struct write_blobs_ctx *ctx = _ctx;
1053         int ret;
1054         const u8 *chunkptr, *chunkend;
1055
1056         wimlib_assert(size != 0);
1057
1058         if (ctx->compressor == NULL) {
1059                 /* Write chunk uncompressed.  */
1060                 ret = write_chunk(ctx, chunk, size, size);
1061                 if (ret)
1062                         return ret;
1063                 ctx->cur_read_blob_offset += size;
1064                 return 0;
1065         }
1066
1067         /* Submit the chunk for compression, but take into account that the
1068          * @size the chunk was provided in may not correspond to the
1069          * @out_chunk_size being used for compression.  */
1070         chunkptr = chunk;
1071         chunkend = chunkptr + size;
1072         do {
1073                 size_t needed_chunk_size;
1074                 size_t bytes_consumed;
1075
1076                 if (!ctx->cur_chunk_buf) {
1077                         ret = prepare_chunk_buffer(ctx);
1078                         if (ret)
1079                                 return ret;
1080                 }
1081
1082                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1083                         needed_chunk_size = ctx->out_chunk_size;
1084                 } else {
1085                         needed_chunk_size = min(ctx->out_chunk_size,
1086                                                 ctx->cur_chunk_buf_filled +
1087                                                         (ctx->cur_read_blob_size -
1088                                                          ctx->cur_read_blob_offset));
1089                 }
1090
1091                 bytes_consumed = min(chunkend - chunkptr,
1092                                      needed_chunk_size - ctx->cur_chunk_buf_filled);
1093
1094                 memcpy(&ctx->cur_chunk_buf[ctx->cur_chunk_buf_filled],
1095                        chunkptr, bytes_consumed);
1096
1097                 chunkptr += bytes_consumed;
1098                 ctx->cur_read_blob_offset += bytes_consumed;
1099                 ctx->cur_chunk_buf_filled += bytes_consumed;
1100
1101                 if (ctx->cur_chunk_buf_filled == needed_chunk_size) {
1102                         ctx->compressor->signal_chunk_filled(ctx->compressor,
1103                                                              ctx->cur_chunk_buf_filled);
1104                         ctx->cur_chunk_buf = NULL;
1105                         ctx->cur_chunk_buf_filled = 0;
1106                 }
1107         } while (chunkptr != chunkend);
1108         return 0;
1109 }
1110
1111 /* Finish processing a blob for writing.  It may not have been completely
1112  * written yet, as the chunk_compressor implementation may still have chunks
1113  * buffered or being compressed.  */
1114 static int
1115 write_blob_end_read(struct blob_descriptor *blob, int status, void *_ctx)
1116 {
1117         struct write_blobs_ctx *ctx = _ctx;
1118
1119         wimlib_assert(ctx->cur_read_blob_offset == ctx->cur_read_blob_size || status);
1120
1121         if (!blob->will_be_in_output_wim) {
1122                 /* The blob was a duplicate.  Now that its data has finished
1123                  * being read, it is being discarded in favor of the duplicate
1124                  * entry.  It therefore is no longer needed, and we can fire the
1125                  * DONE_WITH_FILE callback because the file will not be read
1126                  * again.
1127                  *
1128                  * Note: we can't yet fire DONE_WITH_FILE for non-duplicate
1129                  * blobs, since it needs to be possible to re-read the file if
1130                  * it does not compress to less than its original size.  */
1131                 if (!status)
1132                         status = done_with_blob(blob, ctx);
1133                 free_blob_descriptor(blob);
1134         } else if (!status && blob->unhashed && ctx->blob_table != NULL) {
1135                 /* The blob was not a duplicate and was previously unhashed.
1136                  * Since we passed COMPUTE_MISSING_BLOB_HASHES to
1137                  * read_blob_list(), blob->hash is now computed and valid.  So
1138                  * turn this blob into a "hashed" blob.  */
1139                 list_del(&blob->unhashed_list);
1140                 blob_table_insert(ctx->blob_table, blob);
1141                 blob->unhashed = 0;
1142         }
1143         return status;
1144 }
1145
1146 /* Compute statistics about a list of blobs that will be written.
1147  *
1148  * Assumes the blobs are sorted such that all blobs located in each distinct WIM
1149  * (specified by WIMStruct) are together.  */
1150 static void
1151 compute_blob_list_stats(struct list_head *blob_list,
1152                         struct write_blobs_ctx *ctx)
1153 {
1154         struct blob_descriptor *blob;
1155         u64 total_bytes = 0;
1156         u64 num_blobs = 0;
1157         u64 total_parts = 0;
1158         WIMStruct *prev_wim_part = NULL;
1159
1160         list_for_each_entry(blob, blob_list, write_blobs_list) {
1161                 num_blobs++;
1162                 total_bytes += blob->size;
1163                 if (blob->blob_location == BLOB_IN_WIM) {
1164                         if (prev_wim_part != blob->rdesc->wim) {
1165                                 prev_wim_part = blob->rdesc->wim;
1166                                 total_parts++;
1167                         }
1168                 }
1169         }
1170         ctx->progress_data.progress.write_streams.total_bytes       = total_bytes;
1171         ctx->progress_data.progress.write_streams.total_streams     = num_blobs;
1172         ctx->progress_data.progress.write_streams.completed_bytes   = 0;
1173         ctx->progress_data.progress.write_streams.completed_streams = 0;
1174         ctx->progress_data.progress.write_streams.compression_type  = ctx->out_ctype;
1175         ctx->progress_data.progress.write_streams.total_parts       = total_parts;
1176         ctx->progress_data.progress.write_streams.completed_parts   = 0;
1177         ctx->progress_data.next_progress = 0;
1178 }
1179
1180 /* Find blobs in @blob_list that can be copied to the output WIM in raw form
1181  * rather than compressed.  Delete these blobs from @blob_list and move them to
1182  * @raw_copy_blobs.  Return the total uncompressed size of the blobs that need
1183  * to be compressed.  */
1184 static u64
1185 find_raw_copy_blobs(struct list_head *blob_list,
1186                     int write_resource_flags,
1187                     int out_ctype,
1188                     u32 out_chunk_size,
1189                     struct list_head *raw_copy_blobs)
1190 {
1191         struct blob_descriptor *blob, *tmp;
1192         u64 num_bytes_to_compress = 0;
1193
1194         INIT_LIST_HEAD(raw_copy_blobs);
1195
1196         /* Initialize temporary raw_copy_ok flag.  */
1197         list_for_each_entry(blob, blob_list, write_blobs_list)
1198                 if (blob->blob_location == BLOB_IN_WIM)
1199                         blob->rdesc->raw_copy_ok = 0;
1200
1201         list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
1202                 if (blob->blob_location == BLOB_IN_WIM &&
1203                     blob->rdesc->raw_copy_ok)
1204                 {
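                        /* Descriptive comment (added): another blob sharing
                         * this (typically solid) resource descriptor was
                         * already approved for raw copy in an earlier
                         * iteration, so this blob rides along with it.  */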
1205                         list_move_tail(&blob->write_blobs_list,
1206                                        raw_copy_blobs);
1207                 } else if (can_raw_copy(blob, write_resource_flags,
1208                                         out_ctype, out_chunk_size))
1209                 {
1210                         blob->rdesc->raw_copy_ok = 1;
1211                         list_move_tail(&blob->write_blobs_list,
1212                                        raw_copy_blobs);
1213                 } else {
1214                         num_bytes_to_compress += blob->size;
1215                 }
1216         }
1217
1218         return num_bytes_to_compress;
1219 }
1220
1221 /* Copy a raw compressed resource located in another WIM file to the WIM file
1222  * being written.  */
1223 static int
1224 write_raw_copy_resource(struct wim_resource_descriptor *in_rdesc,
1225                         struct filedes *out_fd)
1226 {
1227         u64 cur_read_offset;
1228         u64 end_read_offset;
1229         u8 buf[BUFFER_SIZE];
1230         size_t bytes_to_read;
1231         int ret;
1232         struct filedes *in_fd;
1233         struct blob_descriptor *blob;
1234         u64 out_offset_in_wim;
1235
1236         /* Copy the raw data.  */
1237         cur_read_offset = in_rdesc->offset_in_wim;
1238         end_read_offset = cur_read_offset + in_rdesc->size_in_wim;
1239
1240         out_offset_in_wim = out_fd->offset;
1241
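        /* Note (added; this is an interpretation of the adjustment below): in
         * a pipable WIM each blob's data is preceded by a struct pwm_blob_hdr,
         * so the raw copy starts one header earlier to include that header,
         * while the blob's recorded offset in the output still points just
         * past it.  */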
1242         if (in_rdesc->is_pipable) {
1243                 if (cur_read_offset < sizeof(struct pwm_blob_hdr))
1244                         return WIMLIB_ERR_INVALID_PIPABLE_WIM;
1245                 cur_read_offset -= sizeof(struct pwm_blob_hdr);
1246                 out_offset_in_wim += sizeof(struct pwm_blob_hdr);
1247         }
1248         in_fd = &in_rdesc->wim->in_fd;
1249         wimlib_assert(cur_read_offset != end_read_offset);
1250         do {
1251
1252                 bytes_to_read = min(sizeof(buf), end_read_offset - cur_read_offset);
1253
1254                 ret = full_pread(in_fd, buf, bytes_to_read, cur_read_offset);
1255                 if (ret)
1256                         return ret;
1257
1258                 ret = full_write(out_fd, buf, bytes_to_read);
1259                 if (ret)
1260                         return ret;
1261
1262                 cur_read_offset += bytes_to_read;
1263
1264         } while (cur_read_offset != end_read_offset);
1265
1266         list_for_each_entry(blob, &in_rdesc->blob_list, rdesc_node) {
1267                 if (blob->will_be_in_output_wim) {
1268                         blob_set_out_reshdr_for_reuse(blob);
1269                         if (in_rdesc->flags & WIM_RESHDR_FLAG_SOLID)
1270                                 blob->out_res_offset_in_wim = out_offset_in_wim;
1271                         else
1272                                 blob->out_reshdr.offset_in_wim = out_offset_in_wim;
1273
1274                 }
1275         }
1276         return 0;
1277 }
1278
1279 /* Copy a list of raw compressed resources located in other WIM file(s) to the
1280  * WIM file being written.  */
1281 static int
1282 write_raw_copy_resources(struct list_head *raw_copy_blobs,
1283                          struct filedes *out_fd,
1284                          struct write_blobs_progress_data *progress_data)
1285 {
1286         struct blob_descriptor *blob;
1287         int ret;
1288
1289         list_for_each_entry(blob, raw_copy_blobs, write_blobs_list)
1290                 blob->rdesc->raw_copy_ok = 1;
1291
1292         list_for_each_entry(blob, raw_copy_blobs, write_blobs_list) {
1293                 if (blob->rdesc->raw_copy_ok) {
1294                         /* Write each solid resource only one time.  */
1295                         ret = write_raw_copy_resource(blob->rdesc, out_fd);
1296                         if (ret)
1297                                 return ret;
1298                         blob->rdesc->raw_copy_ok = 0;
1299                 }
1300                 ret = do_write_blobs_progress(progress_data, blob->size,
1301                                               1, false);
1302                 if (ret)
1303                         return ret;
1304         }
1305         return 0;
1306 }
1307
1308 /* Wait for and write all chunks pending in the compressor.  */
1309 static int
1310 finish_remaining_chunks(struct write_blobs_ctx *ctx)
1311 {
1312         const void *cdata;
1313         u32 csize;
1314         u32 usize;
1315         int ret;
1316
1317         if (ctx->compressor == NULL)
1318                 return 0;
1319
1320         if (ctx->cur_chunk_buf_filled != 0) {
1321                 ctx->compressor->signal_chunk_filled(ctx->compressor,
1322                                                      ctx->cur_chunk_buf_filled);
1323         }
1324
1325         while (ctx->compressor->get_compression_result(ctx->compressor, &cdata,
1326                                                        &csize, &usize))
1327         {
1328                 ret = write_chunk(ctx, cdata, csize, usize);
1329                 if (ret)
1330                         return ret;
1331         }
1332         return 0;
1333 }
1334
1335 static void
1336 remove_empty_blobs(struct list_head *blob_list)
1337 {
1338         struct blob_descriptor *blob, *tmp;
1339
1340         list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
1341                 wimlib_assert(blob->will_be_in_output_wim);
1342                 if (blob->size == 0) {
1343                         list_del(&blob->write_blobs_list);
1344                         blob->out_reshdr.offset_in_wim = 0;
1345                         blob->out_reshdr.size_in_wim = 0;
1346                         blob->out_reshdr.uncompressed_size = 0;
1347                         blob->out_reshdr.flags = reshdr_flags_for_blob(blob);
1348                 }
1349         }
1350 }
1351
1352 static inline bool
1353 blob_is_in_file(const struct blob_descriptor *blob)
1354 {
1355         return blob->blob_location == BLOB_IN_FILE_ON_DISK
1356 #ifdef __WIN32__
1357             || blob->blob_location == BLOB_IN_WINNT_FILE_ON_DISK
1358             || blob->blob_location == BLOB_WIN32_ENCRYPTED
1359 #endif
1360            ;
1361 }
1362
1363 static void
1364 init_done_with_file_info(struct list_head *blob_list)
1365 {
1366         struct blob_descriptor *blob;
1367
1368         list_for_each_entry(blob, blob_list, write_blobs_list) {
1369                 if (blob_is_in_file(blob)) {
1370                         blob->file_inode->num_remaining_streams = 0;
1371                         blob->may_send_done_with_file = 1;
1372                 } else {
1373                         blob->may_send_done_with_file = 0;
1374                 }
1375         }
1376
1377         list_for_each_entry(blob, blob_list, write_blobs_list)
1378                 if (blob->may_send_done_with_file)
1379                         blob->file_inode->num_remaining_streams++;
1380 }
1381
1382 /*
1383  * Write a list of blobs to the output WIM file.
1384  *
1385  * @blob_list
1386  *      The list of blobs to write, specified by a list of 'struct blob_descriptor' linked
1387  *      by the 'write_blobs_list' member.
1388  *
1389  * @out_fd
1390  *      The file descriptor, opened for writing, to which to write the blobs.
1391  *
1392  * @write_resource_flags
1393  *      Flags to modify how the blobs are written:
1394  *
1395  *      WRITE_RESOURCE_FLAG_RECOMPRESS:
1396  *              Force compression of all resources, even if they could otherwise
1397  *              be re-used by copying the raw data, due to being located in a WIM
1398  *              file with compatible compression parameters.
1399  *
1400  *      WRITE_RESOURCE_FLAG_PIPABLE:
1401  *              Write the resources in the wimlib-specific pipable format, and
1402  *              furthermore do so in such a way that no seeking backwards in
1403  *              @out_fd will be performed (so it may be a pipe).
1404  *
1405  *      WRITE_RESOURCE_FLAG_SOLID:
1406  *              Combine all the blobs into a single resource rather than writing
1407  *              them in separate resources.  This flag is only valid if the WIM
1408  *              version number has been, or will be, set to WIM_VERSION_SOLID.
1409  *              This flag may not be combined with WRITE_RESOURCE_FLAG_PIPABLE.
1410  *
1411  * @out_ctype
1412  *      Compression format to use in the output resources, specified as one of
1413  *      the WIMLIB_COMPRESSION_TYPE_* constants.  WIMLIB_COMPRESSION_TYPE_NONE
1414  *      is allowed.
1415  *
1416  * @out_chunk_size
1417  *      Compression chunk size to use in the output resources.  It must be a
1418  *      valid chunk size for the specified compression format @out_ctype, unless
1419  *      @out_ctype is WIMLIB_COMPRESSION_TYPE_NONE, in which case this parameter
1420  *      is ignored.
1421  *
1422  * @num_threads
1423  *      Number of threads to use to compress data.  If 0, a default number of
1424  *      threads will be chosen.  The number of threads still may be decreased
1425  *      from the specified value if insufficient memory is detected.
1426  *
1427  * @blob_table
1428  *      If on-the-fly deduplication of unhashed blobs is desired, this parameter
1429  *      must be a pointer to the blob table for the WIMStruct on whose behalf the
1430  *      blobs are being written.  Otherwise, this parameter can be NULL.
1431  *
1432  * @filter_ctx
1433  *      If on-the-fly deduplication of unhashed blobs is desired, this parameter
1434  *      can be a pointer to a context for blob filtering used to detect whether
1435  *      the duplicate blob has been hard-filtered or not.  If no blobs are
1436  *      hard-filtered or no blobs are unhashed, this parameter can be NULL.
1437  *
1438  * This function will write the blobs in @blob_list to resources in
1439  * consecutive positions in the output WIM file, or to a single solid resource
1440  * if WRITE_RESOURCE_FLAG_SOLID was specified in @write_resource_flags.  In both
1441  * cases, the @out_reshdr of the `struct blob_descriptor' for each blob written will be
1442  * updated to specify its location, size, and flags in the output WIM.  In the
1443  * solid resource case, WIM_RESHDR_FLAG_SOLID will be set in the @flags field of
1444  * each @out_reshdr, and furthermore @out_res_offset_in_wim and
1445  * @out_res_size_in_wim of each @out_reshdr will be set to the offset and size,
1446  * respectively, in the output WIM of the solid resource containing the
1447  * corresponding blob.
1448  *
1449  * Each of the blobs to write may be in any location supported by the
1450  * resource-handling code (specifically, read_blob_list()), such as the contents
1451  * of an external file that has been logically added to the output WIM, or a blob
1452  * in another WIM file that has been imported, or even a blob in the "same" WIM
1453  * file of which a modified copy is being written.  In the case that a blob is
1454  * already in a WIM file and uses compatible compression parameters, by default
1455  * this function will re-use the raw data instead of decompressing it, then
1456  * recompressing it; however, with WRITE_RESOURCE_FLAG_RECOMPRESS
1457  * specified in @write_resource_flags, this is not done.
1458  *
1459  * As a further requirement, this function requires that the
1460  * @will_be_in_output_wim member be set to 1 on all blobs in @blob_list as well
1461  * as any other blobs not in @blob_list that will be in the output WIM file, but
1462  * set to 0 on any other blobs in the output WIM's blob table or sharing a solid
1463  * resource with a blob in @blob_list.  Still furthermore, if on-the-fly
1464  * deduplication of blobs is possible, then all blobs in @blob_list must also be
1465  * linked by @blob_table_list along with any other blobs that have
1466  * @will_be_in_output_wim set.
1467  *
1468  * This function handles on-the-fly deduplication of blobs for which SHA-1
1469  * message digests have not yet been calculated.  Such blobs may or may not need
1470  * to be written.  If @blob_table is non-NULL, then each blob in @blob_list that
1471  * has @unhashed set but not @unique_size set is checksummed immediately before
1472  * it would otherwise be read for writing in order to determine if it is
1473  * identical to another blob already being written or one that would be filtered
1474  * out of the output WIM using blob_filtered() with the context @filter_ctx.
1475  * Each such duplicate blob will be removed from @blob_list, its reference count
1476  * transferred to the pre-existing duplicate blob, its memory freed, and will not
1477  * be written.  Alternatively, if a blob in @blob_list is a duplicate with any
1478  * blob in @blob_table that has not been marked for writing or would not be
1479  * hard-filtered, it is freed and the pre-existing duplicate is written instead,
1480  * taking ownership of the reference count and slot in the @blob_table_list.
1481  *
1482  * Returns 0 if every blob was either written successfully or did not need to be
1483  * written; otherwise returns a non-zero error code.
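 *
 * As a rough illustration only (this is not an actual call site in wimlib), a
 * caller writing every blob as an ordinary, non-solid, uncompressed resource
 * might invoke this function along the following lines:
 *
 *      struct list_head blob_list;   // blobs linked by write_blobs_list,
 *                                    // each with will_be_in_output_wim == 1
 *
 *      ret = write_blob_list(&blob_list,
 *                            &wim->out_fd,
 *                            0,                             // no special flags
 *                            WIMLIB_COMPRESSION_TYPE_NONE,  // no compression
 *                            0,      // chunk size (ignored for NONE)
 *                            1,      // one compressor thread
 *                            NULL,   // no on-the-fly deduplication
 *                            NULL,   // no filter context
 *                            NULL, NULL);                   // no progress callback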
1484  */
1485 static int
1486 write_blob_list(struct list_head *blob_list,
1487                 struct filedes *out_fd,
1488                 int write_resource_flags,
1489                 int out_ctype,
1490                 u32 out_chunk_size,
1491                 unsigned num_threads,
1492                 struct blob_table *blob_table,
1493                 struct filter_context *filter_ctx,
1494                 wimlib_progress_func_t progfunc,
1495                 void *progctx)
1496 {
1497         int ret;
1498         struct write_blobs_ctx ctx;
1499         struct list_head raw_copy_blobs;
1500
1501         wimlib_assert((write_resource_flags &
1502                        (WRITE_RESOURCE_FLAG_SOLID |
1503                         WRITE_RESOURCE_FLAG_PIPABLE)) !=
1504                                 (WRITE_RESOURCE_FLAG_SOLID |
1505                                  WRITE_RESOURCE_FLAG_PIPABLE));
1506
1507         remove_empty_blobs(blob_list);
1508
1509         if (list_empty(blob_list))
1510                 return 0;
1511
1512         /* If needed, set auxiliary information so that we can detect when the
1513          * library has finished using each external file.  */
1514         if (unlikely(write_resource_flags & WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE))
1515                 init_done_with_file_info(blob_list);
1516
1517         memset(&ctx, 0, sizeof(ctx));
1518
1519         ctx.out_fd = out_fd;
1520         ctx.blob_table = blob_table;
1521         ctx.out_ctype = out_ctype;
1522         ctx.out_chunk_size = out_chunk_size;
1523         ctx.write_resource_flags = write_resource_flags;
1524         ctx.filter_ctx = filter_ctx;
1525
1526         /*
1527          * We normally sort the blobs to write by a "sequential" order that is
1528          * optimized for reading.  But when using solid compression, we instead
1529          * sort the blobs by file extension and file name (when applicable; and
1530          * we don't do this for blobs from solid resources) so that similar
1531          * files are grouped together, which improves the compression ratio.
1532          * This is somewhat of a hack since a blob does not necessarily
1533          * correspond one-to-one with a filename, nor is there any guarantee
1534          * that two files with similar names or extensions are actually similar
1535          * in content.  A potential TODO is to sort the blobs based on some
1536          * measure of similarity of their actual contents.
1537          */
1538
1539         ret = sort_blob_list_by_sequential_order(blob_list,
1540                                                  offsetof(struct blob_descriptor,
1541                                                           write_blobs_list));
1542         if (ret)
1543                 return ret;
1544
1545         compute_blob_list_stats(blob_list, &ctx);
1546
1547         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID_SORT) {
1548                 ret = sort_blob_list_for_solid_compression(blob_list);
1549                 if (unlikely(ret))
1550                         WARNING("Failed to sort blobs for solid compression. Continuing anyways.");
1551         }
1552
1553         ctx.progress_data.progfunc = progfunc;
1554         ctx.progress_data.progctx = progctx;
1555
1556         ctx.num_bytes_to_compress = find_raw_copy_blobs(blob_list,
1557                                                         write_resource_flags,
1558                                                         out_ctype,
1559                                                         out_chunk_size,
1560                                                         &raw_copy_blobs);
1561
1562         if (ctx.num_bytes_to_compress == 0)
1563                 goto out_write_raw_copy_resources;
1564
1565         /* Unless uncompressed output was required, allocate a chunk_compressor
1566          * to do compression.  There are serial and parallel implementations of
1567          * the chunk_compressor interface.  We default to parallel using the
1568          * specified number of threads, unless the upper bound on the number of
1569          * bytes needing to be compressed is less than a heuristic value.  */
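        /* Illustrative reading of the check below: with, say, a 32768-byte
         * chunk size, the parallel compressor is attempted only when more than
         * max(2000000, out_chunk_size) = 2000000 bytes remain to be
         * compressed; smaller jobs (and builds without multithreaded
         * compression) use the serial implementation.  */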
1570         if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
1571
1572         #ifdef ENABLE_MULTITHREADED_COMPRESSION
1573                 if (ctx.num_bytes_to_compress > max(2000000, out_chunk_size)) {
1574                         ret = new_parallel_chunk_compressor(out_ctype,
1575                                                             out_chunk_size,
1576                                                             num_threads, 0,
1577                                                             &ctx.compressor);
1578                         if (ret > 0) {
1579                                 WARNING("Couldn't create parallel chunk compressor: %"TS".\n"
1580                                         "          Falling back to single-threaded compression.",
1581                                         wimlib_get_error_string(ret));
1582                         }
1583                 }
1584         #endif
1585
1586                 if (ctx.compressor == NULL) {
1587                         ret = new_serial_chunk_compressor(out_ctype, out_chunk_size,
1588                                                           &ctx.compressor);
1589                         if (ret)
1590                                 goto out_destroy_context;
1591                 }
1592         }
1593
1594         if (ctx.compressor)
1595                 ctx.progress_data.progress.write_streams.num_threads = ctx.compressor->num_threads;
1596         else
1597                 ctx.progress_data.progress.write_streams.num_threads = 1;
1598
1599         INIT_LIST_HEAD(&ctx.blobs_being_compressed);
1600         INIT_LIST_HEAD(&ctx.blobs_in_solid_resource);
1601
1602         ret = call_progress(ctx.progress_data.progfunc,
1603                             WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
1604                             &ctx.progress_data.progress,
1605                             ctx.progress_data.progctx);
1606         if (ret)
1607                 goto out_destroy_context;
1608
1609         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1610                 ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress);
1611                 if (ret)
1612                         goto out_destroy_context;
1613         }
1614
1615         /* Read the list of blobs needing to be compressed, using the specified
1616          * callbacks to execute processing of the data.  */
1617
1618         struct read_blob_list_callbacks cbs = {
1619                 .begin_blob             = write_blob_begin_read,
1620                 .begin_blob_ctx         = &ctx,
1621                 .consume_chunk          = write_blob_process_chunk,
1622                 .consume_chunk_ctx      = &ctx,
1623                 .end_blob               = write_blob_end_read,
1624                 .end_blob_ctx           = &ctx,
1625         };
1626
1627         ret = read_blob_list(blob_list,
1628                              offsetof(struct blob_descriptor, write_blobs_list),
1629                              &cbs,
1630                              BLOB_LIST_ALREADY_SORTED |
1631                                 VERIFY_BLOB_HASHES |
1632                                 COMPUTE_MISSING_BLOB_HASHES);
1633
1634         if (ret)
1635                 goto out_destroy_context;
1636
1637         ret = finish_remaining_chunks(&ctx);
1638         if (ret)
1639                 goto out_destroy_context;
1640
1641         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1642                 struct wim_reshdr reshdr;
1643                 struct blob_descriptor *blob;
1644                 u64 offset_in_res;
1645
1646                 ret = end_write_resource(&ctx, &reshdr);
1647                 if (ret)
1648                         goto out_destroy_context;
1649
1650                 offset_in_res = 0;
1651                 list_for_each_entry(blob, &ctx.blobs_in_solid_resource, write_blobs_list) {
1652                         blob->out_reshdr.size_in_wim = blob->size;
1653                         blob->out_reshdr.flags = reshdr_flags_for_blob(blob) |
1654                                                  WIM_RESHDR_FLAG_SOLID;
1655                         blob->out_reshdr.uncompressed_size = 0;
1656                         blob->out_reshdr.offset_in_wim = offset_in_res;
1657                         blob->out_res_offset_in_wim = reshdr.offset_in_wim;
1658                         blob->out_res_size_in_wim = reshdr.size_in_wim;
1659                         blob->out_res_uncompressed_size = reshdr.uncompressed_size;
1660                         offset_in_res += blob->size;
1661                 }
1662                 wimlib_assert(offset_in_res == reshdr.uncompressed_size);
1663         }
1664
1665 out_write_raw_copy_resources:
1666         /* Copy any compressed resources for which the raw data can be reused
1667          * without decompression.  */
1668         ret = write_raw_copy_resources(&raw_copy_blobs, ctx.out_fd,
1669                                        &ctx.progress_data);
1670
1671 out_destroy_context:
1672         FREE(ctx.chunk_csizes);
1673         if (ctx.compressor)
1674                 ctx.compressor->destroy(ctx.compressor);
1675         return ret;
1676 }
1677
1678
1679 static int
1680 write_file_data_blobs(WIMStruct *wim,
1681                       struct list_head *blob_list,
1682                       int write_flags,
1683                       unsigned num_threads,
1684                       struct filter_context *filter_ctx)
1685 {
1686         int out_ctype;
1687         u32 out_chunk_size;
1688         int write_resource_flags;
1689
1690         write_resource_flags = write_flags_to_resource_flags(write_flags);
1691
1692         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1693                 out_chunk_size = wim->out_solid_chunk_size;
1694                 out_ctype = wim->out_solid_compression_type;
1695         } else {
1696                 out_chunk_size = wim->out_chunk_size;
1697                 out_ctype = wim->out_compression_type;
1698         }
1699
1700         return write_blob_list(blob_list,
1701                                &wim->out_fd,
1702                                write_resource_flags,
1703                                out_ctype,
1704                                out_chunk_size,
1705                                num_threads,
1706                                wim->blob_table,
1707                                filter_ctx,
1708                                wim->progfunc,
1709                                wim->progctx);
1710 }
1711
1712 /* Write the contents of the specified blob as a WIM resource.  */
1713 static int
1714 write_wim_resource(struct blob_descriptor *blob,
1715                    struct filedes *out_fd,
1716                    int out_ctype,
1717                    u32 out_chunk_size,
1718                    int write_resource_flags)
1719 {
1720         LIST_HEAD(blob_list);
1721         list_add(&blob->write_blobs_list, &blob_list);
1722         blob->will_be_in_output_wim = 1;
1723         return write_blob_list(&blob_list,
1724                                out_fd,
1725                                write_resource_flags & ~WRITE_RESOURCE_FLAG_SOLID,
1726                                out_ctype,
1727                                out_chunk_size,
1728                                1,
1729                                NULL,
1730                                NULL,
1731                                NULL,
1732                                NULL);
1733 }
1734
1735 /* Write the contents of the specified buffer as a WIM resource.  */
1736 int
1737 write_wim_resource_from_buffer(const void *buf,
1738                                size_t buf_size,
1739                                bool is_metadata,
1740                                struct filedes *out_fd,
1741                                int out_ctype,
1742                                u32 out_chunk_size,
1743                                struct wim_reshdr *out_reshdr,
1744                                u8 *hash_ret,
1745                                int write_resource_flags)
1746 {
1747         int ret;
1748         struct blob_descriptor blob;
1749
1750         if (unlikely(buf_size == 0)) {
1751                 zero_reshdr(out_reshdr);
1752                 if (hash_ret)
1753                         copy_hash(hash_ret, zero_hash);
1754                 return 0;
1755         }
1756
1757         blob_set_is_located_in_attached_buffer(&blob, (void *)buf, buf_size);
1758         sha1_buffer(buf, buf_size, blob.hash);
1759         blob.unhashed = 0;
1760         blob.is_metadata = is_metadata;
1761
1762         ret = write_wim_resource(&blob, out_fd, out_ctype, out_chunk_size,
1763                                  write_resource_flags);
1764         if (ret)
1765                 return ret;
1766
1767         copy_reshdr(out_reshdr, &blob.out_reshdr);
1768
1769         if (hash_ret)
1770                 copy_hash(hash_ret, blob.hash);
1771         return 0;
1772 }
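
/*
 * Illustrative usage sketch (hypothetical caller, not an actual call site):
 * writing a small in-memory buffer as an uncompressed, non-metadata resource
 * could look roughly like this, where 'out_fd' is a 'struct filedes' opened
 * for writing:
 *
 *      struct wim_reshdr reshdr;
 *      u8 hash[20];                    // receives the SHA-1 message digest
 *      static const char data[] = "example";
 *
 *      ret = write_wim_resource_from_buffer(data, sizeof(data) - 1,
 *                                           false,         // not metadata
 *                                           &out_fd,
 *                                           WIMLIB_COMPRESSION_TYPE_NONE,
 *                                           0,             // chunk size unused
 *                                           &reshdr, hash, 0);
 *
 * A zero-length buffer is also accepted: in that case the resource header is
 * zeroed, zero_hash is copied into the hash output, and nothing is written to
 * the file.
 */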
1773
1774 struct blob_size_table {
1775         struct hlist_head *array;
1776         size_t num_entries;
1777         size_t capacity;
1778 };
1779
1780 static int
1781 init_blob_size_table(struct blob_size_table *tab, size_t capacity)
1782 {
1783         tab->array = CALLOC(capacity, sizeof(tab->array[0]));
1784         if (tab->array == NULL)
1785                 return WIMLIB_ERR_NOMEM;
1786         tab->num_entries = 0;
1787         tab->capacity = capacity;
1788         return 0;
1789 }
1790
1791 static void
1792 destroy_blob_size_table(struct blob_size_table *tab)
1793 {
1794         FREE(tab->array);
1795 }
1796
1797 static int
1798 blob_size_table_insert(struct blob_descriptor *blob, void *_tab)
1799 {
1800         struct blob_size_table *tab = _tab;
1801         size_t pos;
1802         struct blob_descriptor *same_size_blob;
1803
1804         pos = hash_u64(blob->size) % tab->capacity;
1805         blob->unique_size = 1;
1806         hlist_for_each_entry(same_size_blob, &tab->array[pos], hash_list_2) {
1807                 if (same_size_blob->size == blob->size) {
1808                         blob->unique_size = 0;
1809                         same_size_blob->unique_size = 0;
1810                         break;
1811                 }
1812         }
1813
1814         hlist_add_head(&blob->hash_list_2, &tab->array[pos]);
1815         tab->num_entries++;
1816         return 0;
1817 }
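
/*
 * For example (illustrative sizes): if blobs of sizes {100, 200, 200} are
 * inserted, the 100-byte blob keeps unique_size == 1 while both 200-byte blobs
 * end up with unique_size == 0.  A blob whose size is unique among all
 * candidates cannot be a duplicate of any of them, so it never needs to be
 * checksummed solely for deduplication purposes.
 */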
1818
1819 struct find_blobs_ctx {
1820         WIMStruct *wim;
1821         int write_flags;
1822         struct list_head blob_list;
1823         struct blob_size_table blob_size_tab;
1824 };
1825
1826 static void
1827 reference_blob_for_write(struct blob_descriptor *blob,
1828                          struct list_head *blob_list, u32 nref)
1829 {
1830         if (!blob->will_be_in_output_wim) {
1831                 blob->out_refcnt = 0;
1832                 list_add_tail(&blob->write_blobs_list, blob_list);
1833                 blob->will_be_in_output_wim = 1;
1834         }
1835         blob->out_refcnt += nref;
1836 }
1837
1838 static int
1839 fully_reference_blob_for_write(struct blob_descriptor *blob, void *_blob_list)
1840 {
1841         struct list_head *blob_list = _blob_list;
1842         blob->will_be_in_output_wim = 0;
1843         reference_blob_for_write(blob, blob_list, blob->refcnt);
1844         return 0;
1845 }
1846
1847 static int
1848 inode_find_blobs_to_reference(const struct wim_inode *inode,
1849                               const struct blob_table *table,
1850                               struct list_head *blob_list)
1851 {
1852         wimlib_assert(inode->i_nlink > 0);
1853
1854         for (unsigned i = 0; i < inode->i_num_streams; i++) {
1855                 struct blob_descriptor *blob;
1856                 const u8 *hash;
1857
1858                 blob = stream_blob(&inode->i_streams[i], table);
1859                 if (blob) {
1860                         reference_blob_for_write(blob, blob_list, inode->i_nlink);
1861                 } else {
1862                         hash = stream_hash(&inode->i_streams[i]);
1863                         if (!is_zero_hash(hash))
1864                                 return blob_not_found_error(inode, hash);
1865                 }
1866         }
1867         return 0;
1868 }
1869
1870 static int
1871 do_blob_set_not_in_output_wim(struct blob_descriptor *blob, void *_ignore)
1872 {
1873         blob->will_be_in_output_wim = 0;
1874         return 0;
1875 }
1876
1877 static int
1878 image_find_blobs_to_reference(WIMStruct *wim)
1879 {
1880         struct wim_image_metadata *imd;
1881         struct wim_inode *inode;
1882         struct blob_descriptor *blob;
1883         struct list_head *blob_list;
1884         int ret;
1885
1886         imd = wim_get_current_image_metadata(wim);
1887
1888         image_for_each_unhashed_blob(blob, imd)
1889                 blob->will_be_in_output_wim = 0;
1890
1891         blob_list = wim->private;
1892         image_for_each_inode(inode, imd) {
1893                 ret = inode_find_blobs_to_reference(inode,
1894                                                     wim->blob_table,
1895                                                     blob_list);
1896                 if (ret)
1897                         return ret;
1898         }
1899         return 0;
1900 }
1901
1902 static int
1903 prepare_unfiltered_list_of_blobs_in_output_wim(WIMStruct *wim,
1904                                                int image,
1905                                                int blobs_ok,
1906                                                struct list_head *blob_list_ret)
1907 {
1908         int ret;
1909
1910         INIT_LIST_HEAD(blob_list_ret);
1911
1912         if (blobs_ok && (image == WIMLIB_ALL_IMAGES ||
1913                          (image == 1 && wim->hdr.image_count == 1)))
1914         {
1915                 /* Fast case:  Assume that all blobs are being written and that
1916                  * the reference counts are correct.  */
1917                 struct blob_descriptor *blob;
1918                 struct wim_image_metadata *imd;
1919                 unsigned i;
1920
1921                 for_blob_in_table(wim->blob_table,
1922                                   fully_reference_blob_for_write,
1923                                   blob_list_ret);
1924
1925                 for (i = 0; i < wim->hdr.image_count; i++) {
1926                         imd = wim->image_metadata[i];
1927                         image_for_each_unhashed_blob(blob, imd)
1928                                 fully_reference_blob_for_write(blob, blob_list_ret);
1929                 }
1930         } else {
1931                 /* Slow case:  Walk through the images being written and
1932                  * determine the blobs referenced.  */
1933                 for_blob_in_table(wim->blob_table,
1934                                   do_blob_set_not_in_output_wim, NULL);
1935                 wim->private = blob_list_ret;
1936                 ret = for_image(wim, image, image_find_blobs_to_reference);
1937                 if (ret)
1938                         return ret;
1939         }
1940
1941         return 0;
1942 }
1943
1944 struct insert_other_if_hard_filtered_ctx {
1945         struct blob_size_table *tab;
1946         struct filter_context *filter_ctx;
1947 };
1948
1949 static int
1950 insert_other_if_hard_filtered(struct blob_descriptor *blob, void *_ctx)
1951 {
1952         struct insert_other_if_hard_filtered_ctx *ctx = _ctx;
1953
1954         if (!blob->will_be_in_output_wim &&
1955             blob_hard_filtered(blob, ctx->filter_ctx))
1956                 blob_size_table_insert(blob, ctx->tab);
1957         return 0;
1958 }
1959
1960 static int
1961 determine_blob_size_uniquity(struct list_head *blob_list,
1962                              struct blob_table *lt,
1963                              struct filter_context *filter_ctx)
1964 {
1965         int ret;
1966         struct blob_size_table tab;
1967         struct blob_descriptor *blob;
1968
1969         ret = init_blob_size_table(&tab, 9001);
1970         if (ret)
1971                 return ret;
1972
1973         if (may_hard_filter_blobs(filter_ctx)) {
1974                 struct insert_other_if_hard_filtered_ctx ctx = {
1975                         .tab = &tab,
1976                         .filter_ctx = filter_ctx,
1977                 };
1978                 for_blob_in_table(lt, insert_other_if_hard_filtered, &ctx);
1979         }
1980
1981         list_for_each_entry(blob, blob_list, write_blobs_list)
1982                 blob_size_table_insert(blob, &tab);
1983
1984         destroy_blob_size_table(&tab);
1985         return 0;
1986 }
1987
1988 static void
1989 filter_blob_list_for_write(struct list_head *blob_list,
1990                            struct filter_context *filter_ctx)
1991 {
1992         struct blob_descriptor *blob, *tmp;
1993
1994         list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
1995                 int status = blob_filtered(blob, filter_ctx);
1996
1997                 if (status == 0) {
1998                         /* Not filtered.  */
1999                         continue;
2000                 } else {
2001                         if (status > 0) {
2002                                 /* Soft filtered.  */
2003                         } else {
2004                                 /* Hard filtered.  */
2005                                 blob->will_be_in_output_wim = 0;
2006                                 list_del(&blob->blob_table_list);
2007                         }
2008                         list_del(&blob->write_blobs_list);
2009                 }
2010         }
2011 }
2012
2013 /*
2014  * prepare_blob_list_for_write() -
2015  *
2016  * Prepare the list of blobs to write for writing a WIM containing the specified
2017  * image(s) with the specified write flags.
2018  *
2019  * @wim
2020  *      The WIMStruct on whose behalf the write is occurring.
2021  *
2022  * @image
2023  *      Image(s) from the WIM to write; may be WIMLIB_ALL_IMAGES.
2024  *
2025  * @write_flags
2026  *      WIMLIB_WRITE_FLAG_* flags for the write operation:
2027  *
2028  *      STREAMS_OK:  For writes of all images, assume that all blobs in the blob
2029  *      table of @wim and the per-image lists of unhashed blobs should be taken
2030  *      as-is, and image metadata should not be searched for references.  This
2031  *      does not exclude filtering with OVERWRITE and SKIP_EXTERNAL_WIMS, below.
2032  *
2033  *      OVERWRITE:  Blobs already present in @wim shall not be returned in
2034  *      @blob_list_ret.
2035  *
2036  *      SKIP_EXTERNAL_WIMS:  Blobs already present in a WIM file, but not @wim,
2037  *      shall be returned in neither @blob_list_ret nor @blob_table_list_ret.
2038  *
2039  * @blob_list_ret
2040  *      List of blobs, linked by write_blobs_list, that need to be written will
2041  *      be returned here.
2042  *
2043  *      Note that this function assumes that unhashed blobs will be written; it
2044  *      does not take into account that they may become duplicates when actually
2045  *      hashed.
2046  *
2047  * @blob_table_list_ret
2048  *      List of blobs, linked by blob_table_list, that need to be included in
2049  *      the WIM's blob table will be returned here.  This will be a superset of
2050  *      the blobs in @blob_list_ret.
2051  *
2052  *      This list will be a proper superset of @blob_list_ret if and only if
2053  *      WIMLIB_WRITE_FLAG_OVERWRITE was specified in @write_flags and some of
2054  *      the blobs that would otherwise need to be written were already located
2055  *      in the WIM file.
2056  *
2057  *      All blobs in this list will have @out_refcnt set to the number of
2058  *      references to the blob in the output WIM.  If
2059  *      WIMLIB_WRITE_FLAG_STREAMS_OK was specified in @write_flags, @out_refcnt
2060  *      may be as low as 0.
2061  *
2062  * @filter_ctx_ret
2063  *      A context for queries of blob filter status with blob_filtered() is
2064  *      returned in this location.
2065  *
2066  * In addition, @will_be_in_output_wim will be set to 1 in all blobs inserted
2067  * into @blob_table_list_ret and to 0 in all blobs in the blob table of @wim not
2068  * inserted into @blob_table_list_ret.
2069  *
2070  * Still furthermore, @unique_size will be set to 1 on all blobs in
2071  * @blob_list_ret that have unique size among all blobs in @blob_list_ret and
2072  * among all blobs in the blob table of @wim that are ineligible for being
2073  * written due to filtering.
2074  *
2075  * Returns 0 on success; nonzero on read error, memory allocation error, or
2076  * otherwise.
2077  */
2078 static int
2079 prepare_blob_list_for_write(WIMStruct *wim, int image,
2080                             int write_flags,
2081                             struct list_head *blob_list_ret,
2082                             struct list_head *blob_table_list_ret,
2083                             struct filter_context *filter_ctx_ret)
2084 {
2085         int ret;
2086         struct blob_descriptor *blob;
2087
2088         filter_ctx_ret->write_flags = write_flags;
2089         filter_ctx_ret->wim = wim;
2090
2091         ret = prepare_unfiltered_list_of_blobs_in_output_wim(
2092                                 wim,
2093                                 image,
2094                                 write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK,
2095                                 blob_list_ret);
2096         if (ret)
2097                 return ret;
2098
2099         INIT_LIST_HEAD(blob_table_list_ret);
2100         list_for_each_entry(blob, blob_list_ret, write_blobs_list)
2101                 list_add_tail(&blob->blob_table_list, blob_table_list_ret);
2102
2103         ret = determine_blob_size_uniquity(blob_list_ret, wim->blob_table,
2104                                            filter_ctx_ret);
2105         if (ret)
2106                 return ret;
2107
2108         if (may_filter_blobs(filter_ctx_ret))
2109                 filter_blob_list_for_write(blob_list_ret, filter_ctx_ret);
2110
2111         return 0;
2112 }
2113
2114 static int
2115 write_file_data(WIMStruct *wim, int image, int write_flags,
2116                 unsigned num_threads,
2117                 struct list_head *blob_list_override,
2118                 struct list_head *blob_table_list_ret)
2119 {
2120         int ret;
2121         struct list_head _blob_list;
2122         struct list_head *blob_list;
2123         struct blob_descriptor *blob;
2124         struct filter_context _filter_ctx;
2125         struct filter_context *filter_ctx;
2126
2127         if (blob_list_override == NULL) {
2128                 /* Normal case: prepare blob list from image(s) being written.
2129                  */
2130                 blob_list = &_blob_list;
2131                 filter_ctx = &_filter_ctx;
2132                 ret = prepare_blob_list_for_write(wim, image, write_flags,
2133                                                   blob_list,
2134                                                   blob_table_list_ret,
2135                                                   filter_ctx);
2136                 if (ret)
2137                         return ret;
2138         } else {
2139                 /* Currently only as a result of wimlib_split() being called:
2140                  * use the blob list already explicitly provided.  Use existing
2141                  * reference counts.  */
2142                 blob_list = blob_list_override;
2143                 filter_ctx = NULL;
2144                 INIT_LIST_HEAD(blob_table_list_ret);
2145                 list_for_each_entry(blob, blob_list, write_blobs_list) {
2146                         blob->out_refcnt = blob->refcnt;
2147                         blob->will_be_in_output_wim = 1;
2148                         blob->unique_size = 0;
2149                         list_add_tail(&blob->blob_table_list, blob_table_list_ret);
2150                 }
2151         }
2152
2153         return write_file_data_blobs(wim,
2154                                      blob_list,
2155                                      write_flags,
2156                                      num_threads,
2157                                      filter_ctx);
2158 }
2159
2160 static int
2161 write_metadata_resources(WIMStruct *wim, int image, int write_flags)
2162 {
2163         int ret;
2164         int start_image;
2165         int end_image;
2166         int write_resource_flags;
2167
2168         if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)
2169                 return 0;
2170
2171         write_resource_flags = write_flags_to_resource_flags(write_flags);
2172
2173         write_resource_flags &= ~WRITE_RESOURCE_FLAG_SOLID;
2174
2175         ret = call_progress(wim->progfunc,
2176                             WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN,
2177                             NULL, wim->progctx);
2178         if (ret)
2179                 return ret;
2180
2181         if (image == WIMLIB_ALL_IMAGES) {
2182                 start_image = 1;
2183                 end_image = wim->hdr.image_count;
2184         } else {
2185                 start_image = image;
2186                 end_image = image;
2187         }
2188
2189         for (int i = start_image; i <= end_image; i++) {
2190                 struct wim_image_metadata *imd;
2191
2192                 imd = wim->image_metadata[i - 1];
2193                 /* Build a new metadata resource only if image was modified from
2194                  * the original (or was newly added).  Otherwise just copy the
2195                  * existing one.  */
2196                 if (imd->modified) {
2197                         ret = write_metadata_resource(wim, i,
2198                                                       write_resource_flags);
2199                 } else if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
2200                         blob_set_out_reshdr_for_reuse(imd->metadata_blob);
2201                         ret = 0;
2202                 } else {
2203                         ret = write_wim_resource(imd->metadata_blob,
2204                                                  &wim->out_fd,
2205                                                  wim->out_compression_type,
2206                                                  wim->out_chunk_size,
2207                                                  write_resource_flags);
2208                 }
2209                 if (ret)
2210                         return ret;
2211         }
2212
2213         return call_progress(wim->progfunc,
2214                              WIMLIB_PROGRESS_MSG_WRITE_METADATA_END,
2215                              NULL, wim->progctx);
2216 }
2217
2218 static int
2219 open_wim_writable(WIMStruct *wim, const tchar *path, int open_flags)
2220 {
2221         int raw_fd = topen(path, open_flags | O_BINARY, 0644);
2222         if (raw_fd < 0) {
2223                 ERROR_WITH_ERRNO("Failed to open \"%"TS"\" for writing", path);
2224                 return WIMLIB_ERR_OPEN;
2225         }
2226         filedes_init(&wim->out_fd, raw_fd);
2227         return 0;
2228 }
2229
2230 static int
2231 close_wim_writable(WIMStruct *wim, int write_flags)
2232 {
2233         int ret = 0;
2234
2235         if (!(write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR))
2236                 if (filedes_valid(&wim->out_fd))
2237                         if (filedes_close(&wim->out_fd))
2238                                 ret = WIMLIB_ERR_WRITE;
2239         filedes_invalidate(&wim->out_fd);
2240         return ret;
2241 }
2242
2243 static int
2244 cmp_blobs_by_out_rdesc(const void *p1, const void *p2)
2245 {
2246         const struct blob_descriptor *blob1, *blob2;
2247
2248         blob1 = *(const struct blob_descriptor**)p1;
2249         blob2 = *(const struct blob_descriptor**)p2;
2250
2251         if (blob1->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) {
2252                 if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) {
2253                         if (blob1->out_res_offset_in_wim != blob2->out_res_offset_in_wim)
2254                                 return cmp_u64(blob1->out_res_offset_in_wim,
2255                                                blob2->out_res_offset_in_wim);
2256                 } else {
2257                         return 1;
2258                 }
2259         } else {
2260                 if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID)
2261                         return -1;
2262         }
2263         return cmp_u64(blob1->out_reshdr.offset_in_wim,
2264                        blob2->out_reshdr.offset_in_wim);
2265 }
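
/*
 * The resulting order places all non-solid blobs first, sorted by their offset
 * in the output WIM, followed by all blobs stored in solid resources, grouped
 * by the offset of their containing solid resource and ordered within each
 * resource by their uncompressed offset inside it.  (Illustrative example:
 * non-solid blobs at offsets 4096 and 8192 sort before two blobs at offsets 0
 * and 1000 within a solid resource that itself begins at offset 12288.)
 */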
2266
2267 static int
2268 write_blob_table(WIMStruct *wim, int image, int write_flags,
2269                  struct list_head *blob_table_list)
2270 {
2271         int ret;
2272
2273         /* Set output resource metadata for blobs already present in WIM.  */
2274         if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
2275                 struct blob_descriptor *blob;
2276                 list_for_each_entry(blob, blob_table_list, blob_table_list) {
2277                         if (blob->blob_location == BLOB_IN_WIM &&
2278                             blob->rdesc->wim == wim)
2279                         {
2280                                 blob_set_out_reshdr_for_reuse(blob);
2281                         }
2282                 }
2283         }
2284
2285         ret = sort_blob_list(blob_table_list,
2286                              offsetof(struct blob_descriptor, blob_table_list),
2287                              cmp_blobs_by_out_rdesc);
2288         if (ret)
2289                 return ret;
2290
2291         /* Add entries for metadata resources.  */
2292         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)) {
2293                 int start_image;
2294                 int end_image;
2295
2296                 if (image == WIMLIB_ALL_IMAGES) {
2297                         start_image = 1;
2298                         end_image = wim->hdr.image_count;
2299                 } else {
2300                         start_image = image;
2301                         end_image = image;
2302                 }
2303
2304                 /* Push metadata blob table entries onto the front of the list
2305                  * in reverse order, so that they're written in order.
2306                  */
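                /* (Illustration: with three images the loop visits i = 3, 2, 1;
                 * each list_add() pushes to the front, so the entries for
                 * images 1, 2, 3 end up at the head of the list in that
                 * order.)  */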
2307                 for (int i = end_image; i >= start_image; i--) {
2308                         struct blob_descriptor *metadata_blob;
2309
2310                         metadata_blob = wim->image_metadata[i - 1]->metadata_blob;
2311                         wimlib_assert(metadata_blob->out_reshdr.flags & WIM_RESHDR_FLAG_METADATA);
2312                         metadata_blob->out_refcnt = 1;
2313                         list_add(&metadata_blob->blob_table_list, blob_table_list);
2314                 }
2315         }
2316
2317         return write_blob_table_from_blob_list(blob_table_list,
2318                                                &wim->out_fd,
2319                                                wim->out_hdr.part_number,
2320                                                &wim->out_hdr.blob_table_reshdr,
2321                                                write_flags_to_resource_flags(write_flags));
2322 }
2323
2324 /*
2325  * Finish writing a WIM file: write the blob table, xml data, and integrity
2326  * table, then overwrite the WIM header.
2327  *
2328  * The output file descriptor is closed on success, except when writing to a
2329  * user-specified file descriptor (WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR set).
2330  */
2331 static int
2332 finish_write(WIMStruct *wim, int image, int write_flags,
2333              struct list_head *blob_table_list)
2334 {
2335         int write_resource_flags;
2336         off_t old_blob_table_end = 0;
2337         struct integrity_table *old_integrity_table = NULL;
2338         off_t new_blob_table_end;
2339         u64 xml_totalbytes;
2340         int ret;
2341
2342         write_resource_flags = write_flags_to_resource_flags(write_flags);
2343
2344         /* In the WIM header, there is room for the resource entry for a
2345          * metadata resource labeled as the "boot metadata".  This entry should
2346          * be zeroed out if there is no bootable image (boot_idx 0).  Otherwise,
2347          * it should be a copy of the resource entry for the image that is
2348          * marked as bootable.  */
2349         if (wim->out_hdr.boot_idx == 0) {
2350                 zero_reshdr(&wim->out_hdr.boot_metadata_reshdr);
2351         } else {
2352                 copy_reshdr(&wim->out_hdr.boot_metadata_reshdr,
2353                             &wim->image_metadata[
2354                                 wim->out_hdr.boot_idx - 1]->metadata_blob->out_reshdr);
2355         }
2356
2357         /* If overwriting the WIM file containing an integrity table in-place,
2358          * we'd like to re-use the information in the old integrity table
2359          * instead of recalculating it.  But we might overwrite the old
2360          * integrity table when we expand the XML data.  Read it into memory
2361          * just in case.  */
2362         if ((write_flags & (WIMLIB_WRITE_FLAG_OVERWRITE |
2363                             WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)) ==
2364                 (WIMLIB_WRITE_FLAG_OVERWRITE |
2365                  WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
2366             && wim_has_integrity_table(wim))
2367         {
2368                 old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
2369                                      wim->hdr.blob_table_reshdr.size_in_wim;
2370                 (void)read_integrity_table(wim,
2371                                            old_blob_table_end - WIM_HEADER_DISK_SIZE,
2372                                            &old_integrity_table);
2373                 /* If we couldn't read the old integrity table, we can still
2374                  * re-calculate the full integrity table ourselves.  Hence the
2375                  * ignoring of the return value.  */
2376         }
2377
2378         /* Write blob table if needed.  */
2379         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)) {
2380                 ret = write_blob_table(wim, image, write_flags,
2381                                        blob_table_list);
2382                 if (ret) {
2383                         free_integrity_table(old_integrity_table);
2384                         return ret;
2385                 }
2386         }
2387
2388         /* Write XML data.  */
2389         xml_totalbytes = wim->out_fd.offset;
2390         if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES)
2391                 xml_totalbytes = WIM_TOTALBYTES_USE_EXISTING;
2392         ret = write_wim_xml_data(wim, image, xml_totalbytes,
2393                                  &wim->out_hdr.xml_data_reshdr,
2394                                  write_resource_flags);
2395         if (ret) {
2396                 free_integrity_table(old_integrity_table);
2397                 return ret;
2398         }
2399
2400         /* Write integrity table if needed.  */
2401         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2402                 if (write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS) {
2403                         /* The XML data we wrote may have overwritten part of
2404                          * the old integrity table, so while calculating the new
2405                          * integrity table we should temporarily update the WIM
2406                          * header to remove the integrity table reference.   */
2407                         struct wim_header checkpoint_hdr;
2408                         memcpy(&checkpoint_hdr, &wim->out_hdr, sizeof(struct wim_header));
2409                         zero_reshdr(&checkpoint_hdr.integrity_table_reshdr);
2410                         checkpoint_hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2411                         ret = write_wim_header(&checkpoint_hdr, &wim->out_fd, 0);
2412                         if (ret) {
2413                                 free_integrity_table(old_integrity_table);
2414                                 return ret;
2415                         }
2416                 }
2417
2418                 new_blob_table_end = wim->out_hdr.blob_table_reshdr.offset_in_wim +
2419                                      wim->out_hdr.blob_table_reshdr.size_in_wim;
2420
2421                 ret = write_integrity_table(wim,
2422                                             new_blob_table_end,
2423                                             old_blob_table_end,
2424                                             old_integrity_table);
2425                 free_integrity_table(old_integrity_table);
2426                 if (ret)
2427                         return ret;
2428         } else {
2429                 /* No integrity table.  */
2430                 zero_reshdr(&wim->out_hdr.integrity_table_reshdr);
2431         }
2432
2433         /* Now that all information in the WIM header has been determined, the
2434          * preliminary header written earlier can be overwritten, the header of
2435          * the existing WIM file can be overwritten, or the final header can be
2436          * written to the end of the pipable WIM.  */
2437         wim->out_hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2438         if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2439                 ret = write_wim_header(&wim->out_hdr, &wim->out_fd, wim->out_fd.offset);
2440         else
2441                 ret = write_wim_header(&wim->out_hdr, &wim->out_fd, 0);
2442         if (ret)
2443                 return ret;
2444
2445         /* Possibly sync file data to disk before closing.  On POSIX systems, it
2446          * is necessary to do this before using rename() to overwrite an
2447          * existing file with a new file.  Otherwise, data loss would occur if
2448          * the system is abruptly terminated when the metadata for the rename
2449          * operation has been written to disk, but the new file data has not.
2450          */
2451         if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
2452                 if (fsync(wim->out_fd.fd)) {
2453                         ERROR_WITH_ERRNO("Error syncing data to WIM file");
2454                         return WIMLIB_ERR_WRITE;
2455                 }
2456         }
2457
2458         if (close_wim_writable(wim, write_flags)) {
2459                 ERROR_WITH_ERRNO("Failed to close the output WIM file");
2460                 return WIMLIB_ERR_WRITE;
2461         }
2462
2463         return 0;
2464 }
2465
2466 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
2467
2468 /* Set advisory lock on WIM file (if not already done so)  */
2469 int
2470 lock_wim_for_append(WIMStruct *wim)
2471 {
2472         if (wim->locked_for_append)
2473                 return 0;
2474         if (!flock(wim->in_fd.fd, LOCK_EX | LOCK_NB)) {
2475                 wim->locked_for_append = 1;
2476                 return 0;
2477         }
2478         if (errno != EWOULDBLOCK)
2479                 return 0;
2480         return WIMLIB_ERR_ALREADY_LOCKED;
2481 }
2482
2483 /* Remove advisory lock on WIM file (if present)  */
2484 void
2485 unlock_wim_for_append(WIMStruct *wim)
2486 {
2487         if (wim->locked_for_append) {
2488                 flock(wim->in_fd.fd, LOCK_UN);
2489                 wim->locked_for_append = 0;
2490         }
2491 }
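
/*
 * Rough usage sketch (hypothetical caller, not actual wimlib code): an append
 * operation would typically take the lock before writing and release it when
 * finished:
 *
 *      ret = lock_wim_for_append(wim);
 *      if (ret)
 *              return ret;  // WIMLIB_ERR_ALREADY_LOCKED: another process is appending
 *
 *      ... append the new blobs and rewrite the blob table ...
 *
 *      unlock_wim_for_append(wim);
 */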
2492 #endif
2493
2494 /*
2495  * write_pipable_wim():
2496  *
2497  * Perform the intermediate stages of creating a "pipable" WIM (i.e. a WIM
2498  * capable of being applied from a pipe).
2499  *
2500  * Pipable WIMs are a wimlib-specific modification of the WIM format such that
2501  * images can be applied from them sequentially when the file data is sent over
2502  * a pipe.  In addition, a pipable WIM can be written sequentially to a pipe.
2503  * The modifications made to the WIM format for pipable WIMs are:
2504  *
2505  * - Magic characters in header are "WLPWM\0\0\0" (wimlib pipable WIM) instead
2506  *   of "MSWIM\0\0\0".  This lets wimlib know that the WIM is pipable and also
2507  *   stops other software from trying to read the file as a normal WIM.
2508  *
2509  * - The header at the beginning of the file does not contain all the normal
2510  *   information; in particular it will have all 0's for the blob table and XML
2511  *   data resource entries.  This is because this information cannot be
2512  *   determined until the blob table and XML data have been written.
2513  *   Consequently, wimlib will write the full header at the very end of the
2514  *   file.  The header at the end, however, is only used when reading the WIM
2515  *   from a seekable file (not a pipe).
2516  *
2517  * - An extra copy of the XML data is placed directly after the header.  This
2518  *   allows image names and sizes to be determined at an appropriate time when
2519  *   reading the WIM from a pipe.  This copy of the XML data is ignored if the
2520  *   WIM is read from a seekable file (not a pipe).
2521  *
2522  * - Solid resources are not allowed.  Each blob is always stored in its own
2523  *   resource.
2524  *
2525  * - The format of resources, or blobs, has been modified to allow them to be
2526  *   used before the "blob table" has been read.  Each blob is prefixed with a
2527  *   `struct pwm_blob_hdr' that is basically an abbreviated form of `struct
2528  *   blob_descriptor_disk' that only contains the SHA-1 message digest,
2529  *   uncompressed blob size, and flags that indicate whether the blob is
2530  *   compressed.  The data of uncompressed blobs then follows literally, while
2531  *   the data of compressed blobs follows in a modified format.  Compressed
2532  *   blobs do not begin with a chunk table, since the chunk table cannot be
2533  *   written until all chunks have been compressed.  Instead, each compressed
2534  *   chunk is prefixed by a `struct pwm_chunk_hdr' that gives its size.
2535  *   Furthermore, the chunk table is written at the end of the resource instead
2536  *   of the start.  Note: chunk offsets are given in the chunk table as if the
2537  *   `struct pwm_chunk_hdr's were not present; also, the chunk table is only
2538  *   used if the WIM is being read from a seekable file (not a pipe).
2539  *
2540  * - Metadata blobs always come before non-metadata blobs.  (This does not by
2541  *   itself constitute an incompatibility with normal WIMs, since this is valid
2542  *   in normal WIMs.)
2543  *
2544  * - At least up to the end of the blobs, all components must be packed as
2545  *   tightly as possible; there cannot be any "holes" in the WIM.  (This does
2546  *   not by itself constitute an incompatibility with normal WIMs, since this
2547  *   is valid in normal WIMs.)
2548  *
2549  * Note: the blob table, XML data, and header at the end are not used when
2550  * applying from a pipe.  They exist to support functionality such as image
2551  * application and export when the WIM is *not* read from a pipe.
2552  *
2553  *   Layout of pipable WIM:
2554  *
2555  * +--------+----------+--------------------+----------------+--------------+-----------+--------+
2556  * | Header | XML data | Metadata resources | File resources |  Blob table  | XML data  | Header |
2557  * +--------+----------+--------------------+----------------+--------------+-----------+--------+
2558  *
2559  *   Layout of normal WIM:
2560  *
2561  * +--------+-----------------------------+--------------+----------+
2562  * | Header | File and metadata resources |  Blob table  | XML data |
2563  * +--------+-----------------------------+--------------+----------+
2564  *
2565  * An optional integrity table can follow the final XML data in both normal and
2566  * pipable WIMs.  However, due to implementation details, wimlib currently can
2567  * only include an integrity table in a pipable WIM when writing it to a
2568  * seekable file (not a pipe).
2569  *
2570  * Do note that since pipable WIMs are not supported by Microsoft's software,
2571  * wimlib does not create them unless explicitly requested (with
2572  * WIMLIB_WRITE_FLAG_PIPABLE) and as stated above they use different magic
2573  * characters to identify the file.
2574  */
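
/*
 * Illustrative sketch (not compiled): the per-blob write order described in
 * the comment above, reduced to plain write() calls on simplified stand-in
 * types.  The real `struct pwm_blob_hdr' and `struct pwm_chunk_hdr' are
 * defined in wimlib's headers; the field layout, byte-order handling, and
 * helper name below are assumptions made purely for illustration and are not
 * wimlib's actual code.
 */
#if 0
#include <stdint.h>
#include <unistd.h>

struct example_blob_hdr {               /* stand-in for `struct pwm_blob_hdr'  */
        uint64_t magic;                 /* identifies a pipable blob header    */
        uint64_t uncompressed_size;     /* original (uncompressed) blob size   */
        uint8_t  hash[20];              /* SHA-1 message digest of the blob    */
        uint32_t flags;                 /* e.g. "this blob is compressed"      */
};

struct example_chunk_hdr {              /* stand-in for `struct pwm_chunk_hdr' */
        uint32_t compressed_size;       /* stored size of the following chunk  */
};

/* Emit one compressed blob in the pipable order: blob header first, then each
 * chunk prefixed by its stored size.  The chunk table (not shown) would be
 * appended after the last chunk and is only consulted on seekable files.  */
static int
example_write_pipable_blob(int fd, const struct example_blob_hdr *hdr,
                           const void * const *chunks,
                           const uint32_t *chunk_sizes, size_t num_chunks)
{
        if (write(fd, hdr, sizeof(*hdr)) != (ssize_t)sizeof(*hdr))
                return -1;
        for (size_t i = 0; i < num_chunks; i++) {
                struct example_chunk_hdr chdr = {
                        .compressed_size = chunk_sizes[i],
                };
                if (write(fd, &chdr, sizeof(chdr)) != (ssize_t)sizeof(chdr))
                        return -1;
                if (write(fd, chunks[i], chunk_sizes[i]) != (ssize_t)chunk_sizes[i])
                        return -1;
        }
        return 0;
}
#endif
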
2575 static int
2576 write_pipable_wim(WIMStruct *wim, int image, int write_flags,
2577                   unsigned num_threads,
2578                   struct list_head *blob_list_override,
2579                   struct list_head *blob_table_list_ret)
2580 {
2581         int ret;
2582         struct wim_reshdr xml_reshdr;
2583
2584         WARNING("Creating a pipable WIM, which will "
2585                 "be incompatible\n"
2586                 "          with Microsoft's software (WIMGAPI/ImageX/DISM).");
2587
2588         /* At this point, the header at the beginning of the file has already
2589          * been written.  */
2590
2591         /* For efficiency, when wimlib adds an image to the WIM with
2592          * wimlib_add_image(), the SHA-1 message digests of files are not
2593          * calculated; instead, they are calculated while the files are being
2594          * written.  However, this does not work when writing a pipable WIM,
2595          * since when writing a blob to a pipable WIM, its SHA-1 message digest
2596          * needs to be known before the blob data is written.  Therefore, before
2597          * getting much farther, we need to pre-calculate the SHA-1 message
2598          * digests of all blobs that will be written.  */
2599         ret = wim_checksum_unhashed_blobs(wim);
2600         if (ret)
2601                 return ret;
2602
2603         /* Write extra copy of the XML data.  */
2604         ret = write_wim_xml_data(wim, image, WIM_TOTALBYTES_OMIT,
2605                                  &xml_reshdr, WRITE_RESOURCE_FLAG_PIPABLE);
2606         if (ret)
2607                 return ret;
2608
2609         /* Write metadata resources for the image(s) being included in the
2610          * output WIM.  */
2611         ret = write_metadata_resources(wim, image, write_flags);
2612         if (ret)
2613                 return ret;
2614
2615         /* Write file data needed for the image(s) being included in the output
2616          * WIM, or file data needed for the split WIM part.  */
2617         return write_file_data(wim, image, write_flags,
2618                                num_threads, blob_list_override,
2619                                blob_table_list_ret);
2620
2621         /* The blob table, XML data, and header at end are handled by
2622          * finish_write().  */
2623 }
2624
2625 static bool
2626 should_default_to_solid_compression(WIMStruct *wim, int write_flags)
2627 {
2628         return wim->out_hdr.wim_version == WIM_VERSION_SOLID &&
2629                 !(write_flags & (WIMLIB_WRITE_FLAG_SOLID |
2630                                  WIMLIB_WRITE_FLAG_PIPABLE)) &&
2631                 wim_has_solid_resources(wim);
2632 }
2633
2634 /* Write a standalone WIM or split WIM (SWM) part to a new file or to a file
2635  * descriptor.  */
2636 int
2637 write_wim_part(WIMStruct *wim,
2638                const void *path_or_fd,
2639                int image,
2640                int write_flags,
2641                unsigned num_threads,
2642                unsigned part_number,
2643                unsigned total_parts,
2644                struct list_head *blob_list_override,
2645                const u8 *guid)
2646 {
2647         int ret;
2648         struct list_head blob_table_list;
2649
2650         /* Internally, this is always called with a valid part number and total
2651          * parts.  */
2652         wimlib_assert(total_parts >= 1);
2653         wimlib_assert(part_number >= 1 && part_number <= total_parts);
2654
2655         /* A valid image (or all images) must be specified.  */
2656         if (image != WIMLIB_ALL_IMAGES &&
2657              (image < 1 || image > wim->hdr.image_count))
2658                 return WIMLIB_ERR_INVALID_IMAGE;
2659
2660         /* If we need to write metadata resources, make sure the ::WIMStruct has
2661          * the needed information attached (e.g. is not a resource-only WIM,
2662          * such as a non-first part of a split WIM).  */
2663         if (!wim_has_metadata(wim) &&
2664             !(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA))
2665                 return WIMLIB_ERR_METADATA_NOT_FOUND;
2666
2667         /* Check for contradictory flags.  */
2668         if ((write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2669                             WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2670                                 == (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2671                                     WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2672                 return WIMLIB_ERR_INVALID_PARAM;
2673
2674         if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2675                             WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2676                                 == (WIMLIB_WRITE_FLAG_PIPABLE |
2677                                     WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2678                 return WIMLIB_ERR_INVALID_PARAM;
2679
2680         /* Include an integrity table by default if no preference was given and
2681          * the WIM already had an integrity table.  */
2682         if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2683                              WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))) {
2684                 if (wim_has_integrity_table(wim))
2685                         write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
2686         }
2687
2688         /* Write a pipable WIM by default if no preference was given and the WIM
2689          * was already pipable.  */
2690         if (!(write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2691                              WIMLIB_WRITE_FLAG_NOT_PIPABLE))) {
2692                 if (wim_is_pipable(wim))
2693                         write_flags |= WIMLIB_WRITE_FLAG_PIPABLE;
2694         }
2695
2696         if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2697                             WIMLIB_WRITE_FLAG_SOLID))
2698                                     == (WIMLIB_WRITE_FLAG_PIPABLE |
2699                                         WIMLIB_WRITE_FLAG_SOLID))
2700         {
2701                 ERROR("Solid compression is unsupported in pipable WIMs");
2702                 return WIMLIB_ERR_INVALID_PARAM;
2703         }
2704
2705         /* Start initializing the new file header.  */
2706         memset(&wim->out_hdr, 0, sizeof(wim->out_hdr));
2707
2708         /* Set the magic number.  */
2709         if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2710                 wim->out_hdr.magic = PWM_MAGIC;
2711         else
2712                 wim->out_hdr.magic = WIM_MAGIC;
2713
2714         /* Set the version number.  */
2715         if ((write_flags & WIMLIB_WRITE_FLAG_SOLID) ||
2716             wim->out_compression_type == WIMLIB_COMPRESSION_TYPE_LZMS)
2717                 wim->out_hdr.wim_version = WIM_VERSION_SOLID;
2718         else
2719                 wim->out_hdr.wim_version = WIM_VERSION_DEFAULT;
2720
2721         /* Default to solid compression if it is valid in the chosen WIM file
2722          * format and the WIMStruct references any solid resources.  This is
2723          * useful when exporting an image from a solid WIM.  */
2724         if (should_default_to_solid_compression(wim, write_flags))
2725                 write_flags |= WIMLIB_WRITE_FLAG_SOLID;
2726
2727         /* Set the header flags.  */
2728         wim->out_hdr.flags = (wim->hdr.flags & (WIM_HDR_FLAG_RP_FIX |
2729                                                 WIM_HDR_FLAG_READONLY));
2730         if (total_parts != 1)
2731                 wim->out_hdr.flags |= WIM_HDR_FLAG_SPANNED;
2732         if (wim->out_compression_type != WIMLIB_COMPRESSION_TYPE_NONE) {
2733                 wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESSION;
2734                 switch (wim->out_compression_type) {
2735                 case WIMLIB_COMPRESSION_TYPE_XPRESS:
2736                         wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESS_XPRESS;
2737                         break;
2738                 case WIMLIB_COMPRESSION_TYPE_LZX:
2739                         wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESS_LZX;
2740                         break;
2741                 case WIMLIB_COMPRESSION_TYPE_LZMS:
2742                         wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESS_LZMS;
2743                         break;
2744                 }
2745         }
2746
2747         /* Set the chunk size.  */
2748         wim->out_hdr.chunk_size = wim->out_chunk_size;
2749
2750         /* Set the GUID.  */
2751         if (write_flags & WIMLIB_WRITE_FLAG_RETAIN_GUID)
2752                 guid = wim->hdr.guid;
2753         if (guid)
2754                 memcpy(wim->out_hdr.guid, guid, WIMLIB_GUID_LEN);
2755         else
2756                 randomize_byte_array(wim->out_hdr.guid, WIMLIB_GUID_LEN);
2757
2758         /* Set the part number and total parts.  */
2759         wim->out_hdr.part_number = part_number;
2760         wim->out_hdr.total_parts = total_parts;
2761
2762         /* Set the image count.  */
2763         if (image == WIMLIB_ALL_IMAGES)
2764                 wim->out_hdr.image_count = wim->hdr.image_count;
2765         else
2766                 wim->out_hdr.image_count = 1;
2767
2768         /* Set the boot index.  */
2769         wim->out_hdr.boot_idx = 0;
2770         if (total_parts == 1) {
2771                 if (image == WIMLIB_ALL_IMAGES)
2772                         wim->out_hdr.boot_idx = wim->hdr.boot_idx;
2773                 else if (image == wim->hdr.boot_idx)
2774                         wim->out_hdr.boot_idx = 1;
2775         }
2776
2777         /* Set up the output file descriptor.  */
2778         if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) {
2779                 /* File descriptor was explicitly provided.  */
2780                 filedes_init(&wim->out_fd, *(const int *)path_or_fd);
2781                 if (!filedes_is_seekable(&wim->out_fd)) {
2782                         /* The file descriptor is a pipe.  */
2783                         ret = WIMLIB_ERR_INVALID_PARAM;
2784                         if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2785                                 goto out_cleanup;
2786                         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2787                                 ERROR("Can't include integrity check when "
2788                                       "writing pipable WIM to pipe!");
2789                                 goto out_cleanup;
2790                         }
2791                 }
2792         } else {
2793                 /* Filename of WIM to write was provided; open file descriptor
2794                  * to it.  */
2795                 ret = open_wim_writable(wim, (const tchar*)path_or_fd,
2796                                         O_TRUNC | O_CREAT | O_RDWR);
2797                 if (ret)
2798                         goto out_cleanup;
2799         }
2800
2801         /* Write initial header.  This is merely a "dummy" header since it
2802          * doesn't have resource entries filled in yet, so it will be
2803          * overwritten later (unless writing a pipable WIM).  */
2804         if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2805                 wim->out_hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2806         ret = write_wim_header(&wim->out_hdr, &wim->out_fd, wim->out_fd.offset);
2807         wim->out_hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2808         if (ret)
2809                 goto out_cleanup;
2810
2811         /* Write file data and metadata resources.  */
2812         if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE)) {
2813                 /* Default case: create a normal (non-pipable) WIM.  */
2814                 ret = write_file_data(wim, image, write_flags,
2815                                       num_threads,
2816                                       blob_list_override,
2817                                       &blob_table_list);
2818                 if (ret)
2819                         goto out_cleanup;
2820
2821                 ret = write_metadata_resources(wim, image, write_flags);
2822                 if (ret)
2823                         goto out_cleanup;
2824         } else {
2825                 /* Non-default case: create pipable WIM.  */
2826                 ret = write_pipable_wim(wim, image, write_flags, num_threads,
2827                                         blob_list_override,
2828                                         &blob_table_list);
2829                 if (ret)
2830                         goto out_cleanup;
2831         }
2832
2833         /* Write blob table, XML data, and (optional) integrity table.  */
2834         ret = finish_write(wim, image, write_flags, &blob_table_list);
2835 out_cleanup:
2836         (void)close_wim_writable(wim, write_flags);
2837         return ret;
2838 }
2839
2840 /* Write a standalone WIM to a file or file descriptor.  */
2841 static int
2842 write_standalone_wim(WIMStruct *wim, const void *path_or_fd,
2843                      int image, int write_flags, unsigned num_threads)
2844 {
2845         return write_wim_part(wim, path_or_fd, image, write_flags,
2846                               num_threads, 1, 1, NULL, NULL);
2847 }
2848
2849 /* API function documented in wimlib.h  */
2850 WIMLIBAPI int
2851 wimlib_write(WIMStruct *wim, const tchar *path,
2852              int image, int write_flags, unsigned num_threads)
2853 {
2854         if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
2855                 return WIMLIB_ERR_INVALID_PARAM;
2856
2857         if (path == NULL || path[0] == T('\0'))
2858                 return WIMLIB_ERR_INVALID_PARAM;
2859
2860         return write_standalone_wim(wim, path, image, write_flags, num_threads);
2861 }
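
/*
 * Illustrative caller-side sketch (not compiled): one way an application
 * using the public API might invoke wimlib_write().  The helper name and the
 * choice of flags are assumptions made for illustration only.
 */
#if 0
#include "wimlib.h"

static int
example_save_all_images(WIMStruct *wim, const wimlib_tchar *path)
{
        /* Write every image to @path with an integrity table; passing 0 lets
         * wimlib choose the number of compressor threads.  */
        return wimlib_write(wim, path, WIMLIB_ALL_IMAGES,
                            WIMLIB_WRITE_FLAG_CHECK_INTEGRITY, 0);
}
#endif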
2862
2863 /* API function documented in wimlib.h  */
2864 WIMLIBAPI int
2865 wimlib_write_to_fd(WIMStruct *wim, int fd,
2866                    int image, int write_flags, unsigned num_threads)
2867 {
2868         if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
2869                 return WIMLIB_ERR_INVALID_PARAM;
2870
2871         if (fd < 0)
2872                 return WIMLIB_ERR_INVALID_PARAM;
2873
2874         write_flags |= WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR;
2875
2876         return write_standalone_wim(wim, &fd, image, write_flags, num_threads);
2877 }
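
/*
 * Illustrative caller-side sketch (not compiled): streaming a pipable WIM to
 * standard output, e.g. when the destination is a pipe.  The helper name is
 * an assumption for illustration; the flag choice reflects the checks in
 * write_wim_part() above (a non-seekable descriptor requires
 * WIMLIB_WRITE_FLAG_PIPABLE and cannot include an integrity table).
 */
#if 0
#include <unistd.h>
#include "wimlib.h"

static int
example_stream_pipable_wim(WIMStruct *wim)
{
        return wimlib_write_to_fd(wim, STDOUT_FILENO, WIMLIB_ALL_IMAGES,
                                  WIMLIB_WRITE_FLAG_PIPABLE, 0);
}
#endif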
2878
2879 static bool
2880 any_images_modified(WIMStruct *wim)
2881 {
2882         for (int i = 0; i < wim->hdr.image_count; i++)
2883                 if (wim->image_metadata[i]->modified)
2884                         return true;
2885         return false;
2886 }
2887
2888 static int
2889 check_resource_offset(struct blob_descriptor *blob, void *_wim)
2890 {
2891         const WIMStruct *wim = _wim;
2892         off_t end_offset = *(const off_t*)wim->private;
2893
2894         if (blob->blob_location == BLOB_IN_WIM &&
2895             blob->rdesc->wim == wim &&
2896             blob->rdesc->offset_in_wim + blob->rdesc->size_in_wim > end_offset)
2897                 return WIMLIB_ERR_RESOURCE_ORDER;
2898         return 0;
2899 }
2900
2901 /* Make sure no file or metadata resources are located after the XML data (or
2902  * integrity table if present)--- otherwise we can't safely overwrite the WIM in
2903  * place and we return WIMLIB_ERR_RESOURCE_ORDER.  */
2904 static int
2905 check_resource_offsets(WIMStruct *wim, off_t end_offset)
2906 {
2907         int ret;
2908         unsigned i;
2909
2910         wim->private = &end_offset;
2911         ret = for_blob_in_table(wim->blob_table, check_resource_offset, wim);
2912         if (ret)
2913                 return ret;
2914
2915         for (i = 0; i < wim->hdr.image_count; i++) {
2916                 ret = check_resource_offset(wim->image_metadata[i]->metadata_blob, wim);
2917                 if (ret)
2918                         return ret;
2919         }
2920         return 0;
2921 }
2922
2923 /*
2924  * Overwrite a WIM, possibly appending new resources to it.
2925  *
2926  * A WIM looks like (or is supposed to look like) the following:
2927  *
2928  *                   Header (208 bytes)
2929  *                   Resources for metadata and files (variable size)
2930  *                   Blob table (variable size)
2931  *                   XML data (variable size)
2932  *                   Integrity table (optional) (variable size)
2933  *
2934  * If we are not adding any new files or metadata, then the blob table is
2935  * unchanged--- so we only need to overwrite the XML data, integrity table, and
2936  * header.  This operation is potentially unsafe if the program is abruptly
2937  * terminated while the XML data or integrity table is being overwritten, but
2938  * before the new header has been written.  To partially alleviate this problem,
2939  * we write a temporary header after the XML data has been written.  This may
2940  * prevent the WIM from becoming corrupted if the program is terminated while
2941  * the integrity table is being calculated (but no guarantees, due to write
2942  * re-ordering...).
2943  *
2944  * If we are adding new blobs, including new file data as well as any metadata
2945  * for any new images, then the blob table needs to be changed, and those blobs
2946  * need to be written.  In this case, we try to perform a safe update of the WIM
2947  * file by writing the blobs *after* the end of the previous WIM, then writing
2948  * the new blob table, XML data, and (optionally) integrity table following the
2949  * new blobs.  This will produce a layout like the following:
2950  *
2951  *                   Header (208 bytes)
2952  *                   (OLD) Resources for metadata and files (variable size)
2953  *                   (OLD) Blob table (variable size)
2954  *                   (OLD) XML data (variable size)
2955  *                   (OLD) Integrity table (optional) (variable size)
2956  *                   (NEW) Resources for metadata and files (variable size)
2957  *                   (NEW) Blob table (variable size)
2958  *                   (NEW) XML data (variable size)
2959  *                   (NEW) Integrity table (optional) (variable size)
2960  *
2961  * At all points, the WIM is valid as nothing points to the new data yet.  Then,
2962  * the header is overwritten to point to the new blob table, XML data, and
2963  * integrity table, to produce the following layout:
2964  *
2965  *                   Header (208 bytes)
2966  *                   Resources for metadata and files (variable size)
2967  *                   Nothing (variable size)
2968  *                   Resources for metadata and files (variable size)
2969  *                   Blob table (variable size)
2970  *                   XML data (variable size)
2971  *                   Integrity table (optional) (variable size)
2972  *
2973  * This method allows an image to be appended to a large WIM very quickly, and
2974  * is crash-safe except in the case of write re-ordering, but the disadvantage
2975  * is that a small hole is left in the WIM where the old blob table, XML data,
2976  * and integrity table were.  (These usually only take up a small amount of
2977  * space compared to the blobs, however.)
2978  */
2979 static int
2980 overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads)
2981 {
2982         int ret;
2983         off_t old_wim_end;
2984         u64 old_blob_table_end, old_xml_begin, old_xml_end;
2985         struct list_head blob_list;
2986         struct list_head blob_table_list;
2987         struct filter_context filter_ctx;
2988
2989         /* Include an integrity table by default if no preference was given and
2990          * the WIM already had an integrity table.  */
2991         if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2992                              WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)))
2993                 if (wim_has_integrity_table(wim))
2994                         write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
2995
2996         /* Start preparing the updated file header.  */
2997         memcpy(&wim->out_hdr, &wim->hdr, sizeof(wim->out_hdr));
2998
2999         /* If using solid compression, the version number must be set to
3000          * WIM_VERSION_SOLID.  */
3001         if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
3002                 wim->out_hdr.wim_version = WIM_VERSION_SOLID;
3003
3004         /* Default to solid compression if it is valid in the chosen WIM file
3005          * format and the WIMStruct references any solid resources.  This is
3006          * useful when updating a solid WIM.  */
3007         if (should_default_to_solid_compression(wim, write_flags))
3008                 write_flags |= WIMLIB_WRITE_FLAG_SOLID;
3009
3010         /* Set additional flags for overwrite.  */
3011         write_flags |= WIMLIB_WRITE_FLAG_OVERWRITE |
3012                        WIMLIB_WRITE_FLAG_STREAMS_OK;
3013
3014  * Make sure there is no data after the XML data, except possibly an
3015          * integrity table.  If this were the case, then this data would be
3016          * overwritten.  */
3017         old_xml_begin = wim->hdr.xml_data_reshdr.offset_in_wim;
3018         old_xml_end = old_xml_begin + wim->hdr.xml_data_reshdr.size_in_wim;
3019         old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
3020                              wim->hdr.blob_table_reshdr.size_in_wim;
3021         if (wim_has_integrity_table(wim) &&
3022             wim->hdr.integrity_table_reshdr.offset_in_wim < old_xml_end) {
3023                 WARNING("Didn't expect the integrity table to be before the XML data");
3024                 ret = WIMLIB_ERR_RESOURCE_ORDER;
3025                 goto out;
3026         }
3027
3028         if (old_blob_table_end > old_xml_begin) {
3029                 WARNING("Didn't expect the blob table to be after the XML data");
3030                 ret = WIMLIB_ERR_RESOURCE_ORDER;
3031                 goto out;
3032         }
3033
3034         /* Set @old_wim_end, which indicates the point beyond which we don't
3035          * allow any file and metadata resources to appear without returning
3036  * WIMLIB_ERR_RESOURCE_ORDER (since we would otherwise overwrite these
3037  * resources). */
3038         if (!wim->image_deletion_occurred && !any_images_modified(wim)) {
3039                 /* If no images have been modified and no images have been
3040                  * deleted, a new blob table does not need to be written.  We
3041                  * shall write the new XML data and optional integrity table
3042                  * immediately after the blob table.  Note that this may
3043                  * overwrite an existing integrity table. */
3044                 old_wim_end = old_blob_table_end;
3045                 write_flags |= WIMLIB_WRITE_FLAG_NO_NEW_BLOBS;
3046         } else if (wim_has_integrity_table(wim)) {
3047                 /* Old WIM has an integrity table; begin writing new blobs after
3048                  * it. */
3049                 old_wim_end = wim->hdr.integrity_table_reshdr.offset_in_wim +
3050                               wim->hdr.integrity_table_reshdr.size_in_wim;
3051         } else {
3052                 /* No existing integrity table; begin writing new blobs after
3053                  * the old XML data. */
3054                 old_wim_end = old_xml_end;
3055         }
3056
3057         ret = check_resource_offsets(wim, old_wim_end);
3058         if (ret)
3059                 goto out;
3060
3061         ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES, write_flags,
3062                                           &blob_list, &blob_table_list,
3063                                           &filter_ctx);
3064         if (ret)
3065                 goto out;
3066
3067         if (write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)
3068                 wimlib_assert(list_empty(&blob_list));
3069
3070         ret = open_wim_writable(wim, wim->filename, O_RDWR);
3071         if (ret)
3072                 goto out;
3073
3074         ret = lock_wim_for_append(wim);
3075         if (ret)
3076                 goto out_close_wim;
3077
3078         /* Set WIM_HDR_FLAG_WRITE_IN_PROGRESS flag in header. */
3079         wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
3080         ret = write_wim_header_flags(wim->hdr.flags, &wim->out_fd);
3081         wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
3082         if (ret) {
3083                 ERROR_WITH_ERRNO("Error updating WIM header flags");
3084                 goto out_unlock_wim;
3085         }
3086
3087         if (filedes_seek(&wim->out_fd, old_wim_end) == -1) {
3088                 ERROR_WITH_ERRNO("Can't seek to end of WIM");
3089                 ret = WIMLIB_ERR_WRITE;
3090                 goto out_restore_hdr;
3091         }
3092
3093         ret = write_file_data_blobs(wim, &blob_list, write_flags,
3094                                     num_threads, &filter_ctx);
3095         if (ret)
3096                 goto out_truncate;
3097
3098         ret = write_metadata_resources(wim, WIMLIB_ALL_IMAGES, write_flags);
3099         if (ret)
3100                 goto out_truncate;
3101
3102         ret = finish_write(wim, WIMLIB_ALL_IMAGES, write_flags,
3103                            &blob_table_list);
3104         if (ret)
3105                 goto out_truncate;
3106
3107         unlock_wim_for_append(wim);
3108         return 0;
3109
3110 out_truncate:
3111         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)) {
3112                 WARNING("Truncating \"%"TS"\" to its original size "
3113                         "(%"PRIu64" bytes)", wim->filename, old_wim_end);
3114                 /* Return value of ftruncate() is ignored because this is
3115                  * already an error path.  */
3116                 (void)ftruncate(wim->out_fd.fd, old_wim_end);
3117         }
3118 out_restore_hdr:
3119         (void)write_wim_header_flags(wim->hdr.flags, &wim->out_fd);
3120 out_unlock_wim:
3121         unlock_wim_for_append(wim);
3122 out_close_wim:
3123         (void)close_wim_writable(wim, write_flags);
3124 out:
3125         return ret;
3126 }
3127
3128 static int
3129 overwrite_wim_via_tmpfile(WIMStruct *wim, int write_flags, unsigned num_threads)
3130 {
3131         size_t wim_name_len;
3132         int ret;
3133
3134         /* Write the WIM to a temporary file in the same directory as the
3135          * original WIM. */
3136         wim_name_len = tstrlen(wim->filename);
3137         tchar tmpfile[wim_name_len + 10];
3138         tmemcpy(tmpfile, wim->filename, wim_name_len);
3139         randomize_char_array_with_alnum(tmpfile + wim_name_len, 9);
3140         tmpfile[wim_name_len + 9] = T('\0');
3141
3142         ret = wimlib_write(wim, tmpfile, WIMLIB_ALL_IMAGES,
3143                            write_flags |
3144                                 WIMLIB_WRITE_FLAG_FSYNC |
3145                                 WIMLIB_WRITE_FLAG_RETAIN_GUID,
3146                            num_threads);
3147         if (ret) {
3148                 tunlink(tmpfile);
3149                 return ret;
3150         }
3151
3152         if (filedes_valid(&wim->in_fd)) {
3153                 filedes_close(&wim->in_fd);
3154                 filedes_invalidate(&wim->in_fd);
3155         }
3156
3157         /* Rename the new WIM file to the original WIM file.  Note: on Windows
3158          * this actually calls win32_rename_replacement(), not _wrename(), so
3159          * that removing the existing destination file can be handled.  */
3160         ret = trename(tmpfile, wim->filename);
3161         if (ret) {
3162                 ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'",
3163                                  tmpfile, wim->filename);
3164         #ifdef __WIN32__
3165                 if (ret < 0)
3166         #endif
3167                 {
3168                         tunlink(tmpfile);
3169                 }
3170                 return WIMLIB_ERR_RENAME;
3171         }
3172
3173         union wimlib_progress_info progress;
3174         progress.rename.from = tmpfile;
3175         progress.rename.to = wim->filename;
3176         return call_progress(wim->progfunc, WIMLIB_PROGRESS_MSG_RENAME,
3177                              &progress, wim->progctx);
3178 }
3179
3180 /* Determine if the specified WIM file may be updated by appending in-place
3181  * rather than writing and replacing it with an entirely new file.  */
3182 static bool
3183 can_overwrite_wim_inplace(const WIMStruct *wim, int write_flags)
3184 {
3185         /* REBUILD flag forces full rebuild.  */
3186         if (write_flags & WIMLIB_WRITE_FLAG_REBUILD)
3187                 return false;
3188
3189         /* Image deletions cause full rebuild by default.  */
3190         if (wim->image_deletion_occurred &&
3191             !(write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
3192                 return false;
3193
3194         /* Pipable WIMs cannot be updated in place, nor can a non-pipable WIM be
3195          * turned into a pipable WIM in-place.  */
3196         if (wim_is_pipable(wim) || (write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
3197                 return false;
3198
3199         /* The default compression type and compression chunk size selected for
3200          * the output WIM must be the same as those currently used for the WIM.
3201          */
3202         if (wim->compression_type != wim->out_compression_type)
3203                 return false;
3204         if (wim->chunk_size != wim->out_chunk_size)
3205                 return false;
3206
3207         return true;
3208 }
3209
3210 /* API function documented in wimlib.h  */
3211 WIMLIBAPI int
3212 wimlib_overwrite(WIMStruct *wim, int write_flags, unsigned num_threads)
3213 {
3214         int ret;
3215         u32 orig_hdr_flags;
3216
3217         if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
3218                 return WIMLIB_ERR_INVALID_PARAM;
3219
3220         if (!wim->filename)
3221                 return WIMLIB_ERR_NO_FILENAME;
3222
3223         orig_hdr_flags = wim->hdr.flags;
3224         if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
3225                 wim->hdr.flags &= ~WIM_HDR_FLAG_READONLY;
3226         ret = can_modify_wim(wim);
3227         wim->hdr.flags = orig_hdr_flags;
3228         if (ret)
3229                 return ret;
3230
3231         if (can_overwrite_wim_inplace(wim, write_flags)) {
3232                 ret = overwrite_wim_inplace(wim, write_flags, num_threads);
3233                 if (ret != WIMLIB_ERR_RESOURCE_ORDER)
3234                         return ret;
3235                 WARNING("Falling back to re-building entire WIM");
3236         }
3237         return overwrite_wim_via_tmpfile(wim, write_flags, num_threads);
3238 }
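
/*
 * Illustrative caller-side sketch (not compiled): committing in-memory
 * changes back to the file the WIMStruct was opened from.  Whether this
 * appends in place or rebuilds via a temporary file is decided above by
 * can_overwrite_wim_inplace().  The helper name and flag choice are
 * assumptions made for illustration only.
 */
#if 0
#include "wimlib.h"

static int
example_commit_changes(WIMStruct *wim, unsigned num_threads)
{
        return wimlib_overwrite(wim, WIMLIB_WRITE_FLAG_CHECK_INTEGRITY,
                                num_threads);
}
#endif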