1 /*
2  * write.c
3  *
4  * Support for writing WIM files; write a WIM file, overwrite a WIM file, write
5  * compressed file resources, etc.
6  */
7
8 /*
9  * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
10  *
11  * This file is free software; you can redistribute it and/or modify it under
12  * the terms of the GNU Lesser General Public License as published by the Free
13  * Software Foundation; either version 3 of the License, or (at your option) any
14  * later version.
15  *
16  * This file is distributed in the hope that it will be useful, but WITHOUT
17  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18  * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
19  * details.
20  *
21  * You should have received a copy of the GNU Lesser General Public License
22  * along with this file; if not, see http://www.gnu.org/licenses/.
23  */
24
25 #ifdef HAVE_CONFIG_H
26 #  include "config.h"
27 #endif
28
29 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
30 /* On BSD, <sys/file.h> should be included before "wimlib/list.h" so that
31  * "wimlib/list.h" can overwrite the LIST_HEAD macro.  */
32 #  include <sys/file.h>
33 #endif
34
35 #include <errno.h>
36 #include <fcntl.h>
37 #include <stdlib.h>
38 #include <unistd.h>
39
40 #include "wimlib/alloca.h"
41 #include "wimlib/assert.h"
42 #include "wimlib/blob_table.h"
43 #include "wimlib/chunk_compressor.h"
44 #include "wimlib/endianness.h"
45 #include "wimlib/error.h"
46 #include "wimlib/file_io.h"
47 #include "wimlib/header.h"
48 #include "wimlib/inode.h"
49 #include "wimlib/integrity.h"
50 #include "wimlib/metadata.h"
51 #include "wimlib/paths.h"
52 #include "wimlib/progress.h"
53 #include "wimlib/resource.h"
54 #include "wimlib/solid.h"
55 #ifdef __WIN32__
56 #  include "wimlib/win32.h" /* win32_rename_replacement() */
57 #endif
58 #include "wimlib/write.h"
59 #include "wimlib/xml.h"
60
61
62 /* wimlib internal flags used when writing resources.  */
63 #define WRITE_RESOURCE_FLAG_RECOMPRESS          0x00000001
64 #define WRITE_RESOURCE_FLAG_PIPABLE             0x00000002
65 #define WRITE_RESOURCE_FLAG_SOLID               0x00000004
66 #define WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE 0x00000008
67 #define WRITE_RESOURCE_FLAG_SOLID_SORT          0x00000010
68
69 static inline int
70 write_flags_to_resource_flags(int write_flags)
71 {
72         int write_resource_flags = 0;
73
74         if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
75                 write_resource_flags |= WRITE_RESOURCE_FLAG_RECOMPRESS;
76         if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
77                 write_resource_flags |= WRITE_RESOURCE_FLAG_PIPABLE;
78         if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
79                 write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID;
80         if (write_flags & WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES)
81                 write_resource_flags |= WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE;
82         if ((write_flags & (WIMLIB_WRITE_FLAG_SOLID |
83                             WIMLIB_WRITE_FLAG_NO_SOLID_SORT)) ==
84             WIMLIB_WRITE_FLAG_SOLID)
85                 write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID_SORT;
86         return write_resource_flags;
87 }
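/*
 * For example, per the mapping above: passing WIMLIB_WRITE_FLAG_SOLID alone
 * yields WRITE_RESOURCE_FLAG_SOLID | WRITE_RESOURCE_FLAG_SOLID_SORT, since
 * solid sorting is enabled unless WIMLIB_WRITE_FLAG_NO_SOLID_SORT is also
 * given, in which case only WRITE_RESOURCE_FLAG_SOLID is set.
 */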
88
89 struct filter_context {
90         int write_flags;
91         WIMStruct *wim;
92 };
93
94 /*
95  * Determine whether the specified blob should be filtered out from the write.
96  *
97  * Return values:
98  *
99  *  < 0 : The blob should be hard-filtered; that is, not included in the output
100  *        WIM file at all.
101  *    0 : The blob should not be filtered out.
102  *  > 0 : The blob should be soft-filtered; that is, it already exists in the
103  *        WIM file and may not need to be written again.
104  */
105 static int
106 blob_filtered(const struct blob_descriptor *blob,
107               const struct filter_context *ctx)
108 {
109         int write_flags;
110         WIMStruct *wim;
111
112         if (ctx == NULL)
113                 return 0;
114
115         write_flags = ctx->write_flags;
116         wim = ctx->wim;
117
118         if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE &&
119             blob->blob_location == BLOB_IN_WIM &&
120             blob->rdesc->wim == wim)
121                 return 1;
122
123         if (write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS &&
124             blob->blob_location == BLOB_IN_WIM &&
125             blob->rdesc->wim != wim)
126                 return -1;
127
128         return 0;
129 }
130
131 static bool
132 blob_hard_filtered(const struct blob_descriptor *blob,
133                    struct filter_context *ctx)
134 {
135         return blob_filtered(blob, ctx) < 0;
136 }
137
138 static inline int
139 may_soft_filter_blobs(const struct filter_context *ctx)
140 {
141         if (ctx == NULL)
142                 return 0;
143         return ctx->write_flags & WIMLIB_WRITE_FLAG_OVERWRITE;
144 }
145
146 static inline int
147 may_hard_filter_blobs(const struct filter_context *ctx)
148 {
149         if (ctx == NULL)
150                 return 0;
151         return ctx->write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS;
152 }
153
154 static inline int
155 may_filter_blobs(const struct filter_context *ctx)
156 {
157         return (may_soft_filter_blobs(ctx) || may_hard_filter_blobs(ctx));
158 }
159
160 /* Return true if the specified blob is located in a compressed WIM resource
161  * whose data can be reused as-is with the specified output parameters.  */
162 static bool
163 can_raw_copy(const struct blob_descriptor *blob,
164              int write_resource_flags, int out_ctype, u32 out_chunk_size)
165 {
166         const struct wim_resource_descriptor *rdesc;
167
168         if (write_resource_flags & WRITE_RESOURCE_FLAG_RECOMPRESS)
169                 return false;
170
171         if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
172                 return false;
173
174         if (blob->blob_location != BLOB_IN_WIM)
175                 return false;
176
177         rdesc = blob->rdesc;
178
179         if (rdesc->is_pipable != !!(write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE))
180                 return false;
181
182         if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) {
183                 /* Normal compressed resource: Must use same compression type
184                  * and chunk size.  */
185                 return (rdesc->compression_type == out_ctype &&
186                         rdesc->chunk_size == out_chunk_size);
187         }
188
189         if ((rdesc->flags & WIM_RESHDR_FLAG_SOLID) &&
190             (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
191         {
192                 /* Solid resource: Such resources may contain multiple blobs,
193                  * and in general only a subset of them need to be written.  As
194                  * a heuristic, re-use the raw data if more than two-thirds the
195                  * uncompressed size is being written.  */
196
197                 /* Note: solid resources contain a header that specifies the
198                  * compression type and chunk size; therefore we don't need to
199                  * check if they are compatible with @out_ctype and
200                  * @out_chunk_size.  */
201
202                 struct blob_descriptor *res_blob;
203                 u64 write_size = 0;
204
205                 list_for_each_entry(res_blob, &rdesc->blob_list, rdesc_node)
206                         if (res_blob->will_be_in_output_wim)
207                                 write_size += res_blob->size;
208
209                 return (write_size > rdesc->uncompressed_size * 2 / 3);
210         }
211
212         return false;
213 }
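/*
 * Worked example of the two-thirds heuristic above (hypothetical sizes): for
 * a solid resource with uncompressed_size = 90 MiB in which the blobs
 * selected for the output WIM total 70 MiB, 70 MiB > 90 MiB * 2 / 3 = 60 MiB,
 * so the existing compressed data is reused as-is; if only 50 MiB worth of
 * blobs were selected, the data would be recompressed instead.
 */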
214
215 static u32
216 reshdr_flags_for_blob(const struct blob_descriptor *blob)
217 {
218         u32 reshdr_flags = 0;
219         if (blob->is_metadata)
220                 reshdr_flags |= WIM_RESHDR_FLAG_METADATA;
221         return reshdr_flags;
222 }
223
224 static void
225 blob_set_out_reshdr_for_reuse(struct blob_descriptor *blob)
226 {
227         const struct wim_resource_descriptor *rdesc;
228
229         wimlib_assert(blob->blob_location == BLOB_IN_WIM);
230         rdesc = blob->rdesc;
231
232         if (rdesc->flags & WIM_RESHDR_FLAG_SOLID) {
233                 blob->out_reshdr.offset_in_wim = blob->offset_in_res;
234                 blob->out_reshdr.uncompressed_size = 0;
235                 blob->out_reshdr.size_in_wim = blob->size;
236
237                 blob->out_res_offset_in_wim = rdesc->offset_in_wim;
238                 blob->out_res_size_in_wim = rdesc->size_in_wim;
239                 blob->out_res_uncompressed_size = rdesc->uncompressed_size;
240         } else {
241                 blob->out_reshdr.offset_in_wim = rdesc->offset_in_wim;
242                 blob->out_reshdr.uncompressed_size = rdesc->uncompressed_size;
243                 blob->out_reshdr.size_in_wim = rdesc->size_in_wim;
244         }
245         blob->out_reshdr.flags = rdesc->flags;
246 }
247
248
249 /* Write the header for a blob in a pipable WIM.  */
250 static int
251 write_pwm_blob_header(const struct blob_descriptor *blob,
252                       struct filedes *out_fd, bool compressed)
253 {
254         struct pwm_blob_hdr blob_hdr;
255         u32 reshdr_flags;
256         int ret;
257
258         wimlib_assert(!blob->unhashed);
259
260         blob_hdr.magic = cpu_to_le64(PWM_BLOB_MAGIC);
261         blob_hdr.uncompressed_size = cpu_to_le64(blob->size);
262         copy_hash(blob_hdr.hash, blob->hash);
263         reshdr_flags = reshdr_flags_for_blob(blob);
264         if (compressed)
265                 reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED;
266         blob_hdr.flags = cpu_to_le32(reshdr_flags);
267         ret = full_write(out_fd, &blob_hdr, sizeof(blob_hdr));
268         if (ret)
269                 ERROR_WITH_ERRNO("Write error");
270         return ret;
271 }
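/*
 * Resulting layout in a pipable WIM, as implied by the code in this file
 * (field details are those of struct pwm_blob_hdr and struct pwm_chunk_hdr):
 * each blob is introduced by the header written above (magic, uncompressed
 * size, SHA-1 hash, flags), and when the data is compressed each chunk is
 * additionally preceded by a pwm_chunk_hdr giving its compressed size (see
 * write_chunk()), so a reader can parse the stream sequentially without
 * seeking back to a chunk table.
 */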
272
273 struct write_blobs_progress_data {
274         wimlib_progress_func_t progfunc;
275         void *progctx;
276         union wimlib_progress_info progress;
277         uint64_t next_progress;
278 };
279
280 static int
281 do_write_blobs_progress(struct write_blobs_progress_data *progress_data,
282                         u64 complete_size, u32 complete_count, bool discarded)
283 {
284         union wimlib_progress_info *progress = &progress_data->progress;
285         int ret;
286
287         if (discarded) {
288                 progress->write_streams.total_bytes -= complete_size;
289                 progress->write_streams.total_streams -= complete_count;
290                 if (progress_data->next_progress != ~(uint64_t)0 &&
291                     progress_data->next_progress > progress->write_streams.total_bytes)
292                 {
293                         progress_data->next_progress = progress->write_streams.total_bytes;
294                 }
295         } else {
296                 progress->write_streams.completed_bytes += complete_size;
297                 progress->write_streams.completed_streams += complete_count;
298         }
299
300         if (progress->write_streams.completed_bytes >= progress_data->next_progress)
301         {
302                 ret = call_progress(progress_data->progfunc,
303                                     WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
304                                     progress,
305                                     progress_data->progctx);
306                 if (ret)
307                         return ret;
308
309                 if (progress_data->next_progress == progress->write_streams.total_bytes) {
310                         progress_data->next_progress = ~(uint64_t)0;
311                 } else {
312                         /* Handle rate-limiting of messages  */
313
314                         /* Send new message as soon as another 1/128 of the
315                          * total has been written.  (Arbitrary number.)  */
316                         progress_data->next_progress =
317                                 progress->write_streams.completed_bytes +
318                                         progress->write_streams.total_bytes / 128;
319
320                         /* ... Unless that would be more than 5000000 bytes, in
321                          * which case send the next after the next 5000000
322                          * bytes.  (Another arbitrary number.)  */
323                         if (progress->write_streams.completed_bytes + 5000000 <
324                             progress_data->next_progress)
325                                 progress_data->next_progress =
326                                         progress->write_streams.completed_bytes + 5000000;
327
328                         /* ... But always send a message as soon as we're
329                          * completely done.  */
330                         if (progress->write_streams.total_bytes <
331                             progress_data->next_progress)
332                                 progress_data->next_progress =
333                                         progress->write_streams.total_bytes;
334                 }
335         }
336         return 0;
337 }
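/*
 * Example of the rate limiting above (hypothetical totals): with
 * total_bytes = 1 GiB, 1/128 of the total is about 8.4 MB, which exceeds the
 * 5000000-byte cap, so a message is sent roughly every 5 MB written; with
 * total_bytes = 256 MiB, 1/128 is about 2.1 MB, so messages are sent about
 * every 2.1 MB.  In both cases one final message is sent when completed_bytes
 * reaches total_bytes.
 */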
338
339 struct write_blobs_ctx {
340         /* File descriptor to which the blobs are being written.  */
341         struct filedes *out_fd;
342
343         /* Blob table for the WIMStruct on whose behalf the blobs are being
344          * written.  */
345         struct blob_table *blob_table;
346
347         /* Compression format to use.  */
348         int out_ctype;
349
350         /* Maximum uncompressed chunk size in compressed resources to use.  */
351         u32 out_chunk_size;
352
353         /* Flags that affect how the blobs will be written.  */
354         int write_resource_flags;
355
356         /* Data used for issuing WRITE_STREAMS progress.  */
357         struct write_blobs_progress_data progress_data;
358
359         struct filter_context *filter_ctx;
360
361         /* Upper bound on the total number of bytes that need to be
362          * compressed.  */
363         u64 num_bytes_to_compress;
364
365         /* Pointer to the chunk_compressor implementation being used for
366          * compressing chunks of data, or NULL if chunks are being written
367          * uncompressed.  */
368         struct chunk_compressor *compressor;
369
370         /* A buffer of size @out_chunk_size that has been loaned out from the
371          * chunk compressor and is currently being filled with the uncompressed
372          * data of the next chunk.  */
373         u8 *cur_chunk_buf;
374
375         /* Number of bytes in @cur_chunk_buf that are currently filled.  */
376         size_t cur_chunk_buf_filled;
377
378         /* List of blobs that currently have chunks being compressed.  */
379         struct list_head blobs_being_compressed;
380
381         /* List of blobs in the solid resource.  Blobs are moved here from
382          * @blobs_being_compressed only when writing a solid resource.  */
383         struct list_head blobs_in_solid_resource;
384
385         /* Current uncompressed offset in the blob being read.  */
386         u64 cur_read_blob_offset;
387
388         /* Uncompressed size of the blob currently being read.  */
389         u64 cur_read_blob_size;
390
391         /* Current uncompressed offset in the blob being written.  */
392         u64 cur_write_blob_offset;
393
394         /* Uncompressed size of the resource currently being written.  */
395         u64 cur_write_res_size;
396
397         /* Array that is filled in with compressed chunk sizes as a resource is
398          * being written.  */
399         u64 *chunk_csizes;
400
401         /* Index of next entry in @chunk_csizes to fill in.  */
402         size_t chunk_index;
403
404         /* Number of entries in @chunk_csizes currently allocated.  */
405         size_t num_alloc_chunks;
406
407         /* Offset in the output file of the start of the chunks of the resource
408          * currently being written.  */
409         u64 chunks_start_offset;
410 };
411
412 /* Reserve space for the chunk table and prepare to accumulate the chunk table
413  * in memory.  */
414 static int
415 begin_chunk_table(struct write_blobs_ctx *ctx, u64 res_expected_size)
416 {
417         u64 expected_num_chunks;
418         u64 expected_num_chunk_entries;
419         size_t reserve_size;
420         int ret;
421
422         /* Calculate the number of chunks and chunk entries that should be
423          * needed for the resource.  These normally will be the final values,
424          * but in SOLID mode some of the blobs we're planning to write into the
425          * resource may be duplicates, and therefore discarded, potentially
426          * decreasing the number of chunk entries needed.  */
427         expected_num_chunks = DIV_ROUND_UP(res_expected_size, ctx->out_chunk_size);
428         expected_num_chunk_entries = expected_num_chunks;
429         if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
430                 expected_num_chunk_entries--;
431
432         /* Make sure the chunk_csizes array is long enough to store the
433          * compressed size of each chunk.  */
434         if (expected_num_chunks > ctx->num_alloc_chunks) {
435                 u64 new_length = expected_num_chunks + 50;
436
437                 if ((size_t)new_length != new_length) {
438                         ERROR("Resource size too large (%"PRIu64" bytes)!",
439                               res_expected_size);
440                         return WIMLIB_ERR_NOMEM;
441                 }
442
443                 FREE(ctx->chunk_csizes);
444                 ctx->chunk_csizes = MALLOC(new_length * sizeof(ctx->chunk_csizes[0]));
445                 if (ctx->chunk_csizes == NULL) {
446                         ctx->num_alloc_chunks = 0;
447                         return WIMLIB_ERR_NOMEM;
448                 }
449                 ctx->num_alloc_chunks = new_length;
450         }
451
452         ctx->chunk_index = 0;
453
454         if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)) {
455                 /* Reserve space for the chunk table in the output file.  In the
456                  * case of solid resources this reserves the upper bound for the
457                  * needed space, not necessarily the exact space which will
458                  * prove to be needed.  At this point, we just use @chunk_csizes
459                  * for a buffer of 0's because the actual compressed chunk sizes
460                  * are unknown.  */
461                 reserve_size = expected_num_chunk_entries *
462                                get_chunk_entry_size(res_expected_size,
463                                                     0 != (ctx->write_resource_flags &
464                                                           WRITE_RESOURCE_FLAG_SOLID));
465                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)
466                         reserve_size += sizeof(struct alt_chunk_table_header_disk);
467                 memset(ctx->chunk_csizes, 0, reserve_size);
468                 ret = full_write(ctx->out_fd, ctx->chunk_csizes, reserve_size);
469                 if (ret)
470                         return ret;
471         }
472         return 0;
473 }
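/*
 * Example reservation (hypothetical sizes, and assuming get_chunk_entry_size()
 * selects 4-byte entries for a resource this small): a non-solid resource with
 * res_expected_size = 100 MiB and out_chunk_size = 32 KiB needs
 * DIV_ROUND_UP(100 MiB, 32 KiB) = 3200 chunks and therefore 3199 chunk table
 * entries, so 3199 * 4 = 12796 bytes of zeroes are reserved ahead of the chunk
 * data; the real entries are written over this space by end_chunk_table().
 */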
474
475 static int
476 begin_write_resource(struct write_blobs_ctx *ctx, u64 res_expected_size)
477 {
478         int ret;
479
480         wimlib_assert(res_expected_size != 0);
481
482         if (ctx->compressor != NULL) {
483                 ret = begin_chunk_table(ctx, res_expected_size);
484                 if (ret)
485                         return ret;
486         }
487
488         /* Output file descriptor is now positioned at the offset at which to
489          * write the first chunk of the resource.  */
490         ctx->chunks_start_offset = ctx->out_fd->offset;
491         ctx->cur_write_blob_offset = 0;
492         ctx->cur_write_res_size = res_expected_size;
493         return 0;
494 }
495
496 static int
497 end_chunk_table(struct write_blobs_ctx *ctx, u64 res_actual_size,
498                 u64 *res_start_offset_ret, u64 *res_store_size_ret)
499 {
500         size_t actual_num_chunks;
501         size_t actual_num_chunk_entries;
502         size_t chunk_entry_size;
503         int ret;
504
505         actual_num_chunks = ctx->chunk_index;
506         actual_num_chunk_entries = actual_num_chunks;
507         if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
508                 actual_num_chunk_entries--;
509
510         chunk_entry_size = get_chunk_entry_size(res_actual_size,
511                                                 0 != (ctx->write_resource_flags &
512                                                       WRITE_RESOURCE_FLAG_SOLID));
513
514         typedef le64 _may_alias_attribute aliased_le64_t;
515         typedef le32 _may_alias_attribute aliased_le32_t;
516
517         if (chunk_entry_size == 4) {
518                 aliased_le32_t *entries = (aliased_le32_t*)ctx->chunk_csizes;
519
520                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
521                         for (size_t i = 0; i < actual_num_chunk_entries; i++)
522                                 entries[i] = cpu_to_le32(ctx->chunk_csizes[i]);
523                 } else {
524                         u32 offset = ctx->chunk_csizes[0];
525                         for (size_t i = 0; i < actual_num_chunk_entries; i++) {
526                                 u32 next_size = ctx->chunk_csizes[i + 1];
527                                 entries[i] = cpu_to_le32(offset);
528                                 offset += next_size;
529                         }
530                 }
531         } else {
532                 aliased_le64_t *entries = (aliased_le64_t*)ctx->chunk_csizes;
533
534                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
535                         for (size_t i = 0; i < actual_num_chunk_entries; i++)
536                                 entries[i] = cpu_to_le64(ctx->chunk_csizes[i]);
537                 } else {
538                         u64 offset = ctx->chunk_csizes[0];
539                         for (size_t i = 0; i < actual_num_chunk_entries; i++) {
540                                 u64 next_size = ctx->chunk_csizes[i + 1];
541                                 entries[i] = cpu_to_le64(offset);
542                                 offset += next_size;
543                         }
544                 }
545         }
546
547         size_t chunk_table_size = actual_num_chunk_entries * chunk_entry_size;
548         u64 res_start_offset;
549         u64 res_end_offset;
550
551         if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
552                 ret = full_write(ctx->out_fd, ctx->chunk_csizes, chunk_table_size);
553                 if (ret)
554                         goto write_error;
555                 res_end_offset = ctx->out_fd->offset;
556                 res_start_offset = ctx->chunks_start_offset;
557         } else {
558                 res_end_offset = ctx->out_fd->offset;
559
560                 u64 chunk_table_offset;
561
562                 chunk_table_offset = ctx->chunks_start_offset - chunk_table_size;
563
564                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
565                         struct alt_chunk_table_header_disk hdr;
566
567                         hdr.res_usize = cpu_to_le64(res_actual_size);
568                         hdr.chunk_size = cpu_to_le32(ctx->out_chunk_size);
569                         hdr.compression_format = cpu_to_le32(ctx->out_ctype);
570
571                         BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_XPRESS != 1);
572                         BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZX != 2);
573                         BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZMS != 3);
574
575                         ret = full_pwrite(ctx->out_fd, &hdr, sizeof(hdr),
576                                           chunk_table_offset - sizeof(hdr));
577                         if (ret)
578                                 goto write_error;
579                         res_start_offset = chunk_table_offset - sizeof(hdr);
580                 } else {
581                         res_start_offset = chunk_table_offset;
582                 }
583
584                 ret = full_pwrite(ctx->out_fd, ctx->chunk_csizes,
585                                   chunk_table_size, chunk_table_offset);
586                 if (ret)
587                         goto write_error;
588         }
589
590         *res_start_offset_ret = res_start_offset;
591         *res_store_size_ret = res_end_offset - res_start_offset;
592
593         return 0;
594
595 write_error:
596         ERROR_WITH_ERRNO("Write error");
597         return ret;
598 }
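/*
 * Worked example of the chunk table contents (hypothetical compressed sizes):
 * suppose a non-solid resource was written as three chunks with compressed
 * sizes {100, 200, 150}.  Then actual_num_chunk_entries = 2 and the entries
 * hold the offsets of the second and later chunks from the start of the chunk
 * data: entries[0] = 100, entries[1] = 300 (the first chunk's offset, 0, is
 * implicit).  In solid mode there is one entry per chunk and each entry holds
 * that chunk's compressed size: {100, 200, 150}.
 */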
599
600 /* Finish writing a WIM resource by writing or updating the chunk table (if not
601  * writing the data uncompressed) and loading its metadata into @out_reshdr.  */
602 static int
603 end_write_resource(struct write_blobs_ctx *ctx, struct wim_reshdr *out_reshdr)
604 {
605         int ret;
606         u64 res_size_in_wim;
607         u64 res_uncompressed_size;
608         u64 res_offset_in_wim;
609
610         wimlib_assert(ctx->cur_write_blob_offset == ctx->cur_write_res_size ||
611                       (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID));
612         res_uncompressed_size = ctx->cur_write_res_size;
613
614         if (ctx->compressor) {
615                 ret = end_chunk_table(ctx, res_uncompressed_size,
616                                       &res_offset_in_wim, &res_size_in_wim);
617                 if (ret)
618                         return ret;
619         } else {
620                 res_offset_in_wim = ctx->chunks_start_offset;
621                 res_size_in_wim = ctx->out_fd->offset - res_offset_in_wim;
622         }
623         out_reshdr->uncompressed_size = res_uncompressed_size;
624         out_reshdr->size_in_wim = res_size_in_wim;
625         out_reshdr->offset_in_wim = res_offset_in_wim;
626         DEBUG("Finished writing resource: %"PRIu64" => %"PRIu64" @ %"PRIu64"",
627               res_uncompressed_size, res_size_in_wim, res_offset_in_wim);
628         return 0;
629 }
630
631 /* Call when no more data from the file at @path is needed.  */
632 static int
633 done_with_file(const tchar *path, wimlib_progress_func_t progfunc, void *progctx)
634 {
635         union wimlib_progress_info info;
636
637         info.done_with_file.path_to_file = path;
638
639         return call_progress(progfunc, WIMLIB_PROGRESS_MSG_DONE_WITH_FILE,
640                              &info, progctx);
641 }
642
643 static int
644 do_done_with_blob(struct blob_descriptor *blob,
645                   wimlib_progress_func_t progfunc, void *progctx)
646 {
647         int ret;
648         struct wim_inode *inode;
649
650         if (!blob->may_send_done_with_file)
651                 return 0;
652
653         inode = blob->file_inode;
654
655         wimlib_assert(inode != NULL);
656         wimlib_assert(inode->num_remaining_streams > 0);
657         if (--inode->num_remaining_streams > 0)
658                 return 0;
659
660 #ifdef __WIN32__
661         /* XXX: This logic really should be somewhere else.  */
662
663         /* We want the path to the file, but blob->file_on_disk might actually
664          * refer to a named data stream.  Temporarily strip the named data
665          * stream from the path.  */
666         wchar_t *p_colon = NULL;
667         wchar_t *p_question_mark = NULL;
668         const wchar_t *p_stream_name;
669
670         p_stream_name = path_stream_name(blob->file_on_disk);
671         if (unlikely(p_stream_name)) {
672                 p_colon = (wchar_t *)(p_stream_name - 1);
673                 wimlib_assert(*p_colon == L':');
674                 *p_colon = L'\0';
675         }
676
677         /* We should also use a fake Win32 path instead of an NT path.  */
678         if (!wcsncmp(blob->file_on_disk, L"\\??\\", 4)) {
679                 p_question_mark = &blob->file_on_disk[1];
680                 *p_question_mark = L'\\';
681         }
682 #endif
683
684         ret = done_with_file(blob->file_on_disk, progfunc, progctx);
685
686 #ifdef __WIN32__
687         if (p_colon)
688                 *p_colon = L':';
689         if (p_question_mark)
690                 *p_question_mark = L'?';
691 #endif
692         return ret;
693 }
694
695 /* Handle WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES mode.  */
696 static inline int
697 done_with_blob(struct blob_descriptor *blob, struct write_blobs_ctx *ctx)
698 {
699         if (likely(!(ctx->write_resource_flags &
700                      WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE)))
701                 return 0;
702         return do_done_with_blob(blob, ctx->progress_data.progfunc,
703                                  ctx->progress_data.progctx);
704 }
705
706 /* Begin processing a blob for writing.  */
707 static int
708 write_blob_begin_read(struct blob_descriptor *blob, void *_ctx)
709 {
710         struct write_blobs_ctx *ctx = _ctx;
711         int ret;
712
713         wimlib_assert(blob->size > 0);
714
715         ctx->cur_read_blob_offset = 0;
716         ctx->cur_read_blob_size = blob->size;
717
718         /* As an optimization, we allow some blobs to be "unhashed", meaning
719          * their SHA-1 message digests are unknown.  This is the case with blobs
720          * that are added by scanning a directory tree with wimlib_add_image(),
721          * for example.  Since WIM uses single-instance blobs, we don't know
722          * whether each such blob really needs to be written until it is
723          * actually checksummed, unless it has a unique size.  In such cases we
724          * read and checksum the blob in this function, thereby advancing ahead
725          * of read_blob_list(), which will still provide the data again to
726          * write_blob_process_chunk().  This is okay because an unhashed blob
727          * cannot be in a WIM resource, which might be costly to decompress.  */
728         if (ctx->blob_table != NULL && blob->unhashed && !blob->unique_size) {
729
730                 struct blob_descriptor *new_blob;
731
732                 ret = hash_unhashed_blob(blob, ctx->blob_table, &new_blob);
733                 if (ret)
734                         return ret;
735                 if (new_blob != blob) {
736                         /* Duplicate blob detected.  */
737
738                         if (new_blob->will_be_in_output_wim ||
739                             blob_filtered(new_blob, ctx->filter_ctx))
740                         {
741                                 /* The duplicate blob is already being included
742                                  * in the output WIM, or it would be filtered
743                                  * out if it had been.  Skip writing this blob
744                                  * (and reading it again) entirely, passing its
745                                  * output reference count to the duplicate blob
746                                  * in the former case.  */
747                                 DEBUG("Discarding duplicate blob of "
748                                       "length %"PRIu64, blob->size);
749                                 ret = do_write_blobs_progress(&ctx->progress_data,
750                                                               blob->size, 1, true);
751                                 list_del(&blob->write_blobs_list);
752                                 list_del(&blob->blob_table_list);
753                                 if (new_blob->will_be_in_output_wim)
754                                         new_blob->out_refcnt += blob->out_refcnt;
755                                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)
756                                         ctx->cur_write_res_size -= blob->size;
757                                 if (!ret)
758                                         ret = done_with_blob(blob, ctx);
759                                 free_blob_descriptor(blob);
760                                 if (ret)
761                                         return ret;
762                                 return BEGIN_BLOB_STATUS_SKIP_BLOB;
763                         } else {
764                                 /* The duplicate blob can validly be written,
765                                  * but was not marked as such.  Discard the
766                                  * current blob descriptor and use the
767                                  * duplicate, but actually freeing the current
768                                  * blob descriptor must wait until
769                                  * read_blob_list() has finished reading its
770                                  * data.  */
771                                 DEBUG("Blob duplicate, but not already "
772                                       "selected for writing.");
773                                 list_replace(&blob->write_blobs_list,
774                                              &new_blob->write_blobs_list);
775                                 list_replace(&blob->blob_table_list,
776                                              &new_blob->blob_table_list);
777                                 blob->will_be_in_output_wim = 0;
778                                 new_blob->out_refcnt = blob->out_refcnt;
779                                 new_blob->will_be_in_output_wim = 1;
780                                 new_blob->may_send_done_with_file = 0;
781                                 blob = new_blob;
782                         }
783                 }
784         }
785         list_move_tail(&blob->write_blobs_list, &ctx->blobs_being_compressed);
786         return 0;
787 }
788
789 /* Rewrite a blob that was just written compressed as uncompressed
790  * instead.  */
791 static int
792 write_blob_uncompressed(struct blob_descriptor *blob, struct filedes *out_fd)
793 {
794         int ret;
795         u64 begin_offset = blob->out_reshdr.offset_in_wim;
796         u64 end_offset = out_fd->offset;
797
798         if (filedes_seek(out_fd, begin_offset) == -1)
799                 return 0;
800
801         ret = extract_full_blob_to_fd(blob, out_fd);
802         if (ret) {
803                 /* Error reading the uncompressed data.  */
804                 if (out_fd->offset == begin_offset &&
805                     filedes_seek(out_fd, end_offset) != -1)
806                 {
807                         /* Nothing was actually written yet, and we successfully
808                          * seeked to the end of the compressed resource, so
809                          * don't issue a hard error; just keep the compressed
810                          * resource instead.  */
811                         WARNING("Recovered compressed blob of "
812                                 "size %"PRIu64", continuing on.", blob->size);
813                         return 0;
814                 }
815                 return ret;
816         }
817
818         wimlib_assert(out_fd->offset - begin_offset == blob->size);
819
820         if (out_fd->offset < end_offset &&
821             0 != ftruncate(out_fd->fd, out_fd->offset))
822         {
823                 ERROR_WITH_ERRNO("Can't truncate output file to "
824                                  "offset %"PRIu64, out_fd->offset);
825                 return WIMLIB_ERR_WRITE;
826         }
827
828         blob->out_reshdr.size_in_wim = blob->size;
829         blob->out_reshdr.flags &= ~(WIM_RESHDR_FLAG_COMPRESSED |
830                                     WIM_RESHDR_FLAG_SOLID);
831         return 0;
832 }
833
834 /* Returns true if the specified blob, which was written as a non-solid
835  * resource, should be truncated from the WIM file and re-written uncompressed.
836  * blob->out_reshdr must be filled in from the initial write of the blob.  */
837 static bool
838 should_rewrite_blob_uncompressed(const struct write_blobs_ctx *ctx,
839                                  const struct blob_descriptor *blob)
840 {
841         /* If the compressed data is smaller than the uncompressed data, prefer
842          * the compressed data.  */
843         if (blob->out_reshdr.size_in_wim < blob->out_reshdr.uncompressed_size)
844                 return false;
845
846         /* If we're not actually writing compressed data, then there's no need
847          * for re-writing.  */
848         if (!ctx->compressor)
849                 return false;
850
851         /* If writing a pipable WIM, everything we write to the output is final
852          * (it might actually be a pipe!).  */
853         if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)
854                 return false;
855
856         /* If the blob that would need to be re-read is located in a solid
857          * resource in another WIM file, then re-reading it would be costly.  So
858          * don't do it.
859          *
860          * Exception: if the compressed size happens to be *exactly* the same as
861          * the uncompressed size, then the blob *must* be written uncompressed
862          * in order to remain compatible with the Windows Overlay Filesystem
863          * Filter Driver (WOF).
864          *
865          * TODO: we are currently assuming that the optimization for
866          * single-chunk resources in maybe_rewrite_blob_uncompressed() prevents
867          * this case from being triggered too often.  To fully prevent excessive
868          * decompressions in degenerate cases, we really should obtain the
869          * uncompressed data by decompressing the compressed data we wrote to
870          * the output file.
871          */
872         if (blob->blob_location == BLOB_IN_WIM &&
873             blob->size != blob->rdesc->uncompressed_size &&
874             blob->size != blob->out_reshdr.size_in_wim)
875                 return false;
876
877         return true;
878 }
879
880 static int
881 maybe_rewrite_blob_uncompressed(struct write_blobs_ctx *ctx,
882                                 struct blob_descriptor *blob)
883 {
884         if (!should_rewrite_blob_uncompressed(ctx, blob))
885                 return 0;
886
887         /* Regular (non-solid) WIM resources with exactly one chunk and
888          * compressed size equal to uncompressed size are exactly the same as
889          * the corresponding compressed data --- since there must be 0 entries
890          * in the chunk table and the only chunk must be stored uncompressed.
891          * In this case, there's no need to rewrite anything.  */
892         if (ctx->chunk_index == 1 &&
893             blob->out_reshdr.size_in_wim == blob->out_reshdr.uncompressed_size)
894         {
895                 blob->out_reshdr.flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
896                 return 0;
897         }
898
899         return write_blob_uncompressed(blob, ctx->out_fd);
900 }
901
902 /* Write the next chunk of (typically compressed) data to the output WIM,
903  * handling the writing of the chunk table.  */
904 static int
905 write_chunk(struct write_blobs_ctx *ctx, const void *cchunk,
906             size_t csize, size_t usize)
907 {
908         int ret;
909         struct blob_descriptor *blob;
910         u32 completed_blob_count;
911         u32 completed_size;
912
913         blob = list_entry(ctx->blobs_being_compressed.next,
914                           struct blob_descriptor, write_blobs_list);
915
916         if (ctx->cur_write_blob_offset == 0 &&
917             !(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
918         {
919                 /* Starting to write a new blob in non-solid mode.  */
920
921                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
922                         DEBUG("Writing pipable WIM blob header "
923                               "(offset=%"PRIu64")", ctx->out_fd->offset);
924                         ret = write_pwm_blob_header(blob, ctx->out_fd,
925                                                     ctx->compressor != NULL);
926                         if (ret)
927                                 return ret;
928                 }
929
930                 ret = begin_write_resource(ctx, blob->size);
931                 if (ret)
932                         return ret;
933         }
934
935         if (ctx->compressor != NULL) {
936                 /* Record the compressed chunk size.  */
937                 wimlib_assert(ctx->chunk_index < ctx->num_alloc_chunks);
938                 ctx->chunk_csizes[ctx->chunk_index++] = csize;
939
940                 /* If writing a pipable WIM, precede the chunk data with a chunk
941                  * header that provides its compressed size.  */
942                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
943                         struct pwm_chunk_hdr chunk_hdr = {
944                                 .compressed_size = cpu_to_le32(csize),
945                         };
946                         ret = full_write(ctx->out_fd, &chunk_hdr,
947                                          sizeof(chunk_hdr));
948                         if (ret)
949                                 goto write_error;
950                 }
951         }
952
953         /* Write the chunk data.  */
954         ret = full_write(ctx->out_fd, cchunk, csize);
955         if (ret)
956                 goto write_error;
957
958         ctx->cur_write_blob_offset += usize;
959
960         completed_size = usize;
961         completed_blob_count = 0;
962         if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
963                 /* Wrote chunk in solid mode.  It may have finished multiple
964                  * blobs.  */
965                 struct blob_descriptor *next_blob;
966
967                 while (blob && ctx->cur_write_blob_offset >= blob->size) {
968
969                         ctx->cur_write_blob_offset -= blob->size;
970
971                         if (ctx->cur_write_blob_offset)
972                                 next_blob = list_entry(blob->write_blobs_list.next,
973                                                       struct blob_descriptor,
974                                                       write_blobs_list);
975                         else
976                                 next_blob = NULL;
977
978                         ret = done_with_blob(blob, ctx);
979                         if (ret)
980                                 return ret;
981                         list_move_tail(&blob->write_blobs_list, &ctx->blobs_in_solid_resource);
982                         completed_blob_count++;
983
984                         blob = next_blob;
985                 }
986         } else {
987                 /* Wrote chunk in non-solid mode.  It may have finished a
988                  * blob.  */
989                 if (ctx->cur_write_blob_offset == blob->size) {
990
991                         wimlib_assert(ctx->cur_write_blob_offset ==
992                                       ctx->cur_write_res_size);
993
994                         ret = end_write_resource(ctx, &blob->out_reshdr);
995                         if (ret)
996                                 return ret;
997
998                         blob->out_reshdr.flags = reshdr_flags_for_blob(blob);
999                         if (ctx->compressor != NULL)
1000                                 blob->out_reshdr.flags |= WIM_RESHDR_FLAG_COMPRESSED;
1001
1002                         ret = maybe_rewrite_blob_uncompressed(ctx, blob);
1003                         if (ret)
1004                                 return ret;
1005
1006                         wimlib_assert(blob->out_reshdr.uncompressed_size == blob->size);
1007
1008                         ctx->cur_write_blob_offset = 0;
1009
1010                         ret = done_with_blob(blob, ctx);
1011                         if (ret)
1012                                 return ret;
1013                         list_del(&blob->write_blobs_list);
1014                         completed_blob_count++;
1015                 }
1016         }
1017
1018         return do_write_blobs_progress(&ctx->progress_data, completed_size,
1019                                        completed_blob_count, false);
1020
1021 write_error:
1022         ERROR_WITH_ERRNO("Write error");
1023         return ret;
1024 }
1025
1026 static int
1027 prepare_chunk_buffer(struct write_blobs_ctx *ctx)
1028 {
1029         /* While we are unable to get a new chunk buffer due to too many chunks
1030          * already outstanding, retrieve and write the next compressed chunk. */
1031         while (!(ctx->cur_chunk_buf =
1032                  ctx->compressor->get_chunk_buffer(ctx->compressor)))
1033         {
1034                 const void *cchunk;
1035                 u32 csize;
1036                 u32 usize;
1037                 bool bret;
1038                 int ret;
1039
1040                 bret = ctx->compressor->get_compression_result(ctx->compressor,
1041                                                                &cchunk,
1042                                                                &csize,
1043                                                                &usize);
1044                 wimlib_assert(bret);
1045
1046                 ret = write_chunk(ctx, cchunk, csize, usize);
1047                 if (ret)
1048                         return ret;
1049         }
1050         return 0;
1051 }
1052
1053 /* Process the next chunk of data to be written to a WIM resource.  */
1054 static int
1055 write_blob_process_chunk(const void *chunk, size_t size, void *_ctx)
1056 {
1057         struct write_blobs_ctx *ctx = _ctx;
1058         int ret;
1059         const u8 *chunkptr, *chunkend;
1060
1061         wimlib_assert(size != 0);
1062
1063         if (ctx->compressor == NULL) {
1064                 /* Write chunk uncompressed.  */
1065                 ret = write_chunk(ctx, chunk, size, size);
1066                 if (ret)
1067                         return ret;
1068                 ctx->cur_read_blob_offset += size;
1069                 return 0;
1070         }
1071
1072         /* Submit the chunk for compression, but take into account that the
1073          * @size the chunk was provided in may not correspond to the
1074          * @out_chunk_size being used for compression.  */
1075         chunkptr = chunk;
1076         chunkend = chunkptr + size;
1077         do {
1078                 size_t needed_chunk_size;
1079                 size_t bytes_consumed;
1080
1081                 if (!ctx->cur_chunk_buf) {
1082                         ret = prepare_chunk_buffer(ctx);
1083                         if (ret)
1084                                 return ret;
1085                 }
1086
1087                 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1088                         needed_chunk_size = ctx->out_chunk_size;
1089                 } else {
1090                         needed_chunk_size = min(ctx->out_chunk_size,
1091                                                 ctx->cur_chunk_buf_filled +
1092                                                         (ctx->cur_read_blob_size -
1093                                                          ctx->cur_read_blob_offset));
1094                 }
1095
1096                 bytes_consumed = min(chunkend - chunkptr,
1097                                      needed_chunk_size - ctx->cur_chunk_buf_filled);
1098
1099                 memcpy(&ctx->cur_chunk_buf[ctx->cur_chunk_buf_filled],
1100                        chunkptr, bytes_consumed);
1101
1102                 chunkptr += bytes_consumed;
1103                 ctx->cur_read_blob_offset += bytes_consumed;
1104                 ctx->cur_chunk_buf_filled += bytes_consumed;
1105
1106                 if (ctx->cur_chunk_buf_filled == needed_chunk_size) {
1107                         ctx->compressor->signal_chunk_filled(ctx->compressor,
1108                                                              ctx->cur_chunk_buf_filled);
1109                         ctx->cur_chunk_buf = NULL;
1110                         ctx->cur_chunk_buf_filled = 0;
1111                 }
1112         } while (chunkptr != chunkend);
1113         return 0;
1114 }
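/*
 * Example of the buffering above (hypothetical sizes): with
 * out_chunk_size = 32768 and data arriving from read_blob_list() in
 * 10000-byte pieces, the first three pieces plus 2768 bytes of the fourth
 * fill one compressor buffer, which is then signalled as a complete
 * 32768-byte chunk; the remaining 7232 bytes start the next buffer.  In
 * non-solid mode the final chunk of a blob is signalled as soon as the blob's
 * remaining bytes have been copied, even if the buffer is not full.
 */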
1115
1116 /* Finish processing a blob for writing.  It may not have been completely
1117  * written yet, as the chunk_compressor implementation may still have chunks
1118  * buffered or being compressed.  */
1119 static int
1120 write_blob_end_read(struct blob_descriptor *blob, int status, void *_ctx)
1121 {
1122         struct write_blobs_ctx *ctx = _ctx;
1123
1124         wimlib_assert(ctx->cur_read_blob_offset == ctx->cur_read_blob_size || status);
1125
1126         if (!blob->will_be_in_output_wim) {
1127                 /* The blob was a duplicate.  Now that its data has finished
1128                  * being read, it is being discarded in favor of the duplicate
1129                  * entry.  It therefore is no longer needed, and we can fire the
1130                  * DONE_WITH_FILE callback because the file will not be read
1131                  * again.
1132                  *
1133                  * Note: we can't yet fire DONE_WITH_FILE for non-duplicate
1134                  * blobs, since it needs to be possible to re-read the file if
1135                  * it does not compress to less than its original size.  */
1136                 if (!status)
1137                         status = done_with_blob(blob, ctx);
1138                 free_blob_descriptor(blob);
1139         } else if (!status && blob->unhashed && ctx->blob_table != NULL) {
1140                 /* The blob was not a duplicate and was previously unhashed.
1141                  * Since we passed COMPUTE_MISSING_BLOB_HASHES to
1142                  * read_blob_list(), blob->hash is now computed and valid.  So
1143                  * turn this blob into a "hashed" blob.  */
1144                 list_del(&blob->unhashed_list);
1145                 blob_table_insert(ctx->blob_table, blob);
1146                 blob->unhashed = 0;
1147         }
1148         return status;
1149 }
1150
1151 /* Compute statistics about a list of blobs that will be written.
1152  *
1153  * Assumes the blobs are sorted such that all blobs located in each distinct WIM
1154  * (specified by WIMStruct) are together.  */
1155 static void
1156 compute_blob_list_stats(struct list_head *blob_list,
1157                         struct write_blobs_ctx *ctx)
1158 {
1159         struct blob_descriptor *blob;
1160         u64 total_bytes = 0;
1161         u64 num_blobs = 0;
1162         u64 total_parts = 0;
1163         WIMStruct *prev_wim_part = NULL;
1164
1165         list_for_each_entry(blob, blob_list, write_blobs_list) {
1166                 num_blobs++;
1167                 total_bytes += blob->size;
1168                 if (blob->blob_location == BLOB_IN_WIM) {
1169                         if (prev_wim_part != blob->rdesc->wim) {
1170                                 prev_wim_part = blob->rdesc->wim;
1171                                 total_parts++;
1172                         }
1173                 }
1174         }
1175         ctx->progress_data.progress.write_streams.total_bytes       = total_bytes;
1176         ctx->progress_data.progress.write_streams.total_streams     = num_blobs;
1177         ctx->progress_data.progress.write_streams.completed_bytes   = 0;
1178         ctx->progress_data.progress.write_streams.completed_streams = 0;
1179         ctx->progress_data.progress.write_streams.compression_type  = ctx->out_ctype;
1180         ctx->progress_data.progress.write_streams.total_parts       = total_parts;
1181         ctx->progress_data.progress.write_streams.completed_parts   = 0;
1182         ctx->progress_data.next_progress = 0;
1183 }
1184
1185 /* Find blobs in @blob_list that can be copied to the output WIM in raw form
1186  * rather than compressed.  Delete these blobs from @blob_list and move them to
1187  * @raw_copy_blobs.  Return the total uncompressed size of the blobs that need
1188  * to be compressed.  */
1189 static u64
1190 find_raw_copy_blobs(struct list_head *blob_list,
1191                     int write_resource_flags,
1192                     int out_ctype,
1193                     u32 out_chunk_size,
1194                     struct list_head *raw_copy_blobs)
1195 {
1196         struct blob_descriptor *blob, *tmp;
1197         u64 num_bytes_to_compress = 0;
1198
1199         INIT_LIST_HEAD(raw_copy_blobs);
1200
1201         /* Initialize temporary raw_copy_ok flag.  */
1202         list_for_each_entry(blob, blob_list, write_blobs_list)
1203                 if (blob->blob_location == BLOB_IN_WIM)
1204                         blob->rdesc->raw_copy_ok = 0;
1205
1206         list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
1207                 if (blob->blob_location == BLOB_IN_WIM &&
1208                     blob->rdesc->raw_copy_ok)
1209                 {
1210                         list_move_tail(&blob->write_blobs_list,
1211                                        raw_copy_blobs);
1212                 } else if (can_raw_copy(blob, write_resource_flags,
1213                                         out_ctype, out_chunk_size))
1214                 {
1215                         blob->rdesc->raw_copy_ok = 1;
1216                         list_move_tail(&blob->write_blobs_list,
1217                                        raw_copy_blobs);
1218                 } else {
1219                         num_bytes_to_compress += blob->size;
1220                 }
1221         }
1222
1223         return num_bytes_to_compress;
1224 }
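/*
 * Note on the raw_copy_ok handling above: if several blobs to be written all
 * reside in the same solid resource, the first one that passes can_raw_copy()
 * sets rdesc->raw_copy_ok, and the remaining blobs of that resource are then
 * moved to @raw_copy_blobs by the first branch without re-evaluating the
 * heuristic, so the whole resource is treated as a single raw-copy unit.
 */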
1225
1226 /* Copy a raw compressed resource located in another WIM file to the WIM file
1227  * being written.  */
1228 static int
1229 write_raw_copy_resource(struct wim_resource_descriptor *in_rdesc,
1230                         struct filedes *out_fd)
1231 {
1232         u64 cur_read_offset;
1233         u64 end_read_offset;
1234         u8 buf[BUFFER_SIZE];
1235         size_t bytes_to_read;
1236         int ret;
1237         struct filedes *in_fd;
1238         struct blob_descriptor *blob;
1239         u64 out_offset_in_wim;
1240
1241         DEBUG("Copying raw compressed data (size_in_wim=%"PRIu64", "
1242               "uncompressed_size=%"PRIu64")",
1243               in_rdesc->size_in_wim, in_rdesc->uncompressed_size);
1244
1245         /* Copy the raw data.  */
1246         cur_read_offset = in_rdesc->offset_in_wim;
1247         end_read_offset = cur_read_offset + in_rdesc->size_in_wim;
1248
1249         out_offset_in_wim = out_fd->offset;
1250
1251         if (in_rdesc->is_pipable) {
1252                 if (cur_read_offset < sizeof(struct pwm_blob_hdr))
1253                         return WIMLIB_ERR_INVALID_PIPABLE_WIM;
1254                 cur_read_offset -= sizeof(struct pwm_blob_hdr);
1255                 out_offset_in_wim += sizeof(struct pwm_blob_hdr);
1256         }
1257         in_fd = &in_rdesc->wim->in_fd;
1258         wimlib_assert(cur_read_offset != end_read_offset);
1259         do {
1260
1261                 bytes_to_read = min(sizeof(buf), end_read_offset - cur_read_offset);
1262
1263                 ret = full_pread(in_fd, buf, bytes_to_read, cur_read_offset);
1264                 if (ret)
1265                         return ret;
1266
1267                 ret = full_write(out_fd, buf, bytes_to_read);
1268                 if (ret)
1269                         return ret;
1270
1271                 cur_read_offset += bytes_to_read;
1272
1273         } while (cur_read_offset != end_read_offset);
1274
1275         list_for_each_entry(blob, &in_rdesc->blob_list, rdesc_node) {
1276                 if (blob->will_be_in_output_wim) {
1277                         blob_set_out_reshdr_for_reuse(blob);
1278                         if (in_rdesc->flags & WIM_RESHDR_FLAG_SOLID)
1279                                 blob->out_res_offset_in_wim = out_offset_in_wim;
1280                         else
1281                                 blob->out_reshdr.offset_in_wim = out_offset_in_wim;
1282
1283                 }
1284         }
1285         return 0;
1286 }
1287
1288 /* Copy a list of raw compressed resources located in other WIM file(s) to the
1289  * WIM file being written.  */
1290 static int
1291 write_raw_copy_resources(struct list_head *raw_copy_blobs,
1292                          struct filedes *out_fd,
1293                          struct write_blobs_progress_data *progress_data)
1294 {
1295         struct blob_descriptor *blob;
1296         int ret;
1297
1298         list_for_each_entry(blob, raw_copy_blobs, write_blobs_list)
1299                 blob->rdesc->raw_copy_ok = 1;
1300
1301         list_for_each_entry(blob, raw_copy_blobs, write_blobs_list) {
1302                 if (blob->rdesc->raw_copy_ok) {
1303                         /* Write each solid resource only one time.  */
1304                         ret = write_raw_copy_resource(blob->rdesc, out_fd);
1305                         if (ret)
1306                                 return ret;
1307                         blob->rdesc->raw_copy_ok = 0;
1308                 }
1309                 ret = do_write_blobs_progress(progress_data, blob->size,
1310                                               1, false);
1311                 if (ret)
1312                         return ret;
1313         }
1314         return 0;
1315 }
1316
1317 /* Wait for and write all chunks pending in the compressor.  */
1318 static int
1319 finish_remaining_chunks(struct write_blobs_ctx *ctx)
1320 {
1321         const void *cdata;
1322         u32 csize;
1323         u32 usize;
1324         int ret;
1325
1326         if (ctx->compressor == NULL)
1327                 return 0;
1328
1329         if (ctx->cur_chunk_buf_filled != 0) {
1330                 ctx->compressor->signal_chunk_filled(ctx->compressor,
1331                                                      ctx->cur_chunk_buf_filled);
1332         }
1333
1334         while (ctx->compressor->get_compression_result(ctx->compressor, &cdata,
1335                                                        &csize, &usize))
1336         {
1337                 ret = write_chunk(ctx, cdata, csize, usize);
1338                 if (ret)
1339                         return ret;
1340         }
1341         return 0;
1342 }
1343
1344 static void
1345 remove_empty_blobs(struct list_head *blob_list)
1346 {
1347         struct blob_descriptor *blob, *tmp;
1348
1349         list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
1350                 wimlib_assert(blob->will_be_in_output_wim);
1351                 if (blob->size == 0) {
1352                         list_del(&blob->write_blobs_list);
1353                         blob->out_reshdr.offset_in_wim = 0;
1354                         blob->out_reshdr.size_in_wim = 0;
1355                         blob->out_reshdr.uncompressed_size = 0;
1356                         blob->out_reshdr.flags = reshdr_flags_for_blob(blob);
1357                 }
1358         }
1359 }
1360
1361 static inline bool
1362 blob_is_in_file(const struct blob_descriptor *blob)
1363 {
1364         return blob->blob_location == BLOB_IN_FILE_ON_DISK
1365 #ifdef __WIN32__
1366             || blob->blob_location == BLOB_IN_WINNT_FILE_ON_DISK
1367             || blob->blob_location == BLOB_WIN32_ENCRYPTED
1368 #endif
1369            ;
1370 }
1371
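/*
 * For each blob that will be read from an on-disk file, initialize the
 * containing inode's count of streams remaining to be written.  This allows a
 * "done with file" notification to be sent as soon as the last blob belonging
 * to a given file has been consumed.
 */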
1372 static void
1373 init_done_with_file_info(struct list_head *blob_list)
1374 {
1375         struct blob_descriptor *blob;
1376
1377         list_for_each_entry(blob, blob_list, write_blobs_list) {
1378                 if (blob_is_in_file(blob)) {
1379                         blob->file_inode->num_remaining_streams = 0;
1380                         blob->may_send_done_with_file = 1;
1381                 } else {
1382                         blob->may_send_done_with_file = 0;
1383                 }
1384         }
1385
1386         list_for_each_entry(blob, blob_list, write_blobs_list)
1387                 if (blob->may_send_done_with_file)
1388                         blob->file_inode->num_remaining_streams++;
1389 }
1390
1391 /*
1392  * Write a list of blobs to the output WIM file.
1393  *
1394  * @blob_list
1395  *      The list of blobs to write, given as 'struct blob_descriptor's linked
1396  *      by the 'write_blobs_list' member.
1397  *
1398  * @out_fd
1399  *      The file descriptor, opened for writing, to which to write the blobs.
1400  *
1401  * @write_resource_flags
1402  *      Flags to modify how the blobs are written:
1403  *
1404  *      WRITE_RESOURCE_FLAG_RECOMPRESS:
1405  *              Force compression of all resources, even if they could otherwise
1406  *              be re-used by copying the raw data, due to being located in a WIM
1407  *              file with compatible compression parameters.
1408  *
1409  *      WRITE_RESOURCE_FLAG_PIPABLE:
1410  *              Write the resources in the wimlib-specific pipable format, and
1411  *              furthermore do so in such a way that no seeking backwards in
1412  *              @out_fd will be performed (so it may be a pipe).
1413  *
1414  *      WRITE_RESOURCE_FLAG_SOLID:
1415  *              Combine all the blobs into a single resource rather than writing
1416  *              them in separate resources.  This flag is only valid if the WIM
1417  *              version number has been, or will be, set to WIM_VERSION_SOLID.
1418  *              This flag may not be combined with WRITE_RESOURCE_FLAG_PIPABLE.
1419  *
1420  * @out_ctype
1421  *      Compression format to use in the output resources, specified as one of
1422  *      the WIMLIB_COMPRESSION_TYPE_* constants.  WIMLIB_COMPRESSION_TYPE_NONE
1423  *      is allowed.
1424  *
1425  * @out_chunk_size
1426  *      Compression chunk size to use in the output resources.  It must be a
1427  *      valid chunk size for the specified compression format @out_ctype, unless
1428  *      @out_ctype is WIMLIB_COMPRESSION_TYPE_NONE, in which case this parameter
1429  *      is ignored.
1430  *
1431  * @num_threads
1432  *      Number of threads to use to compress data.  If 0, a default number of
1433  *      threads will be chosen.  The number of threads may still be decreased
1434  *      from the specified value if insufficient memory is detected.
1435  *
1436  * @blob_table
1437  *      If on-the-fly deduplication of unhashed blobs is desired, this parameter
1438  *      must be a pointer to the blob table for the WIMStruct on whose behalf the
1439  *      blobs are being written.  Otherwise, this parameter can be NULL.
1440  *
1441  * @filter_ctx
1442  *      If on-the-fly deduplication of unhashed blobs is desired, this parameter
1443  *      can be a pointer to a context for blob filtering used to detect whether
1444  *      the duplicate blob has been hard-filtered or not.  If no blobs are
1445  *      hard-filtered or no blobs are unhashed, this parameter can be NULL.
1446  *
1447  * This function will write the blobs in @blob_list to resources in
1448  * consecutive positions in the output WIM file, or to a single solid resource
1449  * if WRITE_RESOURCE_FLAG_SOLID was specified in @write_resource_flags.  In both
1450  * cases, the @out_reshdr of the `struct blob_descriptor' for each blob written will be
1451  * updated to specify its location, size, and flags in the output WIM.  In the
1452  * solid resource case, WIM_RESHDR_FLAG_SOLID will be set in the @flags field of
1453  * each @out_reshdr, and furthermore @out_res_offset_in_wim and
1454  * @out_res_size_in_wim of each @out_reshdr will be set to the offset and size,
1455  * respectively, in the output WIM of the solid resource containing the
1456  * corresponding blob.
1457  *
1458  * Each of the blobs to write may be in any location supported by the
1459  * resource-handling code (specifically, read_blob_list()), such as the contents
1460  * of an external file that has been logically added to the output WIM, or a blob
1461  * in another WIM file that has been imported, or even a blob in the "same" WIM
1462  * file of which a modified copy is being written.  In the case that a blob is
1463  * already in a WIM file and uses compatible compression parameters, by default
1464  * this function will re-use the raw data instead of decompressing it, then
1465  * recompressing it; however, with WRITE_RESOURCE_FLAG_RECOMPRESS
1466  * specified in @write_resource_flags, this is not done.
1467  *
1468  * Additionally, this function requires that the
1469  * @will_be_in_output_wim member be set to 1 on all blobs in @blob_list as well
1470  * as any other blobs not in @blob_list that will be in the output WIM file, but
1471  * set to 0 on any other blobs in the output WIM's blob table or sharing a solid
1472  * resource with a blob in @blob_list.  Still furthermore, if on-the-fly
1473  * deduplication of blobs is possible, then all blobs in @blob_list must also be
1474  * linked by @blob_table_list along with any other blobs that have
1475  * @will_be_in_output_wim set.
1476  *
1477  * This function handles on-the-fly deduplication of blobs for which SHA-1
1478  * message digests have not yet been calculated.  Such blobs may or may not need
1479  * to be written.  If @blob_table is non-NULL, then each blob in @blob_list that
1480  * has @unhashed set but not @unique_size set is checksummed immediately before
1481  * it would otherwise be read for writing in order to determine if it is
1482  * identical to another blob already being written or one that would be filtered
1483  * out of the output WIM using blob_filtered() with the context @filter_ctx.
1484  * Each such duplicate blob will be removed from @blob_list, its reference count
1485  * transferred to the pre-existing duplicate blob, its memory freed, and will not
1486  * be written.  Alternatively, if a blob in @blob_list is a duplicate with any
1487  * blob in @blob_table that has not been marked for writing or would not be
1488  * hard-filtered, it is freed and the pre-existing duplicate is written instead,
1489  * taking ownership of the reference count and slot in the @blob_table_list.
1490  *
1491  * Returns 0 if every blob was either written successfully or did not need to be
1492  * written; otherwise returns a non-zero error code.
1493  */
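/*
 * Example (illustrative sketch only): a caller holding two hypothetical blobs
 * 'blob1' and 'blob2' might write them as separate LZX-compressed resources,
 * with no deduplication, filtering, or progress reporting, roughly as follows:
 *
 *      LIST_HEAD(blob_list);
 *
 *      list_add_tail(&blob1->write_blobs_list, &blob_list);
 *      list_add_tail(&blob2->write_blobs_list, &blob_list);
 *      blob1->will_be_in_output_wim = 1;
 *      blob2->will_be_in_output_wim = 1;
 *
 *      ret = write_blob_list(&blob_list, out_fd, 0,
 *                            WIMLIB_COMPRESSION_TYPE_LZX, 32768,
 *                            0, NULL, NULL, NULL, NULL);
 *
 * write_wim_resource() below is the single-blob specialization of this
 * pattern.
 */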
1494 static int
1495 write_blob_list(struct list_head *blob_list,
1496                 struct filedes *out_fd,
1497                 int write_resource_flags,
1498                 int out_ctype,
1499                 u32 out_chunk_size,
1500                 unsigned num_threads,
1501                 struct blob_table *blob_table,
1502                 struct filter_context *filter_ctx,
1503                 wimlib_progress_func_t progfunc,
1504                 void *progctx)
1505 {
1506         int ret;
1507         struct write_blobs_ctx ctx;
1508         struct list_head raw_copy_blobs;
1509
1510         wimlib_assert((write_resource_flags &
1511                        (WRITE_RESOURCE_FLAG_SOLID |
1512                         WRITE_RESOURCE_FLAG_PIPABLE)) !=
1513                                 (WRITE_RESOURCE_FLAG_SOLID |
1514                                  WRITE_RESOURCE_FLAG_PIPABLE));
1515
1516         remove_empty_blobs(blob_list);
1517
1518         if (list_empty(blob_list)) {
1519                 DEBUG("No blobs to write.");
1520                 return 0;
1521         }
1522
1523         /* If needed, set auxiliary information so that we can detect when the
1524          * library has finished using each external file.  */
1525         if (unlikely(write_resource_flags & WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE))
1526                 init_done_with_file_info(blob_list);
1527
1528         memset(&ctx, 0, sizeof(ctx));
1529
1530         ctx.out_fd = out_fd;
1531         ctx.blob_table = blob_table;
1532         ctx.out_ctype = out_ctype;
1533         ctx.out_chunk_size = out_chunk_size;
1534         ctx.write_resource_flags = write_resource_flags;
1535         ctx.filter_ctx = filter_ctx;
1536
1537         /*
1538          * We normally sort the blobs to write by a "sequential" order that is
1539          * optimized for reading.  But when using solid compression, we instead
1540          * sort the blobs by file extension and file name (when applicable; and
1541          * we don't do this for blobs from solid resources) so that similar
1542          * files are grouped together, which improves the compression ratio.
1543          * This is somewhat of a hack since a blob does not necessarily
1544          * correspond one-to-one with a filename, nor is there any guarantee
1545          * that two files with similar names or extensions are actually similar
1546          * in content.  A potential TODO is to sort the blobs based on some
1547          * measure of similarity of their actual contents.
1548          */
1549
1550         ret = sort_blob_list_by_sequential_order(blob_list,
1551                                                  offsetof(struct blob_descriptor,
1552                                                           write_blobs_list));
1553         if (ret)
1554                 return ret;
1555
1556         compute_blob_list_stats(blob_list, &ctx);
1557
1558         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID_SORT) {
1559                 ret = sort_blob_list_for_solid_compression(blob_list);
1560                 if (unlikely(ret))
1561                         WARNING("Failed to sort blobs for solid compression. Continuing anyway.");
1562         }
1563
1564         ctx.progress_data.progfunc = progfunc;
1565         ctx.progress_data.progctx = progctx;
1566
1567         ctx.num_bytes_to_compress = find_raw_copy_blobs(blob_list,
1568                                                         write_resource_flags,
1569                                                         out_ctype,
1570                                                         out_chunk_size,
1571                                                         &raw_copy_blobs);
1572
1573         DEBUG("Writing blob list "
1574               "(offset = %"PRIu64", write_resource_flags=0x%08x, "
1575               "out_ctype=%d, out_chunk_size=%u, num_threads=%u, "
1576               "total_bytes=%"PRIu64", num_bytes_to_compress=%"PRIu64")",
1577               out_fd->offset, write_resource_flags,
1578               out_ctype, out_chunk_size, num_threads,
1579               ctx.progress_data.progress.write_streams.total_bytes,
1580               ctx.num_bytes_to_compress);
1581
1582         if (ctx.num_bytes_to_compress == 0) {
1583                 DEBUG("No compression needed; skipping to raw copy!");
1584                 goto out_write_raw_copy_resources;
1585         }
1586
1587         /* Unless uncompressed output was required, allocate a chunk_compressor
1588          * to do compression.  There are serial and parallel implementations of
1589          * the chunk_compressor interface.  We default to parallel using the
1590          * specified number of threads, unless the upper bound on the number of
1591          * bytes needing to be compressed is less than a heuristic value.  */
1592         if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
1593
1594         #ifdef ENABLE_MULTITHREADED_COMPRESSION
1595                 if (ctx.num_bytes_to_compress > max(2000000, out_chunk_size)) {
1596                         ret = new_parallel_chunk_compressor(out_ctype,
1597                                                             out_chunk_size,
1598                                                             num_threads, 0,
1599                                                             &ctx.compressor);
1600                         if (ret > 0) {
1601                                 WARNING("Couldn't create parallel chunk compressor: %"TS".\n"
1602                                         "          Falling back to single-threaded compression.",
1603                                         wimlib_get_error_string(ret));
1604                         }
1605                 }
1606         #endif
1607
1608                 if (ctx.compressor == NULL) {
1609                         ret = new_serial_chunk_compressor(out_ctype, out_chunk_size,
1610                                                           &ctx.compressor);
1611                         if (ret)
1612                                 goto out_destroy_context;
1613                 }
1614         }
1615
1616         if (ctx.compressor)
1617                 ctx.progress_data.progress.write_streams.num_threads = ctx.compressor->num_threads;
1618         else
1619                 ctx.progress_data.progress.write_streams.num_threads = 1;
1620
1621         DEBUG("Actually using %u threads",
1622               ctx.progress_data.progress.write_streams.num_threads);
1623
1624         INIT_LIST_HEAD(&ctx.blobs_being_compressed);
1625         INIT_LIST_HEAD(&ctx.blobs_in_solid_resource);
1626
1627         ret = call_progress(ctx.progress_data.progfunc,
1628                             WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
1629                             &ctx.progress_data.progress,
1630                             ctx.progress_data.progctx);
1631         if (ret)
1632                 goto out_destroy_context;
1633
1634         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1635                 ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress);
1636                 if (ret)
1637                         goto out_destroy_context;
1638         }
1639
1640         /* Read the list of blobs needing to be compressed, using the specified
1641          * callbacks to execute processing of the data.  */
1642
1643         struct read_blob_list_callbacks cbs = {
1644                 .begin_blob             = write_blob_begin_read,
1645                 .begin_blob_ctx         = &ctx,
1646                 .consume_chunk          = write_blob_process_chunk,
1647                 .consume_chunk_ctx      = &ctx,
1648                 .end_blob               = write_blob_end_read,
1649                 .end_blob_ctx           = &ctx,
1650         };
1651
1652         ret = read_blob_list(blob_list,
1653                              offsetof(struct blob_descriptor, write_blobs_list),
1654                              &cbs,
1655                              BLOB_LIST_ALREADY_SORTED |
1656                                 VERIFY_BLOB_HASHES |
1657                                 COMPUTE_MISSING_BLOB_HASHES);
1658
1659         if (ret)
1660                 goto out_destroy_context;
1661
1662         ret = finish_remaining_chunks(&ctx);
1663         if (ret)
1664                 goto out_destroy_context;
1665
1666         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1667                 struct wim_reshdr reshdr;
1668                 struct blob_descriptor *blob;
1669                 u64 offset_in_res;
1670
1671                 ret = end_write_resource(&ctx, &reshdr);
1672                 if (ret)
1673                         goto out_destroy_context;
1674
1675                 DEBUG("Ending solid resource: %"PRIu64" %"PRIu64" %"PRIu64".",
1676                       reshdr.offset_in_wim,
1677                       reshdr.size_in_wim,
1678                       reshdr.uncompressed_size);
1679
1680                 offset_in_res = 0;
1681                 list_for_each_entry(blob, &ctx.blobs_in_solid_resource, write_blobs_list) {
1682                         blob->out_reshdr.size_in_wim = blob->size;
1683                         blob->out_reshdr.flags = reshdr_flags_for_blob(blob) |
1684                                                  WIM_RESHDR_FLAG_SOLID;
1685                         blob->out_reshdr.uncompressed_size = 0;
1686                         blob->out_reshdr.offset_in_wim = offset_in_res;
1687                         blob->out_res_offset_in_wim = reshdr.offset_in_wim;
1688                         blob->out_res_size_in_wim = reshdr.size_in_wim;
1689                         blob->out_res_uncompressed_size = reshdr.uncompressed_size;
1690                         offset_in_res += blob->size;
1691                 }
1692                 wimlib_assert(offset_in_res == reshdr.uncompressed_size);
1693         }
1694
1695 out_write_raw_copy_resources:
1696         /* Copy any compressed resources for which the raw data can be reused
1697          * without decompression.  */
1698         ret = write_raw_copy_resources(&raw_copy_blobs, ctx.out_fd,
1699                                        &ctx.progress_data);
1700
1701 out_destroy_context:
1702         FREE(ctx.chunk_csizes);
1703         if (ctx.compressor)
1704                 ctx.compressor->destroy(ctx.compressor);
1705         DEBUG("Done (ret=%d)", ret);
1706         return ret;
1707 }
1708
1709
1710 static int
1711 wim_write_blob_list(WIMStruct *wim,
1712                     struct list_head *blob_list,
1713                     int write_flags,
1714                     unsigned num_threads,
1715                     struct filter_context *filter_ctx)
1716 {
1717         int out_ctype;
1718         u32 out_chunk_size;
1719         int write_resource_flags;
1720
1721         write_resource_flags = write_flags_to_resource_flags(write_flags);
1722
1723         /* wimlib v1.7.0: create a solid WIM file by default if the WIM version
1724          * has been set to WIM_VERSION_SOLID and at least one blob in the WIM's
1725          * blob table is located in a solid resource (may be the same WIM, or a
1726          * different one in the case of export).  */
1727         if (wim->hdr.wim_version == WIM_VERSION_SOLID &&
1728             wim_has_solid_resources(wim))
1729         {
1730                 write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID;
1731         }
1732
1733         if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1734                 out_chunk_size = wim->out_solid_chunk_size;
1735                 out_ctype = wim->out_solid_compression_type;
1736         } else {
1737                 out_chunk_size = wim->out_chunk_size;
1738                 out_ctype = wim->out_compression_type;
1739         }
1740
1741         return write_blob_list(blob_list,
1742                                &wim->out_fd,
1743                                write_resource_flags,
1744                                out_ctype,
1745                                out_chunk_size,
1746                                num_threads,
1747                                wim->blob_table,
1748                                filter_ctx,
1749                                wim->progfunc,
1750                                wim->progctx);
1751 }
1752
1753 /* Write the contents of the specified blob as a WIM resource.  */
1754 static int
1755 write_wim_resource(struct blob_descriptor *blob,
1756                    struct filedes *out_fd,
1757                    int out_ctype,
1758                    u32 out_chunk_size,
1759                    int write_resource_flags)
1760 {
1761         LIST_HEAD(blob_list);
1762         list_add(&blob->write_blobs_list, &blob_list);
1763         blob->will_be_in_output_wim = 1;
1764         return write_blob_list(&blob_list,
1765                                out_fd,
1766                                write_resource_flags & ~WRITE_RESOURCE_FLAG_SOLID,
1767                                out_ctype,
1768                                out_chunk_size,
1769                                1,
1770                                NULL,
1771                                NULL,
1772                                NULL,
1773                                NULL);
1774 }
1775
1776 /* Write the contents of the specified buffer as a WIM resource.  */
1777 int
1778 write_wim_resource_from_buffer(const void *buf,
1779                                size_t buf_size,
1780                                bool is_metadata,
1781                                struct filedes *out_fd,
1782                                int out_ctype,
1783                                u32 out_chunk_size,
1784                                struct wim_reshdr *out_reshdr,
1785                                u8 *hash_ret,
1786                                int write_resource_flags)
1787 {
1788         int ret;
1789         struct blob_descriptor blob;
1790
1791         blob.blob_location = BLOB_IN_ATTACHED_BUFFER;
1792         blob.attached_buffer = (void*)buf;
1793         blob.size = buf_size;
1794         sha1_buffer(buf, buf_size, blob.hash);
1795         blob.unhashed = 0;
1796         blob.is_metadata = is_metadata;
1797
1798         ret = write_wim_resource(&blob, out_fd, out_ctype, out_chunk_size,
1799                                  write_resource_flags);
1800         if (ret)
1801                 return ret;
1802
1803         copy_reshdr(out_reshdr, &blob.out_reshdr);
1804
1805         if (hash_ret)
1806                 copy_hash(hash_ret, blob.hash);
1807         return 0;
1808 }
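/*
 * Example (illustrative sketch only): writing a hypothetical in-memory buffer
 * 'buf' of 'buf_size' bytes to the output file of a hypothetical WIMStruct
 * 'wim' as an uncompressed, non-metadata resource, capturing its resource
 * header and SHA-1 message digest:
 *
 *      struct wim_reshdr reshdr;
 *      u8 hash[SHA1_HASH_SIZE];
 *
 *      ret = write_wim_resource_from_buffer(buf, buf_size, false,
 *                                           &wim->out_fd,
 *                                           WIMLIB_COMPRESSION_TYPE_NONE,
 *                                           0, &reshdr, hash, 0);
 *
 * (With WIMLIB_COMPRESSION_TYPE_NONE the chunk size argument is ignored, so 0
 * is acceptable; pass NULL for the hash argument if the digest is not needed.)
 */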
1809
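/*
 * Hash table bucketing blobs by size.  It is used to determine which blobs
 * have a unique size among those being considered for the output WIM; since
 * two blobs can only be duplicates if their sizes are equal, a blob with a
 * unique size cannot be a duplicate and need not be checksummed early for
 * deduplication purposes.
 */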
1810 struct blob_size_table {
1811         struct hlist_head *array;
1812         size_t num_entries;
1813         size_t capacity;
1814 };
1815
1816 static int
1817 init_blob_size_table(struct blob_size_table *tab, size_t capacity)
1818 {
1819         tab->array = CALLOC(capacity, sizeof(tab->array[0]));
1820         if (tab->array == NULL)
1821                 return WIMLIB_ERR_NOMEM;
1822         tab->num_entries = 0;
1823         tab->capacity = capacity;
1824         return 0;
1825 }
1826
1827 static void
1828 destroy_blob_size_table(struct blob_size_table *tab)
1829 {
1830         FREE(tab->array);
1831 }
1832
1833 static int
1834 blob_size_table_insert(struct blob_descriptor *blob, void *_tab)
1835 {
1836         struct blob_size_table *tab = _tab;
1837         size_t pos;
1838         struct blob_descriptor *same_size_blob;
1839         struct hlist_node *tmp;
1840
1841         pos = hash_u64(blob->size) % tab->capacity;
1842         blob->unique_size = 1;
1843         hlist_for_each_entry(same_size_blob, tmp, &tab->array[pos], hash_list_2) {
1844                 if (same_size_blob->size == blob->size) {
1845                         blob->unique_size = 0;
1846                         same_size_blob->unique_size = 0;
1847                         break;
1848                 }
1849         }
1850
1851         hlist_add_head(&blob->hash_list_2, &tab->array[pos]);
1852         tab->num_entries++;
1853         return 0;
1854 }
1855
1856 struct find_blobs_ctx {
1857         WIMStruct *wim;
1858         int write_flags;
1859         struct list_head blob_list;
1860         struct blob_size_table blob_size_tab;
1861 };
1862
1863 static void
1864 reference_blob_for_write(struct blob_descriptor *blob,
1865                          struct list_head *blob_list, u32 nref)
1866 {
1867         if (!blob->will_be_in_output_wim) {
1868                 blob->out_refcnt = 0;
1869                 list_add_tail(&blob->write_blobs_list, blob_list);
1870                 blob->will_be_in_output_wim = 1;
1871         }
1872         blob->out_refcnt += nref;
1873 }
1874
1875 static int
1876 fully_reference_blob_for_write(struct blob_descriptor *blob, void *_blob_list)
1877 {
1878         struct list_head *blob_list = _blob_list;
1879         blob->will_be_in_output_wim = 0;
1880         reference_blob_for_write(blob, blob_list, blob->refcnt);
1881         return 0;
1882 }
1883
1884 static int
1885 inode_find_blobs_to_reference(const struct wim_inode *inode,
1886                               const struct blob_table *table,
1887                               struct list_head *blob_list)
1888 {
1889         wimlib_assert(inode->i_nlink > 0);
1890
1891         for (unsigned i = 0; i < inode->i_num_streams; i++) {
1892                 struct blob_descriptor *blob;
1893
1894                 blob = stream_blob(&inode->i_streams[i], table);
1895                 if (blob)
1896                         reference_blob_for_write(blob, blob_list, inode->i_nlink);
1897                 else if (!is_zero_hash(stream_hash(&inode->i_streams[i])))
1898                         return WIMLIB_ERR_RESOURCE_NOT_FOUND;
1899         }
1900         return 0;
1901 }
1902
1903 static int
1904 do_blob_set_not_in_output_wim(struct blob_descriptor *blob, void *_ignore)
1905 {
1906         blob->will_be_in_output_wim = 0;
1907         return 0;
1908 }
1909
1910 static int
1911 image_find_blobs_to_reference(WIMStruct *wim)
1912 {
1913         struct wim_image_metadata *imd;
1914         struct wim_inode *inode;
1915         struct blob_descriptor *blob;
1916         struct list_head *blob_list;
1917         int ret;
1918
1919         imd = wim_get_current_image_metadata(wim);
1920
1921         image_for_each_unhashed_blob(blob, imd)
1922                 blob->will_be_in_output_wim = 0;
1923
1924         blob_list = wim->private;
1925         image_for_each_inode(inode, imd) {
1926                 ret = inode_find_blobs_to_reference(inode,
1927                                                     wim->blob_table,
1928                                                     blob_list);
1929                 if (ret)
1930                         return ret;
1931         }
1932         return 0;
1933 }
1934
1935 static int
1936 prepare_unfiltered_list_of_blobs_in_output_wim(WIMStruct *wim,
1937                                                int image,
1938                                                int blobs_ok,
1939                                                struct list_head *blob_list_ret)
1940 {
1941         int ret;
1942
1943         INIT_LIST_HEAD(blob_list_ret);
1944
1945         if (blobs_ok && (image == WIMLIB_ALL_IMAGES ||
1946                          (image == 1 && wim->hdr.image_count == 1)))
1947         {
1948                 /* Fast case:  Assume that all blobs are being written and that
1949                  * the reference counts are correct.  */
1950                 struct blob_descriptor *blob;
1951                 struct wim_image_metadata *imd;
1952                 unsigned i;
1953
1954                 for_blob_in_table(wim->blob_table,
1955                                   fully_reference_blob_for_write,
1956                                   blob_list_ret);
1957
1958                 for (i = 0; i < wim->hdr.image_count; i++) {
1959                         imd = wim->image_metadata[i];
1960                         image_for_each_unhashed_blob(blob, imd)
1961                                 fully_reference_blob_for_write(blob, blob_list_ret);
1962                 }
1963         } else {
1964                 /* Slow case:  Walk through the images being written and
1965                  * determine the blobs referenced.  */
1966                 for_blob_in_table(wim->blob_table,
1967                                   do_blob_set_not_in_output_wim, NULL);
1968                 wim->private = blob_list_ret;
1969                 ret = for_image(wim, image, image_find_blobs_to_reference);
1970                 if (ret)
1971                         return ret;
1972         }
1973
1974         return 0;
1975 }
1976
1977 struct insert_other_if_hard_filtered_ctx {
1978         struct blob_size_table *tab;
1979         struct filter_context *filter_ctx;
1980 };
1981
1982 static int
1983 insert_other_if_hard_filtered(struct blob_descriptor *blob, void *_ctx)
1984 {
1985         struct insert_other_if_hard_filtered_ctx *ctx = _ctx;
1986
1987         if (!blob->will_be_in_output_wim &&
1988             blob_hard_filtered(blob, ctx->filter_ctx))
1989                 blob_size_table_insert(blob, ctx->tab);
1990         return 0;
1991 }
1992
1993 static int
1994 determine_blob_size_uniquity(struct list_head *blob_list,
1995                              struct blob_table *lt,
1996                              struct filter_context *filter_ctx)
1997 {
1998         int ret;
1999         struct blob_size_table tab;
2000         struct blob_descriptor *blob;
2001
2002         ret = init_blob_size_table(&tab, 9001);
2003         if (ret)
2004                 return ret;
2005
2006         if (may_hard_filter_blobs(filter_ctx)) {
2007                 struct insert_other_if_hard_filtered_ctx ctx = {
2008                         .tab = &tab,
2009                         .filter_ctx = filter_ctx,
2010                 };
2011                 for_blob_in_table(lt, insert_other_if_hard_filtered, &ctx);
2012         }
2013
2014         list_for_each_entry(blob, blob_list, write_blobs_list)
2015                 blob_size_table_insert(blob, &tab);
2016
2017         destroy_blob_size_table(&tab);
2018         return 0;
2019 }
2020
2021 static void
2022 filter_blob_list_for_write(struct list_head *blob_list,
2023                            struct filter_context *filter_ctx)
2024 {
2025         struct blob_descriptor *blob, *tmp;
2026
2027         list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
2028                 int status = blob_filtered(blob, filter_ctx);
2029
2030                 if (status == 0) {
2031                         /* Not filtered.  */
2032                         continue;
2033                 } else {
2034                         if (status > 0) {
2035                                 /* Soft filtered.  */
2036                         } else {
2037                                 /* Hard filtered.  */
2038                                 blob->will_be_in_output_wim = 0;
2039                                 list_del(&blob->blob_table_list);
2040                         }
2041                         list_del(&blob->write_blobs_list);
2042                 }
2043         }
2044 }
2045
2046 /*
2047  * prepare_blob_list_for_write() -
2048  *
2049  * Prepare the list of blobs to write for writing a WIM containing the specified
2050  * image(s) with the specified write flags.
2051  *
2052  * @wim
2053  *      The WIMStruct on whose behalf the write is occurring.
2054  *
2055  * @image
2056  *      Image(s) from the WIM to write; may be WIMLIB_ALL_IMAGES.
2057  *
2058  * @write_flags
2059  *      WIMLIB_WRITE_FLAG_* flags for the write operation:
2060  *
2061  *      STREAMS_OK:  For writes of all images, assume that all blobs in the blob
2062  *      table of @wim and the per-image lists of unhashed blobs should be taken
2063  *      as-is, and image metadata should not be searched for references.  This
2064  *      does not exclude filtering with OVERWRITE and SKIP_EXTERNAL_WIMS, below.
2065  *
2066  *      OVERWRITE:  Blobs already present in @wim shall not be returned in
2067  *      @blob_list_ret.
2068  *
2069  *      SKIP_EXTERNAL_WIMS:  Blobs already present in a WIM file, but not @wim,
2070  *      shall be returned in neither @blob_list_ret nor @blob_table_list_ret.
2071  *
2072  * @blob_list_ret
2073  *      List of blobs, linked by write_blobs_list, that need to be written will
2074  *      be returned here.
2075  *
2076  *      Note that this function assumes that unhashed blobs will be written; it
2077  *      does not take into account that they may become duplicates when actually
2078  *      hashed.
2079  *
2080  * @blob_table_list_ret
2081  *      List of blobs, linked by blob_table_list, that need to be included in
2082  *      the WIM's blob table will be returned here.  This will be a superset of
2083  *      the blobs in @blob_list_ret.
2084  *
2085  *      This list will be a proper superset of @blob_list_ret if and only if
2086  *      WIMLIB_WRITE_FLAG_OVERWRITE was specified in @write_flags and some of
2087  *      the blobs that would otherwise need to be written were already located
2088  *      in the WIM file.
2089  *
2090  *      All blobs in this list will have @out_refcnt set to the number of
2091  *      references to the blob in the output WIM.  If
2092  *      WIMLIB_WRITE_FLAG_STREAMS_OK was specified in @write_flags, @out_refcnt
2093  *      may be as low as 0.
2094  *
2095  * @filter_ctx_ret
2096  *      A context for queries of blob filter status with blob_filtered() is
2097  *      returned in this location.
2098  *
2099  * In addition, @will_be_in_output_wim will be set to 1 in all blobs inserted
2100  * into @blob_table_list_ret and to 0 in all blobs in the blob table of @wim not
2101  * inserted into @blob_table_list_ret.
2102  *
2103  * Still furthermore, @unique_size will be set to 1 on all blobs in
2104  * @blob_list_ret that have unique size among all blobs in @blob_list_ret and
2105  * among all blobs in the blob table of @wim that are ineligible for being
2106  * written due to filtering.
2107  *
2108  * Returns 0 on success; nonzero on read error, memory allocation error, or
2109  * otherwise.
2110  */
2111 static int
2112 prepare_blob_list_for_write(WIMStruct *wim, int image,
2113                             int write_flags,
2114                             struct list_head *blob_list_ret,
2115                             struct list_head *blob_table_list_ret,
2116                             struct filter_context *filter_ctx_ret)
2117 {
2118         int ret;
2119         struct blob_descriptor *blob;
2120
2121         filter_ctx_ret->write_flags = write_flags;
2122         filter_ctx_ret->wim = wim;
2123
2124         ret = prepare_unfiltered_list_of_blobs_in_output_wim(
2125                                 wim,
2126                                 image,
2127                                 write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK,
2128                                 blob_list_ret);
2129         if (ret)
2130                 return ret;
2131
2132         INIT_LIST_HEAD(blob_table_list_ret);
2133         list_for_each_entry(blob, blob_list_ret, write_blobs_list)
2134                 list_add_tail(&blob->blob_table_list, blob_table_list_ret);
2135
2136         ret = determine_blob_size_uniquity(blob_list_ret, wim->blob_table,
2137                                            filter_ctx_ret);
2138         if (ret)
2139                 return ret;
2140
2141         if (may_filter_blobs(filter_ctx_ret))
2142                 filter_blob_list_for_write(blob_list_ret, filter_ctx_ret);
2143
2144         return 0;
2145 }
2146
2147 static int
2148 write_file_blobs(WIMStruct *wim, int image, int write_flags,
2149                  unsigned num_threads,
2150                  struct list_head *blob_list_override,
2151                  struct list_head *blob_table_list_ret)
2152 {
2153         int ret;
2154         struct list_head _blob_list;
2155         struct list_head *blob_list;
2156         struct blob_descriptor *blob;
2157         struct filter_context _filter_ctx;
2158         struct filter_context *filter_ctx;
2159
2160         if (blob_list_override == NULL) {
2161                 /* Normal case: prepare blob list from image(s) being written.
2162                  */
2163                 blob_list = &_blob_list;
2164                 filter_ctx = &_filter_ctx;
2165                 ret = prepare_blob_list_for_write(wim, image, write_flags,
2166                                                   blob_list,
2167                                                   blob_table_list_ret,
2168                                                   filter_ctx);
2169                 if (ret)
2170                         return ret;
2171         } else {
2172                 /* This case currently only occurs as a result of wimlib_split():
2173                  * use the blob list that was explicitly provided and keep the
2174                  * existing reference counts.  */
2175                 blob_list = blob_list_override;
2176                 filter_ctx = NULL;
2177                 INIT_LIST_HEAD(blob_table_list_ret);
2178                 list_for_each_entry(blob, blob_list, write_blobs_list) {
2179                         blob->out_refcnt = blob->refcnt;
2180                         blob->will_be_in_output_wim = 1;
2181                         blob->unique_size = 0;
2182                         list_add_tail(&blob->blob_table_list, blob_table_list_ret);
2183                 }
2184         }
2185
2186         return wim_write_blob_list(wim,
2187                                    blob_list,
2188                                    write_flags,
2189                                    num_threads,
2190                                    filter_ctx);
2191 }
2192
2193 static int
2194 write_metadata_resources(WIMStruct *wim, int image, int write_flags)
2195 {
2196         int ret;
2197         int start_image;
2198         int end_image;
2199         int write_resource_flags;
2200
2201         if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA) {
2202                 DEBUG("Not writing any metadata resources.");
2203                 return 0;
2204         }
2205
2206         write_resource_flags = write_flags_to_resource_flags(write_flags);
2207
2208         write_resource_flags &= ~WRITE_RESOURCE_FLAG_SOLID;
2209
2210         DEBUG("Writing metadata resources (offset=%"PRIu64")",
2211               wim->out_fd.offset);
2212
2213         ret = call_progress(wim->progfunc,
2214                             WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN,
2215                             NULL, wim->progctx);
2216         if (ret)
2217                 return ret;
2218
2219         if (image == WIMLIB_ALL_IMAGES) {
2220                 start_image = 1;
2221                 end_image = wim->hdr.image_count;
2222         } else {
2223                 start_image = image;
2224                 end_image = image;
2225         }
2226
2227         for (int i = start_image; i <= end_image; i++) {
2228                 struct wim_image_metadata *imd;
2229
2230                 imd = wim->image_metadata[i - 1];
2231                 /* Build a new metadata resource only if image was modified from
2232                  * the original (or was newly added).  Otherwise just copy the
2233                  * existing one.  */
2234                 if (imd->modified) {
2235                         DEBUG("Image %u was modified; building and writing new "
2236                               "metadata resource", i);
2237                         ret = write_metadata_resource(wim, i,
2238                                                       write_resource_flags);
2239                 } else if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
2240                         DEBUG("Image %u was not modified; re-using existing "
2241                               "metadata resource.", i);
2242                         blob_set_out_reshdr_for_reuse(imd->metadata_blob);
2243                         ret = 0;
2244                 } else {
2245                         DEBUG("Image %u was not modified; copying existing "
2246                               "metadata resource.", i);
2247                         ret = write_wim_resource(imd->metadata_blob,
2248                                                  &wim->out_fd,
2249                                                  wim->out_compression_type,
2250                                                  wim->out_chunk_size,
2251                                                  write_resource_flags);
2252                 }
2253                 if (ret)
2254                         return ret;
2255         }
2256
2257         return call_progress(wim->progfunc,
2258                              WIMLIB_PROGRESS_MSG_WRITE_METADATA_END,
2259                              NULL, wim->progctx);
2260 }
2261
2262 static int
2263 open_wim_writable(WIMStruct *wim, const tchar *path, int open_flags)
2264 {
2265         int raw_fd;
2266         DEBUG("Opening \"%"TS"\" for writing.", path);
2267
2268         raw_fd = topen(path, open_flags | O_BINARY, 0644);
2269         if (raw_fd < 0) {
2270                 ERROR_WITH_ERRNO("Failed to open \"%"TS"\" for writing", path);
2271                 return WIMLIB_ERR_OPEN;
2272         }
2273         filedes_init(&wim->out_fd, raw_fd);
2274         return 0;
2275 }
2276
2277 static int
2278 close_wim_writable(WIMStruct *wim, int write_flags)
2279 {
2280         int ret = 0;
2281
2282         if (!(write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)) {
2283                 DEBUG("Closing WIM file.");
2284                 if (filedes_valid(&wim->out_fd))
2285                         if (filedes_close(&wim->out_fd))
2286                                 ret = WIMLIB_ERR_WRITE;
2287         }
2288         filedes_invalidate(&wim->out_fd);
2289         return ret;
2290 }
2291
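/*
 * Comparison function for sorting blob table entries by their locations in the
 * output WIM: blobs in non-solid resources come first, ordered by offset in
 * the WIM; blobs in solid resources follow, ordered first by the offset of
 * their containing solid resource, then by their offset within it.
 */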
2292 static int
2293 cmp_blobs_by_out_rdesc(const void *p1, const void *p2)
2294 {
2295         const struct blob_descriptor *blob1, *blob2;
2296
2297         blob1 = *(const struct blob_descriptor**)p1;
2298         blob2 = *(const struct blob_descriptor**)p2;
2299
2300         if (blob1->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) {
2301                 if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) {
2302                         if (blob1->out_res_offset_in_wim != blob2->out_res_offset_in_wim)
2303                                 return cmp_u64(blob1->out_res_offset_in_wim,
2304                                                blob2->out_res_offset_in_wim);
2305                 } else {
2306                         return 1;
2307                 }
2308         } else {
2309                 if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID)
2310                         return -1;
2311         }
2312         return cmp_u64(blob1->out_reshdr.offset_in_wim,
2313                        blob2->out_reshdr.offset_in_wim);
2314 }
2315
2316 static int
2317 write_blob_table(WIMStruct *wim, int image, int write_flags,
2318                  struct wim_reshdr *out_reshdr,
2319                  struct list_head *blob_table_list)
2320 {
2321         int ret;
2322
2323         /* Set output resource metadata for blobs already present in WIM.  */
2324         if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
2325                 struct blob_descriptor *blob;
2326                 list_for_each_entry(blob, blob_table_list, blob_table_list) {
2327                         if (blob->blob_location == BLOB_IN_WIM &&
2328                             blob->rdesc->wim == wim)
2329                         {
2330                                 blob_set_out_reshdr_for_reuse(blob);
2331                         }
2332                 }
2333         }
2334
2335         ret = sort_blob_list(blob_table_list,
2336                              offsetof(struct blob_descriptor, blob_table_list),
2337                              cmp_blobs_by_out_rdesc);
2338         if (ret)
2339                 return ret;
2340
2341         /* Add entries for metadata resources.  */
2342         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)) {
2343                 int start_image;
2344                 int end_image;
2345
2346                 if (image == WIMLIB_ALL_IMAGES) {
2347                         start_image = 1;
2348                         end_image = wim->hdr.image_count;
2349                 } else {
2350                         start_image = image;
2351                         end_image = image;
2352                 }
2353
2354                 /* Push metadata blob table entries onto the front of the list
2355                  * in reverse order, so that they're written in order.
2356                  */
2357                 for (int i = end_image; i >= start_image; i--) {
2358                         struct blob_descriptor *metadata_blob;
2359
2360                         metadata_blob = wim->image_metadata[i - 1]->metadata_blob;
2361                         wimlib_assert(metadata_blob->out_reshdr.flags & WIM_RESHDR_FLAG_METADATA);
2362                         metadata_blob->out_refcnt = 1;
2363                         list_add(&metadata_blob->blob_table_list, blob_table_list);
2364                 }
2365         }
2366
2367         return write_blob_table_from_blob_list(blob_table_list,
2368                                                &wim->out_fd,
2369                                                wim->hdr.part_number,
2370                                                out_reshdr,
2371                                                write_flags_to_resource_flags(write_flags));
2372 }
2373
2374 /*
2375  * finish_write():
2376  *
2377  * Finish writing a WIM file: write the blob table, xml data, and integrity
2378  * table, then overwrite the WIM header.  By default, closes the WIM file
2379  * descriptor (@wim->out_fd) if successful.
2380  *
2381  * write_flags is a bitwise OR of the following:
2382  *
2383  *      (public) WIMLIB_WRITE_FLAG_CHECK_INTEGRITY:
2384  *              Include an integrity table.
2385  *
2386  *      (public) WIMLIB_WRITE_FLAG_FSYNC:
2387  *              fsync() the output file before closing it.
2388  *
2389  *      (public) WIMLIB_WRITE_FLAG_PIPABLE:
2390  *              Writing a pipable WIM, possibly to a pipe; include pipable WIM
2391  *              blob headers before the blob table and XML data, and also write
2392  *              the WIM header at the end instead of seeking to the beginning.
2393  *              Can't be combined with WIMLIB_WRITE_FLAG_CHECK_INTEGRITY.
2394  *
2395  *      (private) WIMLIB_WRITE_FLAG_NO_BLOB_TABLE:
2396  *              Don't write the blob table.
2397  *
2398  *      (private) WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML:
2399  *              After writing the XML data but before writing the integrity
2400  *              table, write a temporary WIM header and flush the file
2401  *              descriptor so that the WIM is less likely to become corrupted
2402  *              upon abrupt program termination.
2403  *      (private) WIMLIB_WRITE_FLAG_HEADER_AT_END:
2404  *              Instead of overwriting the WIM header at the beginning of the
2405  *              file, simply append it to the end of the file.  (Used when
2406  *              writing to pipe.)
2407  *      (private) WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR:
2408  *              Do not close the file descriptor @wim->out_fd on either success
2409  *              or failure.
2410  *      (private) WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES:
2411  *              Use the existing <TOTALBYTES> stored in the in-memory XML
2412  *              information, rather than setting it to the offset of the XML
2413  *              data being written.
2414  *      (private) WIMLIB_WRITE_FLAG_OVERWRITE
2415  *              The existing WIM file is being updated in-place.  The entries
2416  *              from its integrity table may be re-used.
2417  */
2418 static int
2419 finish_write(WIMStruct *wim, int image, int write_flags,
2420              struct list_head *blob_table_list)
2421 {
2422         int ret;
2423         off_t hdr_offset;
2424         int write_resource_flags;
2425         off_t old_blob_table_end = 0;
2426         off_t new_blob_table_end;
2427         u64 xml_totalbytes;
2428         struct integrity_table *old_integrity_table = NULL;
2429
2430         DEBUG("image=%d, write_flags=%08x", image, write_flags);
2431
2432         write_resource_flags = write_flags_to_resource_flags(write_flags);
2433
2434         /* In the WIM header, there is room for the resource entry for a
2435          * metadata resource labeled as the "boot metadata".  This entry should
2436          * be zeroed out if there is no bootable image (boot_idx 0).  Otherwise,
2437          * it should be a copy of the resource entry for the image that is
2438          * marked as bootable.  This is not well documented...  */
2439         if (wim->hdr.boot_idx == 0) {
2440                 zero_reshdr(&wim->hdr.boot_metadata_reshdr);
2441         } else {
2442                 copy_reshdr(&wim->hdr.boot_metadata_reshdr,
2443                             &wim->image_metadata[
2444                                 wim->hdr.boot_idx - 1]->metadata_blob->out_reshdr);
2445         }
2446
2447         /* If overwriting the WIM file containing an integrity table in-place,
2448          * we'd like to re-use the information in the old integrity table
2449          * instead of recalculating it.  But we might overwrite the old
2450          * integrity table when we expand the XML data.  Read it into memory
2451          * just in case.  */
2452         if ((write_flags & (WIMLIB_WRITE_FLAG_OVERWRITE |
2453                             WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)) ==
2454                 (WIMLIB_WRITE_FLAG_OVERWRITE |
2455                  WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
2456             && wim_has_integrity_table(wim))
2457         {
2458                 old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
2459                                        wim->hdr.blob_table_reshdr.size_in_wim;
2460                 (void)read_integrity_table(wim,
2461                                            old_blob_table_end - WIM_HEADER_DISK_SIZE,
2462                                            &old_integrity_table);
2463                 /* If we couldn't read the old integrity table, we can still
2464                  * re-calculate the full integrity table ourselves.  Hence the
2465                  * ignoring of the return value.  */
2466         }
2467
2468         /* Write blob table.  */
2469         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_BLOB_TABLE)) {
2470                 ret = write_blob_table(wim, image, write_flags,
2471                                        &wim->hdr.blob_table_reshdr,
2472                                        blob_table_list);
2473                 if (ret) {
2474                         free_integrity_table(old_integrity_table);
2475                         return ret;
2476                 }
2477         }
2478
2479         /* Write XML data.  */
2480         xml_totalbytes = wim->out_fd.offset;
2481         if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES)
2482                 xml_totalbytes = WIM_TOTALBYTES_USE_EXISTING;
2483         ret = write_wim_xml_data(wim, image, xml_totalbytes,
2484                                  &wim->hdr.xml_data_reshdr,
2485                                  write_resource_flags);
2486         if (ret) {
2487                 free_integrity_table(old_integrity_table);
2488                 return ret;
2489         }
2490
2491         /* Write integrity table (optional).  */
2492         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2493                 if (write_flags & WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) {
2494                         struct wim_header checkpoint_hdr;
2495                         memcpy(&checkpoint_hdr, &wim->hdr, sizeof(struct wim_header));
2496                         zero_reshdr(&checkpoint_hdr.integrity_table_reshdr);
2497                         checkpoint_hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2498                         ret = write_wim_header_at_offset(&checkpoint_hdr,
2499                                                          &wim->out_fd, 0);
2500                         if (ret) {
2501                                 free_integrity_table(old_integrity_table);
2502                                 return ret;
2503                         }
2504                 }
2505
2506                 new_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
2507                                      wim->hdr.blob_table_reshdr.size_in_wim;
2508
2509                 ret = write_integrity_table(wim,
2510                                             new_blob_table_end,
2511                                             old_blob_table_end,
2512                                             old_integrity_table);
2513                 free_integrity_table(old_integrity_table);
2514                 if (ret)
2515                         return ret;
2516         } else {
2517                 /* No integrity table.  */
2518                 zero_reshdr(&wim->hdr.integrity_table_reshdr);
2519         }
2520
2521         /* Now that all information in the WIM header has been determined, the
2522          * preliminary header written earlier can be overwritten, the header of
2523          * the existing WIM file can be overwritten, or the final header can be
2524          * written to the end of the pipable WIM.  */
2525         wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2526         hdr_offset = 0;
2527         if (write_flags & WIMLIB_WRITE_FLAG_HEADER_AT_END)
2528                 hdr_offset = wim->out_fd.offset;
2529         DEBUG("Writing new header @ %"PRIu64".", hdr_offset);
2530         ret = write_wim_header_at_offset(&wim->hdr, &wim->out_fd, hdr_offset);
2531         if (ret)
2532                 return ret;
2533
2534         /* Possibly sync file data to disk before closing.  On POSIX systems, it
2535          * is necessary to do this before using rename() to overwrite an
2536          * existing file with a new file.  Otherwise, data loss would occur if
2537          * the system is abruptly terminated when the metadata for the rename
2538          * operation has been written to disk, but the new file data has not.
2539          */
2540         if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
2541                 DEBUG("Syncing WIM file.");
2542                 if (fsync(wim->out_fd.fd)) {
2543                         ERROR_WITH_ERRNO("Error syncing data to WIM file");
2544                         return WIMLIB_ERR_WRITE;
2545                 }
2546         }
2547
2548         if (close_wim_writable(wim, write_flags)) {
2549                 ERROR_WITH_ERRNO("Failed to close the output WIM file");
2550                 return WIMLIB_ERR_WRITE;
2551         }
2552
2553         return 0;
2554 }
2555
2556 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
2557
2558 /* Set advisory lock on WIM file (if not already locked)  */
2559 int
2560 lock_wim_for_append(WIMStruct *wim)
2561 {
2562         if (wim->locked_for_append)
2563                 return 0;
2564         if (!flock(wim->in_fd.fd, LOCK_EX | LOCK_NB)) {
2565                 wim->locked_for_append = 1;
2566                 return 0;
2567         }
2568         if (errno != EWOULDBLOCK)
2569                 return 0;
2570         return WIMLIB_ERR_ALREADY_LOCKED;
2571 }
2572
2573 /* Remove advisory lock on WIM file (if present)  */
2574 void
2575 unlock_wim_for_append(WIMStruct *wim)
2576 {
2577         if (wim->locked_for_append) {
2578                 flock(wim->in_fd.fd, LOCK_UN);
2579                 wim->locked_for_append = 0;
2580         }
2581 }
2582 #endif
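
/*
 * Editor's note: a minimal, self-contained sketch of the advisory-locking
 * pattern used by lock_wim_for_append() above.  EWOULDBLOCK means another
 * process already holds the lock; other flock() failures are treated as
 * "no locking available", since the lock is only advisory and may be
 * unsupported on some filesystems.  The function name is hypothetical and
 * the block is not compiled.
 */
#if 0
#include <errno.h>
#include <sys/file.h>

/* Returns 1 if the lock was taken, 0 if locking is unavailable, and -1 if
 * another process already holds the lock.  */
static int
example_try_lock(int fd)
{
	if (flock(fd, LOCK_EX | LOCK_NB) == 0)
		return 1;
	if (errno == EWOULDBLOCK)
		return -1;
	return 0;
}
#endif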
2583
2584 /*
2585  * write_pipable_wim():
2586  *
2587  * Perform the intermediate stages of creating a "pipable" WIM (i.e. a WIM
2588  * capable of being applied from a pipe).
2589  *
2590  * Pipable WIMs are a wimlib-specific modification of the WIM format such that
2591  * images can be applied from them sequentially when the file data is sent over
2592  * a pipe.  In addition, a pipable WIM can be written sequentially to a pipe.
2593  * The modifications made to the WIM format for pipable WIMs are:
2594  *
2595  * - Magic characters in header are "WLPWM\0\0\0" (wimlib pipable WIM) instead
2596  *   of "MSWIM\0\0\0".  This lets wimlib know that the WIM is pipable and also
2597  *   stops other software from trying to read the file as a normal WIM.
2598  *
2599  * - The header at the beginning of the file does not contain all the normal
2600  *   information; in particular it will have all 0's for the blob table and XML
2601  *   data resource entries.  This is because this information cannot be
2602  *   determined until the blob table and XML data have been written.
2603  *   Consequently, wimlib will write the full header at the very end of the
2604  *   file.  The header at the end, however, is only used when reading the WIM
2605  *   from a seekable file (not a pipe).
2606  *
2607  * - An extra copy of the XML data is placed directly after the header.  This
2608  *   allows image names and sizes to be determined at an appropriate time when
2609  *   reading the WIM from a pipe.  This copy of the XML data is ignored if the
2610  *   WIM is read from a seekable file (not a pipe).
2611  *
2612  * - The format of resources, or blobs, has been modified to allow them to be
2613  *   used before the "blob table" has been read.  Each blob is prefixed with a
2614  *   `struct pwm_blob_hdr' that is basically an abbreviated form of `struct
2615  *   blob_descriptor_disk' that only contains the SHA-1 message digest,
2616  *   uncompressed blob size, and flags that indicate whether the blob is
2617  *   compressed.  The data of uncompressed blobs then follows literally, while
2618  *   the data of compressed blobs follows in a modified format.  Compressed
2619  *   blobs do not begin with a chunk table, since the chunk table cannot be
2620  *   written until all chunks have been compressed.  Instead, each compressed
2621  *   chunk is prefixed by a `struct pwm_chunk_hdr' that gives its size.
2622  *   Furthermore, the chunk table is written at the end of the resource instead
2623  *   of the start.  Note: chunk offsets are given in the chunk table as if the
2624  *   `struct pwm_chunk_hdr's were not present; also, the chunk table is only
2625  *   used if the WIM is being read from a seekable file (not a pipe).
2626  *
2627  * - Metadata blobs always come before non-metadata blobs.  (This does not by
2628  *   itself constitute an incompatibility with normal WIMs, since this is valid
2629  *   in normal WIMs.)
2630  *
2631  * - At least up to the end of the blobs, all components must be packed as
2632  *   tightly as possible; there cannot be any "holes" in the WIM.  (This does
2633  *   not by itself constitute an incompatibility with normal WIMs, since this
2634  *   is valid in normal WIMs.)
2635  *
2636  * Note: the blob table, XML data, and header at the end are not used when
2637  * applying from a pipe.  They exist to support functionality such as image
2638  * application and export when the WIM is *not* read from a pipe.
2639  *
2640  *   Layout of pipable WIM:
2641  *
2642  * +--------+----------+--------------------+----------------+--------------+-----------+--------+
2643  * | Header | XML data | Metadata resources | File resources |  Blob table  | XML data  | Header |
2644  * +--------+----------+--------------------+----------------+--------------+-----------+--------+
2645  *
2646  *   Layout of normal WIM:
2647  *
2648  * +--------+-----------------------------+--------------+----------+
2649  * | Header | File and metadata resources |  Blob table  | XML data |
2650  * +--------+-----------------------------+--------------+----------+
2651  *
2652  * An optional integrity table can follow the final XML data in both normal and
2653  * pipable WIMs.  However, due to implementation details, wimlib currently can
2654  * only include an integrity table in a pipable WIM when writing it to a
2655  * seekable file (not a pipe).
2656  *
2657  * Do note that since pipable WIMs are not supported by Microsoft's software,
2658  * wimlib does not create them unless explicitly requested (with
2659  * WIMLIB_WRITE_FLAG_PIPABLE) and as stated above they use different magic
2660  * characters to identify the file.
2661  */
2662 static int
2663 write_pipable_wim(WIMStruct *wim, int image, int write_flags,
2664                   unsigned num_threads,
2665                   struct list_head *blob_list_override,
2666                   struct list_head *blob_table_list_ret)
2667 {
2668         int ret;
2669         struct wim_reshdr xml_reshdr;
2670
2671         WARNING("Creating a pipable WIM, which will "
2672                 "be incompatible\n"
2673                 "          with Microsoft's software (wimgapi/imagex/Dism).");
2674
2675         /* At this point, the header at the beginning of the file has already
2676          * been written.  */
2677
2678         /* For efficiency, when wimlib adds an image to the WIM with
2679          * wimlib_add_image(), the SHA-1 message digests of files are not
2680          * calculated; instead, they are calculated while the files are being
2681          * written.  However, this does not work when writing a pipable WIM,
2682          * since when writing a blob to a pipable WIM, its SHA-1 message digest
2683          * needs to be known before the blob data is written.  Therefore, before
2684          * getting much farther, we need to pre-calculate the SHA-1 message
2685          * digests of all blobs that will be written.  */
2686         ret = wim_checksum_unhashed_blobs(wim);
2687         if (ret)
2688                 return ret;
2689
2690         /* Write extra copy of the XML data.  */
2691         ret = write_wim_xml_data(wim, image, WIM_TOTALBYTES_OMIT,
2692                                  &xml_reshdr, WRITE_RESOURCE_FLAG_PIPABLE);
2693         if (ret)
2694                 return ret;
2695
2696         /* Write metadata resources for the image(s) being included in the
2697          * output WIM.  */
2698         ret = write_metadata_resources(wim, image, write_flags);
2699         if (ret)
2700                 return ret;
2701
2702         /* Write blobs needed for the image(s) being included in the output WIM,
2703          * or blobs needed for the split WIM part.  */
2704         return write_file_blobs(wim, image, write_flags,
2705                                 num_threads, blob_list_override,
2706                                 blob_table_list_ret);
2707
2708         /* The blob table, XML data, and header at end are handled by
2709          * finish_write().  */
2710 }
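
/*
 * Editor's note: an illustrative sketch of consuming the pipable blob format
 * described above from a non-seekable stream.  The real on-disk layouts of
 * `struct pwm_blob_hdr' and `struct pwm_chunk_hdr' are defined elsewhere in
 * wimlib; the simplified structures and names below are hypothetical and the
 * block is not compiled.
 */
#if 0
#include <stdint.h>
#include <unistd.h>

struct example_blob_hdr {          /* stand-in for `struct pwm_blob_hdr'  */
	uint8_t  sha1[20];
	uint64_t uncompressed_size;
	uint32_t flags;            /* EXAMPLE_BLOB_COMPRESSED, ...        */
};
struct example_chunk_hdr {         /* stand-in for `struct pwm_chunk_hdr' */
	uint32_t compressed_size;
};
#define EXAMPLE_BLOB_COMPRESSED 0x1

static int
read_exact(int fd, void *buf, size_t n)
{
	while (n) {
		ssize_t r = read(fd, buf, n);
		if (r <= 0)
			return -1;
		buf = (char *)buf + r;
		n -= (size_t)r;
	}
	return 0;
}

/* Read one blob from the pipe, discarding its data.  */
static int
example_skip_blob(int fd, uint32_t chunk_size)
{
	struct example_blob_hdr hdr;
	char buf[4096];

	if (read_exact(fd, &hdr, sizeof(hdr)))
		return -1;

	if (!(hdr.flags & EXAMPLE_BLOB_COMPRESSED)) {
		/* Uncompressed: the data follows literally.  */
		for (uint64_t left = hdr.uncompressed_size; left; ) {
			size_t n = left < sizeof(buf) ? (size_t)left : sizeof(buf);
			if (read_exact(fd, buf, n))
				return -1;
			left -= n;
		}
		return 0;
	}

	/* Compressed: a sequence of chunks, each prefixed by its compressed
	 * size, with no up-front chunk table.  Each chunk represents up to
	 * chunk_size bytes of uncompressed data.  */
	for (uint64_t left = hdr.uncompressed_size; left; ) {
		struct example_chunk_hdr chdr;
		if (read_exact(fd, &chdr, sizeof(chdr)))
			return -1;
		for (uint32_t c = chdr.compressed_size; c; ) {
			size_t n = c < sizeof(buf) ? (size_t)c : sizeof(buf);
			if (read_exact(fd, buf, n))
				return -1;
			c -= (uint32_t)n;
		}
		left -= left < chunk_size ? left : chunk_size;
	}
	return 0;
}
#endif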
2711
2712 /* Write a standalone WIM or split WIM (SWM) part to a new file or to a file
2713  * descriptor.  */
2714 int
2715 write_wim_part(WIMStruct *wim,
2716                const void *path_or_fd,
2717                int image,
2718                int write_flags,
2719                unsigned num_threads,
2720                unsigned part_number,
2721                unsigned total_parts,
2722                struct list_head *blob_list_override,
2723                const u8 *guid)
2724 {
2725         int ret;
2726         struct wim_header hdr_save;
2727         struct list_head blob_table_list;
2728
2729         if (total_parts == 1)
2730                 DEBUG("Writing standalone WIM.");
2731         else
2732                 DEBUG("Writing split WIM part %u/%u", part_number, total_parts);
2733         if (image == WIMLIB_ALL_IMAGES)
2734                 DEBUG("Including all images.");
2735         else
2736                 DEBUG("Including image %d only.", image);
2737         if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)
2738                 DEBUG("File descriptor: %d", *(const int*)path_or_fd);
2739         else
2740                 DEBUG("Path: \"%"TS"\"", (const tchar*)path_or_fd);
2741         DEBUG("Write flags: 0x%08x", write_flags);
2742
2743         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
2744                 DEBUG("\tCHECK_INTEGRITY");
2745
2746         if (write_flags & WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)
2747                 DEBUG("\tNO_CHECK_INTEGRITY");
2748
2749         if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2750                 DEBUG("\tPIPABLE");
2751
2752         if (write_flags & WIMLIB_WRITE_FLAG_NOT_PIPABLE)
2753                 DEBUG("\tNOT_PIPABLE");
2754
2755         if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
2756                 DEBUG("\tRECOMPRESS");
2757
2758         if (write_flags & WIMLIB_WRITE_FLAG_FSYNC)
2759                 DEBUG("\tFSYNC");
2760
2761         if (write_flags & WIMLIB_WRITE_FLAG_REBUILD)
2762                 DEBUG("\tREBUILD");
2763
2764         if (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE)
2765                 DEBUG("\tSOFT_DELETE");
2766
2767         if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
2768                 DEBUG("\tIGNORE_READONLY_FLAG");
2769
2770         if (write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS)
2771                 DEBUG("\tSKIP_EXTERNAL_WIMS");
2772
2773         if (write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK)
2774                 DEBUG("\tSTREAMS_OK");
2775
2776         if (write_flags & WIMLIB_WRITE_FLAG_RETAIN_GUID)
2777                 DEBUG("\tRETAIN_GUID");
2778
2779         if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
2780                 DEBUG("\tSOLID");
2781
2782         if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)
2783                 DEBUG("\tFILE_DESCRIPTOR");
2784
2785         if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)
2786                 DEBUG("\tNO_METADATA");
2787
2788         if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES)
2789                 DEBUG("\tUSE_EXISTING_TOTALBYTES");
2790
2791         if (num_threads == 0)
2792                 DEBUG("Number of threads: autodetect");
2793         else
2794                 DEBUG("Number of threads: %u", num_threads);
2795         DEBUG("Progress function: %s", (wim->progfunc ? "yes" : "no"));
2796         DEBUG("Blob list:         %s", (blob_list_override ? "specified" : "autodetect"));
2797         DEBUG("GUID:              %s", (write_flags &
2798                                         WIMLIB_WRITE_FLAG_RETAIN_GUID) ? "retain"
2799                                                 : guid ? "explicit" : "generate new");
2800
2801         /* Internally, this is always called with a valid part number and total
2802          * parts.  */
2803         wimlib_assert(total_parts >= 1);
2804         wimlib_assert(part_number >= 1 && part_number <= total_parts);
2805
2806         /* A valid image (or all images) must be specified.  */
2807         if (image != WIMLIB_ALL_IMAGES &&
2808              (image < 1 || image > wim->hdr.image_count))
2809                 return WIMLIB_ERR_INVALID_IMAGE;
2810
2811         /* If we need to write metadata resources, make sure the ::WIMStruct has
2812          * the needed information attached (e.g. is not a resource-only WIM,
2813          * such as a non-first part of a split WIM).  */
2814         if (!wim_has_metadata(wim) &&
2815             !(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA))
2816                 return WIMLIB_ERR_METADATA_NOT_FOUND;
2817
2818         /* Check for contradictory flags.  */
2819         if ((write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2820                             WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2821                                 == (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2822                                     WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2823                 return WIMLIB_ERR_INVALID_PARAM;
2824
2825         if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2826                             WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2827                                 == (WIMLIB_WRITE_FLAG_PIPABLE |
2828                                     WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2829                 return WIMLIB_ERR_INVALID_PARAM;
2830
2831         /* Save previous header, then start initializing the new one.  */
2832         memcpy(&hdr_save, &wim->hdr, sizeof(struct wim_header));
2833
2834         /* Set default pipable and integrity flags, and reject the invalid
2835          * PIPABLE+SOLID combination.  */
2835         if (!(write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2836                              WIMLIB_WRITE_FLAG_NOT_PIPABLE)))
2837                 if (wim_is_pipable(wim)) {
2838                         DEBUG("WIM is pipable; default to PIPABLE.");
2839                         write_flags |= WIMLIB_WRITE_FLAG_PIPABLE;
2840                 }
2841
2842         if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2843                              WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)))
2844                 if (wim_has_integrity_table(wim)) {
2845                         DEBUG("Integrity table present; default to CHECK_INTEGRITY.");
2846                         write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
2847                 }
2848
2849         if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2850                             WIMLIB_WRITE_FLAG_SOLID))
2851                                     == (WIMLIB_WRITE_FLAG_PIPABLE |
2852                                         WIMLIB_WRITE_FLAG_SOLID))
2853         {
2854                 ERROR("Cannot specify both PIPABLE and SOLID!");
2855                 return WIMLIB_ERR_INVALID_PARAM;
2856         }
2857
2858         /* Set appropriate magic number.  */
2859         if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2860                 wim->hdr.magic = PWM_MAGIC;
2861         else
2862                 wim->hdr.magic = WIM_MAGIC;
2863
2864         /* Set appropriate version number.  */
2865         if ((write_flags & WIMLIB_WRITE_FLAG_SOLID) ||
2866             wim->out_compression_type == WIMLIB_COMPRESSION_TYPE_LZMS)
2867                 wim->hdr.wim_version = WIM_VERSION_SOLID;
2868         else
2869                 wim->hdr.wim_version = WIM_VERSION_DEFAULT;
2870
2871         /* Clear header flags that will be set automatically.  */
2872         wim->hdr.flags &= ~(WIM_HDR_FLAG_METADATA_ONLY          |
2873                             WIM_HDR_FLAG_RESOURCE_ONLY          |
2874                             WIM_HDR_FLAG_SPANNED                |
2875                             WIM_HDR_FLAG_WRITE_IN_PROGRESS);
2876
2877         /* Set SPANNED header flag if writing part of a split WIM.  */
2878         if (total_parts != 1)
2879                 wim->hdr.flags |= WIM_HDR_FLAG_SPANNED;
2880
2881         /* Set part number and total parts of split WIM.  This will be 1 and 1
2882          * if the WIM is standalone.  */
2883         wim->hdr.part_number = part_number;
2884         wim->hdr.total_parts = total_parts;
2885
2886         /* Set compression type if different.  */
2887         if (wim->compression_type != wim->out_compression_type) {
2888                 ret = set_wim_hdr_cflags(wim->out_compression_type, &wim->hdr);
2889                 wimlib_assert(ret == 0);
2890         }
2891
2892         /* Set the output chunk size.  */
2893         wim->hdr.chunk_size = wim->out_chunk_size;
2894
2895         /* Set GUID.  */
2896         if (!(write_flags & WIMLIB_WRITE_FLAG_RETAIN_GUID)) {
2897                 if (guid)
2898                         memcpy(wim->hdr.guid, guid, WIMLIB_GUID_LEN);
2899                 else
2900                         randomize_byte_array(wim->hdr.guid, WIMLIB_GUID_LEN);
2901         }
2902
2903         /* Clear references to resources that have not been written yet.  */
2904         zero_reshdr(&wim->hdr.blob_table_reshdr);
2905         zero_reshdr(&wim->hdr.xml_data_reshdr);
2906         zero_reshdr(&wim->hdr.boot_metadata_reshdr);
2907         zero_reshdr(&wim->hdr.integrity_table_reshdr);
2908
2909         /* Set image count and boot index correctly for single image writes.  */
2910         if (image != WIMLIB_ALL_IMAGES) {
2911                 wim->hdr.image_count = 1;
2912                 if (wim->hdr.boot_idx == image)
2913                         wim->hdr.boot_idx = 1;
2914                 else
2915                         wim->hdr.boot_idx = 0;
2916         }
2917
2918         /* Split WIMs can't be bootable.  */
2919         if (total_parts != 1)
2920                 wim->hdr.boot_idx = 0;
2921
2922         /* Initialize output file descriptor.  */
2923         if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) {
2924                 /* File descriptor was explicitly provided.  Return error if
2925                  * file descriptor is not seekable, unless writing a pipable WIM
2926                  * was requested.  */
2927                 wim->out_fd.fd = *(const int*)path_or_fd;
2928                 wim->out_fd.offset = 0;
2929                 if (!filedes_is_seekable(&wim->out_fd)) {
2930                         ret = WIMLIB_ERR_INVALID_PARAM;
2931                         if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2932                                 goto out_restore_hdr;
2933                         if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2934                                 ERROR("Can't include integrity check when "
2935                                       "writing pipable WIM to pipe!");
2936                                 goto out_restore_hdr;
2937                         }
2938                 }
2939
2940         } else {
2941                 /* Filename of WIM to write was provided; open file descriptor
2942                  * to it.  */
2943                 ret = open_wim_writable(wim, (const tchar*)path_or_fd,
2944                                         O_TRUNC | O_CREAT | O_RDWR);
2945                 if (ret)
2946                         goto out_restore_hdr;
2947         }
2948
2949         /* Write initial header.  This is merely a "dummy" header since it
2950          * doesn't have all the information yet, so it will be overwritten later
2951          * (unless writing a pipable WIM).  */
2952         if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2953                 wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2954         ret = write_wim_header(&wim->hdr, &wim->out_fd);
2955         wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2956         if (ret)
2957                 goto out_restore_hdr;
2958
2959         /* Write file blobs and metadata resources.  */
2960         if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE)) {
2961                 /* Default case: create a normal (non-pipable) WIM.  */
2962                 ret = write_file_blobs(wim, image, write_flags,
2963                                        num_threads,
2964                                        blob_list_override,
2965                                        &blob_table_list);
2966                 if (ret)
2967                         goto out_restore_hdr;
2968
2969                 ret = write_metadata_resources(wim, image, write_flags);
2970                 if (ret)
2971                         goto out_restore_hdr;
2972         } else {
2973                 /* Non-default case: create pipable WIM.  */
2974                 ret = write_pipable_wim(wim, image, write_flags, num_threads,
2975                                         blob_list_override,
2976                                         &blob_table_list);
2977                 if (ret)
2978                         goto out_restore_hdr;
2979                 write_flags |= WIMLIB_WRITE_FLAG_HEADER_AT_END;
2980         }
2981
2982
2983         /* Write blob table, XML data, and (optional) integrity table.  */
2984         ret = finish_write(wim, image, write_flags, &blob_table_list);
2985 out_restore_hdr:
2986         memcpy(&wim->hdr, &hdr_save, sizeof(struct wim_header));
2987         (void)close_wim_writable(wim, write_flags);
2988         DEBUG("ret=%d", ret);
2989         return ret;
2990 }
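
/*
 * Editor's note: write_wim_part() checks several mutually exclusive flag
 * pairs with the `(flags & (A | B)) == (A | B)' idiom, which is true only
 * when both bits are set.  A tiny self-contained illustration with
 * hypothetical flag names follows (not compiled).
 */
#if 0
#define EXAMPLE_FLAG_A 0x1
#define EXAMPLE_FLAG_B 0x2

/* Returns nonzero (invalid) when both of two incompatible flags are given. */
static int
example_flags_conflict(int flags)
{
	return (flags & (EXAMPLE_FLAG_A | EXAMPLE_FLAG_B)) ==
	       (EXAMPLE_FLAG_A | EXAMPLE_FLAG_B);
}
#endif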
2991
2992 /* Write a standalone WIM to a file or file descriptor.  */
2993 static int
2994 write_standalone_wim(WIMStruct *wim, const void *path_or_fd,
2995                      int image, int write_flags, unsigned num_threads)
2996 {
2997         return write_wim_part(wim, path_or_fd, image, write_flags,
2998                               num_threads, 1, 1, NULL, NULL);
2999 }
3000
3001 /* API function documented in wimlib.h  */
3002 WIMLIBAPI int
3003 wimlib_write(WIMStruct *wim, const tchar *path,
3004              int image, int write_flags, unsigned num_threads)
3005 {
3006         if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
3007                 return WIMLIB_ERR_INVALID_PARAM;
3008
3009         if (path == NULL || path[0] == T('\0'))
3010                 return WIMLIB_ERR_INVALID_PARAM;
3011
3012         return write_standalone_wim(wim, path, image, write_flags, num_threads);
3013 }
3014
3015 /* API function documented in wimlib.h  */
3016 WIMLIBAPI int
3017 wimlib_write_to_fd(WIMStruct *wim, int fd,
3018                    int image, int write_flags, unsigned num_threads)
3019 {
3020         if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
3021                 return WIMLIB_ERR_INVALID_PARAM;
3022
3023         if (fd < 0)
3024                 return WIMLIB_ERR_INVALID_PARAM;
3025
3026         write_flags |= WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR;
3027
3028         return write_standalone_wim(wim, &fd, image, write_flags, num_threads);
3029 }
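
/*
 * Editor's note: a minimal caller-side usage sketch for the two public entry
 * points above, assuming the usual wimlib_open_wim()/wimlib_free() functions
 * and the wimlib_tchar string type from wimlib.h.  Error handling is
 * abbreviated and the block is not compiled.
 */
#if 0
#include <wimlib.h>

static int
example_rewrite_wim(const wimlib_tchar *in_path, const wimlib_tchar *out_path)
{
	WIMStruct *wim;
	int ret;

	ret = wimlib_open_wim(in_path, 0, &wim);
	if (ret)
		return ret;

	/* Write all images to a new file, appending an integrity table.  */
	ret = wimlib_write(wim, out_path, WIMLIB_ALL_IMAGES,
			   WIMLIB_WRITE_FLAG_CHECK_INTEGRITY,
			   0 /* num_threads: autodetect */);

	wimlib_free(wim);
	return ret;
}
#endif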
3030
3031 static bool
3032 any_images_modified(WIMStruct *wim)
3033 {
3034         for (int i = 0; i < wim->hdr.image_count; i++)
3035                 if (wim->image_metadata[i]->modified)
3036                         return true;
3037         return false;
3038 }
3039
3040 static int
3041 check_resource_offset(struct blob_descriptor *blob, void *_wim)
3042 {
3043         const WIMStruct *wim = _wim;
3044         off_t end_offset = *(const off_t*)wim->private;
3045
3046         if (blob->blob_location == BLOB_IN_WIM &&
3047             blob->rdesc->wim == wim &&
3048             blob->rdesc->offset_in_wim + blob->rdesc->size_in_wim > end_offset)
3049                 return WIMLIB_ERR_RESOURCE_ORDER;
3050         return 0;
3051 }
3052
3053 /* Make sure no file or metadata resources are located after the XML data (or
3054  * integrity table if present)--- otherwise we can't safely overwrite the WIM in
3055  * place and we return WIMLIB_ERR_RESOURCE_ORDER.  */
3056 static int
3057 check_resource_offsets(WIMStruct *wim, off_t end_offset)
3058 {
3059         int ret;
3060         unsigned i;
3061
3062         wim->private = &end_offset;
3063         ret = for_blob_in_table(wim->blob_table, check_resource_offset, wim);
3064         if (ret)
3065                 return ret;
3066
3067         for (i = 0; i < wim->hdr.image_count; i++) {
3068                 ret = check_resource_offset(wim->image_metadata[i]->metadata_blob, wim);
3069                 if (ret)
3070                         return ret;
3071         }
3072         return 0;
3073 }
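
/*
 * Editor's note: check_resource_offset() is used as a callback for
 * for_blob_in_table(), with the context passed through wim->private.  A
 * generic sketch of that "callback returns nonzero to stop" convention is
 * shown below; the types and names are hypothetical and the block is not
 * compiled.
 */
#if 0
#include <stddef.h>

struct example_item { unsigned long long offset, size; };

typedef int (*example_visit_fn)(struct example_item *item, void *ctx);

static int
example_for_each(struct example_item *items, size_t count,
		 example_visit_fn visit, void *ctx)
{
	for (size_t i = 0; i < count; i++) {
		int ret = visit(&items[i], ctx);
		if (ret)            /* first nonzero return aborts the walk */
			return ret;
	}
	return 0;
}

/* Callback in the same style as check_resource_offset(): fail if an item
 * extends past the allowed end offset passed via @ctx.  */
static int
example_check_end(struct example_item *item, void *ctx)
{
	unsigned long long end = *(unsigned long long *)ctx;
	return (item->offset + item->size > end) ? -1 : 0;
}
#endif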
3074
3075 /*
3076  * Overwrite a WIM, possibly appending new resources to it.
3077  *
3078  * A WIM looks like (or is supposed to look like) the following:
3079  *
3080  *                   Header (208 bytes)
3081  *                   Resources for metadata and files (variable size)
3082  *                   Blob table (variable size)
3083  *                   XML data (variable size)
3084  *                   Integrity table (optional) (variable size)
3085  *
3086  * If we are not adding any new files or metadata, then the blob table is
3087  * unchanged--- so we only need to overwrite the XML data, integrity table, and
3088  * header.  This operation is potentially unsafe if the program is abruptly
3089  * terminated while the XML data or integrity table are being overwritten, but
3090  * before the new header has been written.  To partially alleviate this problem,
3091  * a special flag (WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) is passed to
3092  * finish_write() to cause a temporary WIM header to be written after the XML
3093  * data has been written.  This may prevent the WIM from becoming corrupted if
3094  * the program is terminated while the integrity table is being calculated (but
3095  * no guarantees, due to write re-ordering...).
3096  *
3097  * If we are adding new blobs, including new file data as well as any metadata
3098  * for any new images, then the blob table needs to be changed, and those blobs
3099  * need to be written.  In this case, we try to perform a safe update of the WIM
3100  * file by writing the blobs *after* the end of the previous WIM, then writing
3101  * the new blob table, XML data, and (optionally) integrity table following the
3102  * new blobs.  This will produce a layout like the following:
3103  *
3104  *                   Header (208 bytes)
3105  *                   (OLD) Resources for metadata and files (variable size)
3106  *                   (OLD) Blob table (variable size)
3107  *                   (OLD) XML data (variable size)
3108  *                   (OLD) Integrity table (optional) (variable size)
3109  *                   (NEW) Resources for metadata and files (variable size)
3110  *                   (NEW) Blob table (variable size)
3111  *                   (NEW) XML data (variable size)
3112  *                   (NEW) Integrity table (optional) (variable size)
3113  *
3114  * At all points, the WIM is valid as nothing points to the new data yet.  Then,
3115  * the header is overwritten to point to the new blob table, XML data, and
3116  * integrity table, to produce the following layout:
3117  *
3118  *                   Header (208 bytes)
3119  *                   Resources for metadata and files (variable size)
3120  *                   Nothing (variable size)
3121  *                   Resources for metadata and files (variable size)
3122  *                   Blob table (variable size)
3123  *                   XML data (variable size)
3124  *                   Integrity table (optional) (variable size)
3125  *
3126  * This method allows an image to be appended to a large WIM very quickly, and
3127  * is crash-safe except in the case of write re-ordering, but the disadvantage
3128  * is that a small hole is left in the WIM where the old blob table, XML data,
3129  * and integrity table were.  (These usually only take up a small amount of
3130  * space compared to the blobs, however.)
3131  */
3132 static int
3133 overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads)
3134 {
3135         int ret;
3136         off_t old_wim_end;
3137         u64 old_blob_table_end, old_xml_begin, old_xml_end;
3138         struct wim_header hdr_save;
3139         struct list_head blob_list;
3140         struct list_head blob_table_list;
3141         struct filter_context filter_ctx;
3142
3143         DEBUG("Overwriting `%"TS"' in-place", wim->filename);
3144
3145         /* Save original header so it can be restored in case of error  */
3146         memcpy(&hdr_save, &wim->hdr, sizeof(struct wim_header));
3147
3148         /* Set default integrity flag.  */
3149         if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
3150                              WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)))
3151                 if (wim_has_integrity_table(wim))
3152                         write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
3153
3154         /* Set WIM version if writing solid resources.  */
3155         if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
3156                 wim->hdr.wim_version = WIM_VERSION_SOLID;
3157
3158         /* Set additional flags for overwrite.  */
3159         write_flags |= WIMLIB_WRITE_FLAG_OVERWRITE |
3160                        WIMLIB_WRITE_FLAG_STREAMS_OK;
3161
3162         /* Make sure there is no data after the XML data, except possibly an
3163          * integrity table.  Any such data would otherwise be
3164          * overwritten.  */
3165         old_xml_begin = wim->hdr.xml_data_reshdr.offset_in_wim;
3166         old_xml_end = old_xml_begin + wim->hdr.xml_data_reshdr.size_in_wim;
3167         old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
3168                              wim->hdr.blob_table_reshdr.size_in_wim;
3169         if (wim->hdr.integrity_table_reshdr.offset_in_wim != 0 &&
3170             wim->hdr.integrity_table_reshdr.offset_in_wim < old_xml_end) {
3171                 WARNING("Didn't expect the integrity table to be before the XML data");
3172                 ret = WIMLIB_ERR_RESOURCE_ORDER;
3173                 goto out_restore_memory_hdr;
3174         }
3175
3176         if (old_blob_table_end > old_xml_begin) {
3177                 WARNING("Didn't expect the blob table to be after the XML data");
3178                 ret = WIMLIB_ERR_RESOURCE_ORDER;
3179                 goto out_restore_memory_hdr;
3180         }
3181
3182         /* Set @old_wim_end, the point beyond which we do not allow any file
3183          * or metadata resources to appear; otherwise we return
3184          * WIMLIB_ERR_RESOURCE_ORDER, since appending after that point would
3185          * overwrite those resources. */
3186         if (!wim->image_deletion_occurred && !any_images_modified(wim)) {
3187                 /* If no images have been modified and no images have been
3188                  * deleted, a new blob table does not need to be written.  We
3189                  * shall write the new XML data and optional integrity table
3190                  * immediately after the blob table.  Note that this may
3191                  * overwrite an existing integrity table. */
3192                 DEBUG("Skipping writing blob table "
3193                       "(no images modified or deleted)");
3194                 old_wim_end = old_blob_table_end;
3195                 write_flags |= WIMLIB_WRITE_FLAG_NO_BLOB_TABLE |
3196                                WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML;
3197         } else if (wim->hdr.integrity_table_reshdr.offset_in_wim != 0) {
3198                 /* Old WIM has an integrity table; begin writing new blobs after
3199                  * it. */
3200                 old_wim_end = wim->hdr.integrity_table_reshdr.offset_in_wim +
3201                               wim->hdr.integrity_table_reshdr.size_in_wim;
3202         } else {
3203                 /* No existing integrity table; begin writing new blobs after
3204                  * the old XML data. */
3205                 old_wim_end = old_xml_end;
3206         }
3207
3208         ret = check_resource_offsets(wim, old_wim_end);
3209         if (ret)
3210                 goto out_restore_memory_hdr;
3211
3212         ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES, write_flags,
3213                                           &blob_list, &blob_table_list,
3214                                           &filter_ctx);
3215         if (ret)
3216                 goto out_restore_memory_hdr;
3217
3218         ret = open_wim_writable(wim, wim->filename, O_RDWR);
3219         if (ret)
3220                 goto out_restore_memory_hdr;
3221
3222         ret = lock_wim_for_append(wim);
3223         if (ret)
3224                 goto out_close_wim;
3225
3226         /* Set WIM_HDR_FLAG_WRITE_IN_PROGRESS flag in header. */
3227         wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
3228         ret = write_wim_header_flags(wim->hdr.flags, &wim->out_fd);
3229         if (ret) {
3230                 ERROR_WITH_ERRNO("Error updating WIM header flags");
3231                 goto out_unlock_wim;
3232         }
3233
3234         if (filedes_seek(&wim->out_fd, old_wim_end) == -1) {
3235                 ERROR_WITH_ERRNO("Can't seek to end of WIM");
3236                 ret = WIMLIB_ERR_WRITE;
3237                 goto out_restore_physical_hdr;
3238         }
3239
3240         ret = wim_write_blob_list(wim, &blob_list, write_flags,
3241                                   num_threads, &filter_ctx);
3242         if (ret)
3243                 goto out_truncate;
3244
3245         ret = write_metadata_resources(wim, WIMLIB_ALL_IMAGES, write_flags);
3246         if (ret)
3247                 goto out_truncate;
3248
3249         ret = finish_write(wim, WIMLIB_ALL_IMAGES, write_flags,
3250                            &blob_table_list);
3251         if (ret)
3252                 goto out_truncate;
3253
3254         unlock_wim_for_append(wim);
3255         return 0;
3256
3257 out_truncate:
3258         if (!(write_flags & WIMLIB_WRITE_FLAG_NO_BLOB_TABLE)) {
3259                 WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)",
3260                         wim->filename, old_wim_end);
3261                 /* Return value of ftruncate() is ignored because this is
3262                  * already an error path.  */
3263                 (void)ftruncate(wim->out_fd.fd, old_wim_end);
3264         }
3265 out_restore_physical_hdr:
3266         (void)write_wim_header_flags(hdr_save.flags, &wim->out_fd);
3267 out_unlock_wim:
3268         unlock_wim_for_append(wim);
3269 out_close_wim:
3270         (void)close_wim_writable(wim, write_flags);
3271 out_restore_memory_hdr:
3272         memcpy(&wim->hdr, &hdr_save, sizeof(struct wim_header));
3273         return ret;
3274 }
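
/*
 * Editor's note: a condensed sketch of the append-then-commit ordering that
 * overwrite_wim_inplace() implements: new data is appended after the old end
 * of the file, and only once it is fully written is the fixed-size header at
 * offset 0 rewritten to reference it; on failure the file is truncated back
 * to its original size.  All names below are hypothetical and the block is
 * not compiled.
 */
#if 0
#include <sys/types.h>
#include <unistd.h>

static int
example_append_and_commit(int fd, off_t old_end,
			  const void *new_data, size_t new_size,
			  const void *new_hdr, size_t hdr_size)
{
	/* 1. Append the new data after everything the old header references.
	 *    Until the header is rewritten, readers still see the old, fully
	 *    valid file.  */
	if (pwrite(fd, new_data, new_size, old_end) != (ssize_t)new_size)
		goto fail;

	/* 2. Commit by overwriting the header at offset 0 so that it points
	 *    at the newly appended structures.  */
	if (pwrite(fd, new_hdr, hdr_size, 0) != (ssize_t)hdr_size)
		goto fail;
	return 0;

fail:
	/* Roll back: discard whatever was appended.  */
	(void)ftruncate(fd, old_end);
	return -1;
}
#endif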
3275
3276 static int
3277 overwrite_wim_via_tmpfile(WIMStruct *wim, int write_flags, unsigned num_threads)
3278 {
3279         size_t wim_name_len;
3280         int ret;
3281
3282         DEBUG("Overwriting `%"TS"' via a temporary file", wim->filename);
3283
3284         /* Write the WIM to a temporary file in the same directory as the
3285          * original WIM. */
3286         wim_name_len = tstrlen(wim->filename);
3287         tchar tmpfile[wim_name_len + 10];
3288         tmemcpy(tmpfile, wim->filename, wim_name_len);
3289         randomize_char_array_with_alnum(tmpfile + wim_name_len, 9);
3290         tmpfile[wim_name_len + 9] = T('\0');
3291
3292         ret = wimlib_write(wim, tmpfile, WIMLIB_ALL_IMAGES,
3293                            write_flags |
3294                                 WIMLIB_WRITE_FLAG_FSYNC |
3295                                 WIMLIB_WRITE_FLAG_RETAIN_GUID,
3296                            num_threads);
3297         if (ret) {
3298                 tunlink(tmpfile);
3299                 return ret;
3300         }
3301
3302         if (filedes_valid(&wim->in_fd)) {
3303                 filedes_close(&wim->in_fd);
3304                 filedes_invalidate(&wim->in_fd);
3305         }
3306
3307         /* Rename the new WIM file to the original WIM file.  Note: on Windows
3308          * this actually calls win32_rename_replacement(), not _wrename(), so
3309          * that an existing destination file can be removed and replaced.  */
3310         DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, wim->filename);
3311         ret = trename(tmpfile, wim->filename);
3312         if (ret) {
3313                 ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'",
3314                                  tmpfile, wim->filename);
3315         #ifdef __WIN32__
3316                 if (ret < 0)
3317         #endif
3318                 {
3319                         tunlink(tmpfile);
3320                 }
3321                 return WIMLIB_ERR_RENAME;
3322         }
3323
3324         union wimlib_progress_info progress;
3325         progress.rename.from = tmpfile;
3326         progress.rename.to = wim->filename;
3327         return call_progress(wim->progfunc, WIMLIB_PROGRESS_MSG_RENAME,
3328                              &progress, wim->progctx);
3329 }
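
/*
 * Editor's note: a minimal sketch of the write-to-temporary-then-rename
 * replacement that overwrite_wim_via_tmpfile() performs, including the
 * fsync() that must happen before rename() so the new contents reach disk
 * before the rename is committed.  Names below are hypothetical and the
 * block is not compiled.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int
example_replace_file(const char *tmp_path, const char *final_path,
		     const void *data, size_t size)
{
	int fd = open(tmp_path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return -1;

	if (write(fd, data, size) != (ssize_t)size || fsync(fd) != 0) {
		close(fd);
		unlink(tmp_path);
		return -1;
	}
	if (close(fd) != 0) {
		unlink(tmp_path);
		return -1;
	}

	/* Only after the data is durable do we commit the replacement.  */
	if (rename(tmp_path, final_path) != 0) {
		unlink(tmp_path);
		return -1;
	}
	return 0;
}
#endif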
3330
3331 /* Determine if the specified WIM file may be updated by appending in-place
3332  * rather than writing and replacing it with an entirely new file.  */
3333 static bool
3334 can_overwrite_wim_inplace(const WIMStruct *wim, int write_flags)
3335 {
3336         /* REBUILD flag forces full rebuild.  */
3337         if (write_flags & WIMLIB_WRITE_FLAG_REBUILD)
3338                 return false;
3339
3340         /* Image deletions cause full rebuild by default.  */
3341         if (wim->image_deletion_occurred &&
3342             !(write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
3343                 return false;
3344
3345         /* Pipable WIMs cannot be updated in place, nor can a non-pipable WIM be
3346          * turned into a pipable WIM in-place.  */
3347         if (wim_is_pipable(wim) || (write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
3348                 return false;
3349
3350         /* The default compression type and compression chunk size selected for
3351          * the output WIM must be the same as those currently used for the WIM.
3352          */
3353         if (wim->compression_type != wim->out_compression_type)
3354                 return false;
3355         if (wim->chunk_size != wim->out_chunk_size)
3356                 return false;
3357
3358         return true;
3359 }
3360
3361 /* API function documented in wimlib.h  */
3362 WIMLIBAPI int
3363 wimlib_overwrite(WIMStruct *wim, int write_flags, unsigned num_threads)
3364 {
3365         int ret;
3366         u32 orig_hdr_flags;
3367
3368         if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
3369                 return WIMLIB_ERR_INVALID_PARAM;
3370
3371         if (!wim->filename)
3372                 return WIMLIB_ERR_NO_FILENAME;
3373
3374         orig_hdr_flags = wim->hdr.flags;
3375         if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
3376                 wim->hdr.flags &= ~WIM_HDR_FLAG_READONLY;
3377         ret = can_modify_wim(wim);
3378         wim->hdr.flags = orig_hdr_flags;
3379         if (ret)
3380                 return ret;
3381
3382         if (can_overwrite_wim_inplace(wim, write_flags)) {
3383                 ret = overwrite_wim_inplace(wim, write_flags, num_threads);
3384                 if (ret != WIMLIB_ERR_RESOURCE_ORDER)
3385                         return ret;
3386                 WARNING("Falling back to re-building entire WIM");
3387         }
3388         return overwrite_wim_via_tmpfile(wim, write_flags, num_threads);
3389 }
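
/*
 * Editor's note: a minimal usage sketch for wimlib_overwrite(), assuming the
 * usual wimlib_open_wim()/wimlib_free() entry points from wimlib.h.  The step
 * that actually modifies the WIM (adding or updating an image) is elided;
 * this only shows how the changes are committed.  Not compiled.
 */
#if 0
#include <wimlib.h>

static int
example_update_in_place(const wimlib_tchar *path)
{
	WIMStruct *wim;
	int ret;

	ret = wimlib_open_wim(path, WIMLIB_OPEN_FLAG_WRITE_ACCESS, &wim);
	if (ret)
		return ret;

	/* ... modify the WIM here (e.g. add or update an image) ... */

	/* Commit the changes, appending in place when possible and falling
	 * back to a full rebuild otherwise (see wimlib_overwrite() above).  */
	ret = wimlib_overwrite(wim, WIMLIB_WRITE_FLAG_CHECK_INTEGRITY,
			       0 /* num_threads: autodetect */);

	wimlib_free(wim);
	return ret;
}
#endif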