4 * Support for writing WIM files; write a WIM file, overwrite a WIM file, write
5 * compressed file resources, etc.
9 * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers
11 * This file is free software; you can redistribute it and/or modify it under
12 * the terms of the GNU Lesser General Public License as published by the Free
13 * Software Foundation; either version 3 of the License, or (at your option) any
16 * This file is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
21 * You should have received a copy of the GNU Lesser General Public License
22 * along with this file; if not, see http://www.gnu.org/licenses/.
29 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
30 /* On BSD, this should be included before "wimlib/list.h" so that "wimlib/list.h" can
31 * overwrite the LIST_HEAD macro. */
32 # include <sys/file.h>
40 #include "wimlib/alloca.h"
41 #include "wimlib/assert.h"
42 #include "wimlib/blob_table.h"
43 #include "wimlib/chunk_compressor.h"
44 #include "wimlib/endianness.h"
45 #include "wimlib/error.h"
46 #include "wimlib/file_io.h"
47 #include "wimlib/header.h"
48 #include "wimlib/inode.h"
49 #include "wimlib/integrity.h"
50 #include "wimlib/metadata.h"
51 #include "wimlib/paths.h"
52 #include "wimlib/progress.h"
53 #include "wimlib/resource.h"
54 #include "wimlib/solid.h"
56 # include "wimlib/win32.h" /* win32_rename_replacement() */
58 #include "wimlib/write.h"
59 #include "wimlib/xml.h"
62 /* wimlib internal flags used when writing resources. */
63 #define WRITE_RESOURCE_FLAG_RECOMPRESS 0x00000001 /* from WIMLIB_WRITE_FLAG_RECOMPRESS: don't reuse existing compressed data */
64 #define WRITE_RESOURCE_FLAG_PIPABLE 0x00000002 /* from WIMLIB_WRITE_FLAG_PIPABLE: sequential, pipe-friendly layout */
65 #define WRITE_RESOURCE_FLAG_SOLID 0x00000004 /* from WIMLIB_WRITE_FLAG_SOLID: pack blobs into solid resources */
66 #define WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE 0x00000008 /* from WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES */
67 #define WRITE_RESOURCE_FLAG_SOLID_SORT 0x00000010 /* solid mode without WIMLIB_WRITE_FLAG_NO_SOLID_SORT */
70 write_flags_to_resource_flags(int write_flags)
72 int write_resource_flags = 0;
74 if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
75 write_resource_flags |= WRITE_RESOURCE_FLAG_RECOMPRESS;
76 if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
77 write_resource_flags |= WRITE_RESOURCE_FLAG_PIPABLE;
78 if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
79 write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID;
80 if (write_flags & WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES)
81 write_resource_flags |= WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE;
82 if ((write_flags & (WIMLIB_WRITE_FLAG_SOLID |
83 WIMLIB_WRITE_FLAG_NO_SOLID_SORT)) ==
84 WIMLIB_WRITE_FLAG_SOLID)
85 write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID_SORT;
86 return write_resource_flags;
/* Parameters controlling which blobs are filtered out of the write.
 * NOTE(review): the members are elided in this excerpt; presumably the
 * WIMLIB_WRITE_FLAG_* flags and the target WIMStruct, as read by
 * blob_filtered() below -- confirm against the full source. */
89 struct filter_context {
95 * Determine whether the specified blob should be filtered out from the write.
99 * < 0 : The blob should be hard-filtered; that is, not included in the output
101 * 0 : The blob should not be filtered out.
102 * > 0 : The blob should be soft-filtered; that is, it already exists in the
103 * WIM file and may not need to be written again.
106 blob_filtered(const struct blob_descriptor *blob,
107 const struct filter_context *ctx)
115 write_flags = ctx->write_flags;
/* Overwriting a WIM in place: a blob already stored in that same WIM
 * does not need to be rewritten (soft filter, > 0 per the contract
 * above). */
118 if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE &&
119 blob->blob_location == BLOB_IN_WIM &&
120 blob->rdesc->wim == wim)
/* Blob resides in a different WIM and external WIMs are being skipped:
 * exclude it entirely (hard filter, < 0 per the contract above). */
123 if (write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS &&
124 blob->blob_location == BLOB_IN_WIM &&
125 blob->rdesc->wim != wim)
/* Return true if the blob must be "hard-filtered", i.e. completely excluded
 * from the output WIM (see blob_filtered() for the verdict convention). */
static inline bool
blob_hard_filtered(const struct blob_descriptor *blob,
		   struct filter_context *ctx)
{
	return blob_filtered(blob, ctx) < 0;
}
139 may_soft_filter_blobs(const struct filter_context *ctx)
143 return ctx->write_flags & WIMLIB_WRITE_FLAG_OVERWRITE;
147 may_hard_filter_blobs(const struct filter_context *ctx)
151 return ctx->write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS;
/* Could any blobs be filtered out (either softly or hardly) under these
 * filter parameters? */
static inline bool
may_filter_blobs(const struct filter_context *ctx)
{
	return (may_soft_filter_blobs(ctx) || may_hard_filter_blobs(ctx));
}
160 /* Return true if the specified resource is compressed and the compressed data
161 * can be reused with the specified output parameters. */
163 can_raw_copy(const struct blob_descriptor *blob,
164 int write_resource_flags, int out_ctype, u32 out_chunk_size)
166 const struct wim_resource_descriptor *rdesc;
/* Forced recompression rules out reusing any existing compressed data. */
168 if (write_resource_flags & WRITE_RESOURCE_FLAG_RECOMPRESS)
/* Uncompressed output cannot reuse compressed input data. */
171 if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
/* Raw copy is only possible for blobs currently stored in a WIM. */
174 if (blob->blob_location != BLOB_IN_WIM)
/* Pipable and non-pipable resources use different on-disk layouts, so
 * the pipable-ness of the input must match that of the output. */
179 if (rdesc->is_pipable != !!(write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE))
182 if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) {
183 /* Normal compressed resource: Must use same compression type
185 return (rdesc->compression_type == out_ctype &&
186 rdesc->chunk_size == out_chunk_size);
189 if ((rdesc->flags & WIM_RESHDR_FLAG_SOLID) &&
190 (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
192 /* Solid resource: Such resources may contain multiple blobs,
193 * and in general only a subset of them need to be written. As
194 * a heuristic, re-use the raw data if more than two-thirds the
195 * uncompressed size is being written. */
197 /* Note: solid resources contain a header that specifies the
198 * compression type and chunk size; therefore we don't need to
199 * check if they are compatible with @out_ctype and
200 * @out_chunk_size. */
202 struct blob_descriptor *res_blob;
/* Total the sizes of this resource's blobs that were actually
 * selected for the output WIM. */
205 list_for_each_entry(res_blob, &rdesc->blob_list, rdesc_node)
206 if (res_blob->will_be_in_output_wim)
207 write_size += res_blob->size;
209 return (write_size > rdesc->uncompressed_size * 2 / 3);
216 filter_resource_flags(u8 flags)
218 return (flags & ~(WIM_RESHDR_FLAG_SOLID |
219 WIM_RESHDR_FLAG_COMPRESSED |
220 WIM_RESHDR_FLAG_SPANNED |
221 WIM_RESHDR_FLAG_FREE));
/* Fill in blob->out_reshdr (and, for blobs in solid resources, the
 * out_res_* fields) so that the blob's existing on-disk resource can be
 * reused verbatim in the output WIM. */
225 blob_set_out_reshdr_for_reuse(struct blob_descriptor *blob)
227 const struct wim_resource_descriptor *rdesc;
229 wimlib_assert(blob->blob_location == BLOB_IN_WIM);
232 if (rdesc->flags & WIM_RESHDR_FLAG_SOLID) {
234 wimlib_assert(blob->flags & WIM_RESHDR_FLAG_SOLID);
/* Solid blob: out_reshdr holds the blob's offset *within* the solid
 * resource, with uncompressed_size set to 0. */
236 blob->out_reshdr.offset_in_wim = blob->offset_in_res;
237 blob->out_reshdr.uncompressed_size = 0;
238 blob->out_reshdr.size_in_wim = blob->size;
/* ...while the out_res_* fields locate the containing solid resource
 * itself. */
240 blob->out_res_offset_in_wim = rdesc->offset_in_wim;
241 blob->out_res_size_in_wim = rdesc->size_in_wim;
242 blob->out_res_uncompressed_size = rdesc->uncompressed_size;
244 wimlib_assert(!(blob->flags & WIM_RESHDR_FLAG_SOLID));
/* Non-solid blob: reuse the resource's own location and sizes. */
246 blob->out_reshdr.offset_in_wim = rdesc->offset_in_wim;
247 blob->out_reshdr.uncompressed_size = rdesc->uncompressed_size;
248 blob->out_reshdr.size_in_wim = rdesc->size_in_wim;
250 blob->out_reshdr.flags = blob->flags;
254 /* Write the header for a blob in a pipable WIM. */
256 write_pwm_blob_header(const struct blob_descriptor *blob,
257 struct filedes *out_fd, int additional_reshdr_flags)
259 struct pwm_blob_hdr blob_hdr;
263 blob_hdr.magic = cpu_to_le64(PWM_BLOB_MAGIC);
264 blob_hdr.uncompressed_size = cpu_to_le64(blob->size);
265 if (additional_reshdr_flags & PWM_RESHDR_FLAG_UNHASHED) {
/* Unhashed blob: the SHA-1 field is zeroed rather than filled in. */
266 zero_out_hash(blob_hdr.hash);
268 wimlib_assert(!blob->unhashed);
269 copy_hash(blob_hdr.hash, blob->hash);
/* Strip storage-detail flags from the input, then add the flags the
 * caller requires for this header. */
272 reshdr_flags = filter_resource_flags(blob->flags);
273 reshdr_flags |= additional_reshdr_flags;
274 blob_hdr.flags = cpu_to_le32(reshdr_flags);
275 ret = full_write(out_fd, &blob_hdr, sizeof(blob_hdr));
277 ERROR_WITH_ERRNO("Write error");
/* State for issuing rate-limited WRITE_STREAMS progress messages
 * (see do_write_blobs_progress()). */
281 struct write_blobs_progress_data {
282 wimlib_progress_func_t progfunc;
284 union wimlib_progress_info progress;
/* completed_bytes threshold at which the next progress message will be
 * sent; ~0 disables further messages. */
285 uint64_t next_progress;
/* Account for @complete_size bytes and @complete_count blobs having been
 * processed (@discarded: removed from the totals rather than counted as
 * completed), and fire the WRITE_STREAMS progress callback if due. */
289 do_write_blobs_progress(struct write_blobs_progress_data *progress_data,
290 u64 complete_size, u32 complete_count, bool discarded)
292 union wimlib_progress_info *progress = &progress_data->progress;
/* Discarded blobs shrink the totals instead of advancing completion. */
296 progress->write_streams.total_bytes -= complete_size;
297 progress->write_streams.total_streams -= complete_count;
/* Keep the next-message threshold within the (reduced) total. */
298 if (progress_data->next_progress != ~(uint64_t)0 &&
299 progress_data->next_progress > progress->write_streams.total_bytes)
301 progress_data->next_progress = progress->write_streams.total_bytes;
304 progress->write_streams.completed_bytes += complete_size;
305 progress->write_streams.completed_streams += complete_count;
/* Only fire the callback once the rate-limit threshold is reached. */
308 if (progress->write_streams.completed_bytes >= progress_data->next_progress)
310 ret = call_progress(progress_data->progfunc,
311 WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
313 progress_data->progctx);
317 if (progress_data->next_progress == progress->write_streams.total_bytes) {
/* Everything has been reported; disable further messages. */
318 progress_data->next_progress = ~(uint64_t)0;
320 /* Handle rate-limiting of messages */
322 /* Send new message as soon as another 1/128 of the
323 * total has been written. (Arbitrary number.) */
324 progress_data->next_progress =
325 progress->write_streams.completed_bytes +
326 progress->write_streams.total_bytes / 128;
328 /* ... Unless that would be more than 5000000 bytes, in
329 * which case send the next after the next 5000000
330 * bytes. (Another arbitrary number.) */
331 if (progress->write_streams.completed_bytes + 5000000 <
332 progress_data->next_progress)
333 progress_data->next_progress =
334 progress->write_streams.completed_bytes + 5000000;
336 /* ... But always send a message as soon as we're
337 * completely done. */
338 if (progress->write_streams.total_bytes <
339 progress_data->next_progress)
340 progress_data->next_progress =
341 progress->write_streams.total_bytes;
/* Shared state for the blob-writing callbacks write_blob_begin_read(),
 * write_blob_process_chunk() and write_blob_end_read(). */
347 struct write_blobs_ctx {
348 /* File descriptor to which the blobs are being written. */
349 struct filedes *out_fd;
351 /* Blob table for the WIMStruct on whose behalf the blobs are being
353 struct blob_table *blob_table;
355 /* Compression format to use. */
358 /* Maximum uncompressed chunk size in compressed resources to use. */
361 /* Flags that affect how the blobs will be written. */
362 int write_resource_flags;
364 /* Data used for issuing WRITE_STREAMS progress. */
365 struct write_blobs_progress_data progress_data;
367 struct filter_context *filter_ctx;
369 /* Upper bound on the total number of bytes that need to be compressed.
371 u64 num_bytes_to_compress;
373 /* Pointer to the chunk_compressor implementation being used for
374 * compressing chunks of data, or NULL if chunks are being written
376 struct chunk_compressor *compressor;
378 /* A buffer of size @out_chunk_size that has been loaned out from the
379 * chunk compressor and is currently being filled with the uncompressed
380 * data of the next chunk. */
383 /* Number of bytes in @cur_chunk_buf that are currently filled. */
384 size_t cur_chunk_buf_filled;
386 /* List of blobs that currently have chunks being compressed. */
387 struct list_head blobs_being_compressed;
389 /* List of blobs in the solid resource. Blobs are moved here after
390 * @blobs_being_compressed only when writing a solid resource. */
391 struct list_head blobs_in_solid_resource;
393 /* Current uncompressed offset in the blob being read. */
394 u64 cur_read_blob_offset;
396 /* Uncompressed size of the blob currently being read. */
397 u64 cur_read_blob_size;
399 /* Current uncompressed offset in the blob being written. */
400 u64 cur_write_blob_offset;
402 /* Uncompressed size of resource currently being written. */
403 u64 cur_write_res_size;
405 /* Array that is filled in with compressed chunk sizes as a resource is
409 /* Index of next entry in @chunk_csizes to fill in. */
412 /* Number of entries in @chunk_csizes currently allocated. */
413 size_t num_alloc_chunks;
415 /* Offset in the output file of the start of the chunks of the resource
416 * currently being written. */
417 u64 chunks_start_offset;
420 /* Reserve space for the chunk table and prepare to accumulate the chunk table
423 begin_chunk_table(struct write_blobs_ctx *ctx, u64 res_expected_size)
425 u64 expected_num_chunks;
426 u64 expected_num_chunk_entries;
430 /* Calculate the number of chunks and chunk entries that should be
431 * needed for the resource. These normally will be the final values,
432 * but in SOLID mode some of the blobs we're planning to write into the
433 * resource may be duplicates, and therefore discarded, potentially
434 * decreasing the number of chunk entries needed. */
435 expected_num_chunks = DIV_ROUND_UP(res_expected_size, ctx->out_chunk_size);
436 expected_num_chunk_entries = expected_num_chunks;
/* Non-solid chunk tables omit the entry for the first chunk. */
437 if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
438 expected_num_chunk_entries--;
440 /* Make sure the chunk_csizes array is long enough to store the
441 * compressed size of each chunk. */
442 if (expected_num_chunks > ctx->num_alloc_chunks) {
443 u64 new_length = expected_num_chunks + 50;
/* Reject counts that would overflow size_t on this platform. */
445 if ((size_t)new_length != new_length) {
446 ERROR("Resource size too large (%"PRIu64" bytes!",
448 return WIMLIB_ERR_NOMEM;
451 FREE(ctx->chunk_csizes);
452 ctx->chunk_csizes = MALLOC(new_length * sizeof(ctx->chunk_csizes[0]));
453 if (ctx->chunk_csizes == NULL) {
454 ctx->num_alloc_chunks = 0;
455 return WIMLIB_ERR_NOMEM;
457 ctx->num_alloc_chunks = new_length;
460 ctx->chunk_index = 0;
462 if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)) {
463 /* Reserve space for the chunk table in the output file. In the
464 * case of solid resources this reserves the upper bound for the
465 * needed space, not necessarily the exact space which will
466 * prove to be needed. At this point, we just use @chunk_csizes
467 * for a buffer of 0's because the actual compressed chunk sizes
469 reserve_size = expected_num_chunk_entries *
470 get_chunk_entry_size(res_expected_size,
471 0 != (ctx->write_resource_flags &
472 WRITE_RESOURCE_FLAG_SOLID));
/* Solid resources are preceded by an extra on-disk header. */
473 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)
474 reserve_size += sizeof(struct alt_chunk_table_header_disk);
475 memset(ctx->chunk_csizes, 0, reserve_size);
476 ret = full_write(ctx->out_fd, ctx->chunk_csizes, reserve_size);
/* Begin writing a new resource, of expected uncompressed size
 * @res_expected_size, at the current output file offset. */
484 begin_write_resource(struct write_blobs_ctx *ctx, u64 res_expected_size)
488 wimlib_assert(res_expected_size != 0);
/* Only compressed output needs a chunk table reserved up front. */
490 if (ctx->compressor != NULL) {
491 ret = begin_chunk_table(ctx, res_expected_size);
496 /* Output file descriptor is now positioned at the offset at which to
497 * write the first chunk of the resource. */
498 ctx->chunks_start_offset = ctx->out_fd->offset;
499 ctx->cur_write_blob_offset = 0;
500 ctx->cur_write_res_size = res_expected_size;
/* Finalize the chunk table of the resource just written: build the on-disk
 * table in place over @chunk_csizes, then either append it (pipable) or
 * write it back into the space reserved by begin_chunk_table(). */
505 end_chunk_table(struct write_blobs_ctx *ctx, u64 res_actual_size,
506 u64 *res_start_offset_ret, u64 *res_store_size_ret)
508 size_t actual_num_chunks;
509 size_t actual_num_chunk_entries;
510 size_t chunk_entry_size;
513 actual_num_chunks = ctx->chunk_index;
514 actual_num_chunk_entries = actual_num_chunks;
/* As in begin_chunk_table(): non-solid tables omit the first entry. */
515 if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
516 actual_num_chunk_entries--;
518 chunk_entry_size = get_chunk_entry_size(res_actual_size,
519 0 != (ctx->write_resource_flags &
520 WRITE_RESOURCE_FLAG_SOLID));
/* The table is built in place over the raw chunk_csizes buffer; the
 * may_alias typedefs avoid strict-aliasing violations on that cast. */
522 typedef le64 _may_alias_attribute aliased_le64_t;
523 typedef le32 _may_alias_attribute aliased_le32_t;
525 if (chunk_entry_size == 4) {
526 aliased_le32_t *entries = (aliased_le32_t*)ctx->chunk_csizes;
/* Solid format stores each chunk's compressed size directly... */
528 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
529 for (size_t i = 0; i < actual_num_chunk_entries; i++)
530 entries[i] = cpu_to_le32(ctx->chunk_csizes[i]);
/* ...whereas the normal format stores cumulative offsets. */
532 u32 offset = ctx->chunk_csizes[0];
533 for (size_t i = 0; i < actual_num_chunk_entries; i++) {
534 u32 next_size = ctx->chunk_csizes[i + 1];
535 entries[i] = cpu_to_le32(offset);
540 aliased_le64_t *entries = (aliased_le64_t*)ctx->chunk_csizes;
542 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
543 for (size_t i = 0; i < actual_num_chunk_entries; i++)
544 entries[i] = cpu_to_le64(ctx->chunk_csizes[i]);
546 u64 offset = ctx->chunk_csizes[0];
547 for (size_t i = 0; i < actual_num_chunk_entries; i++) {
548 u64 next_size = ctx->chunk_csizes[i + 1];
549 entries[i] = cpu_to_le64(offset);
555 size_t chunk_table_size = actual_num_chunk_entries * chunk_entry_size;
556 u64 res_start_offset;
/* Pipable output is strictly sequential: append the table after the
 * chunk data rather than seeking back. */
559 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
560 ret = full_write(ctx->out_fd, ctx->chunk_csizes, chunk_table_size);
563 res_end_offset = ctx->out_fd->offset;
564 res_start_offset = ctx->chunks_start_offset;
566 res_end_offset = ctx->out_fd->offset;
568 u64 chunk_table_offset;
/* Seekable output: overwrite the space reserved before the chunks. */
570 chunk_table_offset = ctx->chunks_start_offset - chunk_table_size;
572 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
573 struct alt_chunk_table_header_disk hdr;
575 hdr.res_usize = cpu_to_le64(res_actual_size);
576 hdr.chunk_size = cpu_to_le32(ctx->out_chunk_size);
577 hdr.compression_format = cpu_to_le32(ctx->out_ctype);
/* The on-disk format requires these exact numeric values; fail
 * the build if the public enum ever changes. */
579 BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_XPRESS != 1);
580 BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZX != 2);
581 BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZMS != 3);
583 ret = full_pwrite(ctx->out_fd, &hdr, sizeof(hdr),
584 chunk_table_offset - sizeof(hdr));
587 res_start_offset = chunk_table_offset - sizeof(hdr);
589 res_start_offset = chunk_table_offset;
592 ret = full_pwrite(ctx->out_fd, ctx->chunk_csizes,
593 chunk_table_size, chunk_table_offset);
598 *res_start_offset_ret = res_start_offset;
599 *res_store_size_ret = res_end_offset - res_start_offset;
604 ERROR_WITH_ERRNO("Write error");
608 /* Finish writing a WIM resource by writing or updating the chunk table (if not
609 * writing the data uncompressed) and loading its metadata into @out_reshdr. */
611 end_write_resource(struct write_blobs_ctx *ctx, struct wim_reshdr *out_reshdr)
615 u64 res_uncompressed_size;
616 u64 res_offset_in_wim;
/* The assertion is relaxed in solid mode: write_blob_begin_read()
 * shrinks cur_write_res_size when duplicate blobs are discarded, so the
 * written offset may legitimately differ from the expected size. */
618 wimlib_assert(ctx->cur_write_blob_offset == ctx->cur_write_res_size ||
619 (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID));
620 res_uncompressed_size = ctx->cur_write_res_size;
622 if (ctx->compressor) {
623 ret = end_chunk_table(ctx, res_uncompressed_size,
624 &res_offset_in_wim, &res_size_in_wim);
/* Uncompressed: the resource is just the bytes already written
 * starting at chunks_start_offset; no chunk table exists. */
628 res_offset_in_wim = ctx->chunks_start_offset;
629 res_size_in_wim = ctx->out_fd->offset - res_offset_in_wim;
631 out_reshdr->uncompressed_size = res_uncompressed_size;
632 out_reshdr->size_in_wim = res_size_in_wim;
633 out_reshdr->offset_in_wim = res_offset_in_wim;
634 DEBUG("Finished writing resource: %"PRIu64" => %"PRIu64" @ %"PRIu64"",
635 res_uncompressed_size, res_size_in_wim, res_offset_in_wim);
639 /* Call when no more data from the file at @path is needed. */
641 done_with_file(const tchar *path, wimlib_progress_func_t progfunc, void *progctx)
643 union wimlib_progress_info info;
645 info.done_with_file.path_to_file = path;
/* Notify the caller via the DONE_WITH_FILE progress message. */
647 return call_progress(progfunc, WIMLIB_PROGRESS_MSG_DONE_WITH_FILE,
/* Fire DONE_WITH_FILE for the file backing @blob, once all of its inode's
 * streams have been consumed.  The path fixups below use wchar_t / NT path
 * prefixes, so this code path is Windows-specific. */
652 do_done_with_blob(struct blob_descriptor *blob,
653 wimlib_progress_func_t progfunc, void *progctx)
656 struct wim_inode *inode;
658 if (!blob->may_send_done_with_file)
/* Fire the message only once per inode: wait until the last remaining
 * stream of the inode has been fully consumed. */
661 inode = blob->file_inode;
663 wimlib_assert(inode != NULL);
664 wimlib_assert(inode->num_remaining_streams > 0);
665 if (--inode->num_remaining_streams > 0)
669 /* XXX: This logic really should be somewhere else. */
671 /* We want the path to the file, but blob->file_on_disk might actually
672 * refer to a named data stream. Temporarily strip the named data
673 * stream from the path. */
674 wchar_t *p_colon = NULL;
675 wchar_t *p_question_mark = NULL;
676 const wchar_t *p_stream_name;
678 p_stream_name = path_stream_name(blob->file_on_disk);
679 if (unlikely(p_stream_name)) {
680 p_colon = (wchar_t *)(p_stream_name - 1);
681 wimlib_assert(*p_colon == L':');
685 /* We also should use a fake Win32 path instead of a NT path */
686 if (!wcsncmp(blob->file_on_disk, L"\\??\\", 4)) {
687 p_question_mark = &blob->file_on_disk[1];
688 *p_question_mark = L'\\';
692 ret = done_with_file(blob->file_on_disk, progfunc, progctx);
/* Undo the temporary NT-path patch made above. */
698 *p_question_mark = L'?';
703 /* Handle WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES mode. */
705 done_with_blob(struct blob_descriptor *blob, struct write_blobs_ctx *ctx)
/* Fast path: DONE_WITH_FILE messages not requested for this write. */
707 if (likely(!(ctx->write_resource_flags &
708 WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE)))
710 return do_done_with_blob(blob, ctx->progress_data.progfunc,
711 ctx->progress_data.progctx);
714 /* Begin processing a blob for writing. */
716 write_blob_begin_read(struct blob_descriptor *blob, void *_ctx)
718 struct write_blobs_ctx *ctx = _ctx;
721 wimlib_assert(blob->size > 0);
723 ctx->cur_read_blob_offset = 0;
724 ctx->cur_read_blob_size = blob->size;
726 /* As an optimization, we allow some blobs to be "unhashed", meaning
727 * their SHA-1 message digests are unknown. This is the case with blobs
728 * that are added by scanning a directory tree with wimlib_add_image(),
729 * for example. Since WIM uses single-instance blobs, we don't know
730 * whether such each such blob really need to written until it is
731 * actually checksummed, unless it has a unique size. In such cases we
732 * read and checksum the blob in this function, thereby advancing ahead
733 * of read_blob_list(), which will still provide the data again to
734 * write_blob_process_chunk(). This is okay because an unhashed blob
735 * cannot be in a WIM resource, which might be costly to decompress. */
736 if (ctx->blob_table != NULL && blob->unhashed && !blob->unique_size) {
738 struct blob_descriptor *new_blob;
740 ret = hash_unhashed_blob(blob, ctx->blob_table, &new_blob);
743 if (new_blob != blob) {
744 /* Duplicate blob detected. */
746 if (new_blob->will_be_in_output_wim ||
747 blob_filtered(new_blob, ctx->filter_ctx))
749 /* The duplicate blob is already being included
750 * in the output WIM, or it would be filtered
751 * out if it had been. Skip writing this blob
752 * (and reading it again) entirely, passing its
753 * output reference count to the duplicate blob
754 * in the former case. */
755 DEBUG("Discarding duplicate blob of "
756 "length %"PRIu64, blob->size);
/* Report the skipped bytes as "discarded" so progress totals shrink. */
757 ret = do_write_blobs_progress(&ctx->progress_data,
758 blob->size, 1, true);
759 list_del(&blob->write_blobs_list);
760 list_del(&blob->blob_table_list);
761 if (new_blob->will_be_in_output_wim)
762 new_blob->out_refcnt += blob->out_refcnt;
/* In solid mode the planned resource shrinks by the skipped size. */
763 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)
764 ctx->cur_write_res_size -= blob->size;
766 ret = done_with_blob(blob, ctx);
767 free_blob_descriptor(blob);
770 return BEGIN_BLOB_STATUS_SKIP_BLOB;
772 /* The duplicate blob can validly be written,
773 * but was not marked as such. Discard the
774 * current blob descriptor and use the
775 * duplicate, but actually freeing the current
776 * blob descriptor must wait until
777 * read_blob_list() has finished reading its
779 DEBUG("Blob duplicate, but not already "
780 "selected for writing.");
781 list_replace(&blob->write_blobs_list,
782 &new_blob->write_blobs_list);
783 list_replace(&blob->blob_table_list,
784 &new_blob->blob_table_list);
785 blob->will_be_in_output_wim = 0;
786 new_blob->out_refcnt = blob->out_refcnt;
787 new_blob->will_be_in_output_wim = 1;
788 new_blob->may_send_done_with_file = 0;
/* Queue the (possibly substituted) blob for compression/writing. */
793 list_move_tail(&blob->write_blobs_list, &ctx->blobs_being_compressed);
797 /* Rewrite a blob that was just written compressed as uncompressed instead.
800 write_blob_uncompressed(struct blob_descriptor *blob, struct filedes *out_fd)
803 u64 begin_offset = blob->out_reshdr.offset_in_wim;
804 u64 end_offset = out_fd->offset;
/* Seek back to where the compressed version of the blob begins. */
806 if (filedes_seek(out_fd, begin_offset) == -1)
809 ret = extract_full_blob_to_fd(blob, out_fd);
811 /* Error reading the uncompressed data. */
812 if (out_fd->offset == begin_offset &&
813 filedes_seek(out_fd, end_offset) != -1)
815 /* Nothing was actually written yet, and we successfully
816 * seeked to the end of the compressed resource, so
817 * don't issue a hard error; just keep the compressed
818 * resource instead. */
819 WARNING("Recovered compressed blob of "
820 "size %"PRIu64", continuing on.", blob->size);
826 wimlib_assert(out_fd->offset - begin_offset == blob->size);
/* The uncompressed data is shorter than the compressed version it
 * replaced; cut off the now-stale trailing bytes. */
828 if (out_fd->offset < end_offset &&
829 0 != ftruncate(out_fd->fd, out_fd->offset))
831 ERROR_WITH_ERRNO("Can't truncate output file to "
832 "offset %"PRIu64, out_fd->offset);
833 return WIMLIB_ERR_WRITE;
/* Update the header: stored size now equals the uncompressed size, and
 * the storage-detail flags no longer apply. */
836 blob->out_reshdr.size_in_wim = blob->size;
837 blob->out_reshdr.flags &= ~(WIM_RESHDR_FLAG_COMPRESSED |
838 WIM_RESHDR_FLAG_SOLID);
842 /* Returns true if the specified blob, which was written as a non-solid
843 * resource, should be truncated from the WIM file and re-written uncompressed.
844 * blob->out_reshdr must be filled in from the initial write of the blob. */
846 should_rewrite_blob_uncompressed(const struct write_blobs_ctx *ctx,
847 const struct blob_descriptor *blob)
849 /* If the compressed data is smaller than the uncompressed data, prefer
850 * the compressed data. */
851 if (blob->out_reshdr.size_in_wim < blob->out_reshdr.uncompressed_size)
854 /* If we're not actually writing compressed data, then there's no need
856 if (!ctx->compressor)
859 /* If writing a pipable WIM, everything we write to the output is final
860 * (it might actually be a pipe!). */
861 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)
864 /* If the blob that would need to be re-read is located in a solid
865 * resource in another WIM file, then re-reading it would be costly. So
868 * Exception: if the compressed size happens to be *exactly* the same as
869 * the uncompressed size, then the blob *must* be written uncompressed
870 * in order to remain compatible with the Windows Overlay Filesystem
871 * Filter Driver (WOF).
873 * TODO: we are currently assuming that the optimization for
874 * single-chunk resources in maybe_rewrite_blob_uncompressed() prevents
875 * this case from being triggered too often. To fully prevent excessive
876 * decompressions in degenerate cases, we really should obtain the
877 * uncompressed data by decompressing the compressed data we wrote to
/* See the "Exception" note above for why equal sizes force a rewrite. */
880 if ((blob->flags & WIM_RESHDR_FLAG_SOLID) &&
881 (blob->out_reshdr.size_in_wim != blob->out_reshdr.uncompressed_size))
/* If worthwhile (see should_rewrite_blob_uncompressed()), replace the blob's
 * just-written compressed resource with the uncompressed data. */
888 maybe_rewrite_blob_uncompressed(struct write_blobs_ctx *ctx,
889 struct blob_descriptor *blob)
891 if (!should_rewrite_blob_uncompressed(ctx, blob))
894 /* Regular (non-solid) WIM resources with exactly one chunk and
895 * compressed size equal to uncompressed size are exactly the same as
896 * the corresponding compressed data --- since there must be 0 entries
897 * in the chunk table and the only chunk must be stored uncompressed.
898 * In this case, there's no need to rewrite anything. */
899 if (ctx->chunk_index == 1 &&
900 blob->out_reshdr.size_in_wim == blob->out_reshdr.uncompressed_size)
/* Just fix up the header flag; the bytes on disk are already right. */
902 blob->out_reshdr.flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
906 return write_blob_uncompressed(blob, ctx->out_fd);
909 /* Write the next chunk of (typically compressed) data to the output WIM,
910 * handling the writing of the chunk table. */
912 write_chunk(struct write_blobs_ctx *ctx, const void *cchunk,
913 size_t csize, size_t usize)
916 struct blob_descriptor *blob;
917 u32 completed_blob_count;
/* The chunk belongs to the oldest blob still being compressed. */
920 blob = list_entry(ctx->blobs_being_compressed.next,
921 struct blob_descriptor, write_blobs_list);
923 if (ctx->cur_write_blob_offset == 0 &&
924 !(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
926 /* Starting to write a new blob in non-solid mode. */
928 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
929 int additional_reshdr_flags = 0;
930 if (ctx->compressor != NULL)
931 additional_reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED;
933 DEBUG("Writing pipable WIM blob header "
934 "(offset=%"PRIu64")", ctx->out_fd->offset);
936 ret = write_pwm_blob_header(blob, ctx->out_fd,
937 additional_reshdr_flags);
942 ret = begin_write_resource(ctx, blob->size);
947 if (ctx->compressor != NULL) {
948 /* Record the compressed chunk size. */
949 wimlib_assert(ctx->chunk_index < ctx->num_alloc_chunks);
950 ctx->chunk_csizes[ctx->chunk_index++] = csize;
952 /* If writing a pipable WIM, before the chunk data write a chunk
953 * header that provides the compressed chunk size. */
954 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
955 struct pwm_chunk_hdr chunk_hdr = {
956 .compressed_size = cpu_to_le32(csize),
958 ret = full_write(ctx->out_fd, &chunk_hdr,
965 /* Write the chunk data. */
966 ret = full_write(ctx->out_fd, cchunk, csize);
970 ctx->cur_write_blob_offset += usize;
972 completed_size = usize;
973 completed_blob_count = 0;
974 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
975 /* Wrote chunk in solid mode. It may have finished multiple
977 struct blob_descriptor *next_blob;
/* A single chunk can span several blobs; retire each one it ends. */
979 while (blob && ctx->cur_write_blob_offset >= blob->size) {
981 ctx->cur_write_blob_offset -= blob->size;
983 if (ctx->cur_write_blob_offset)
984 next_blob = list_entry(blob->write_blobs_list.next,
985 struct blob_descriptor,
990 ret = done_with_blob(blob, ctx);
993 list_move_tail(&blob->write_blobs_list, &ctx->blobs_in_solid_resource);
994 completed_blob_count++;
999 /* Wrote chunk in non-solid mode. It may have finished a
1001 if (ctx->cur_write_blob_offset == blob->size) {
1003 wimlib_assert(ctx->cur_write_blob_offset ==
1004 ctx->cur_write_res_size);
1006 ret = end_write_resource(ctx, &blob->out_reshdr);
/* Flags are recomputed from scratch for the output resource. */
1010 blob->out_reshdr.flags = filter_resource_flags(blob->flags);
1011 if (ctx->compressor != NULL)
1012 blob->out_reshdr.flags |= WIM_RESHDR_FLAG_COMPRESSED;
/* If compression didn't pay off, rewrite uncompressed. */
1014 ret = maybe_rewrite_blob_uncompressed(ctx, blob);
1018 wimlib_assert(blob->out_reshdr.uncompressed_size == blob->size);
1020 ctx->cur_write_blob_offset = 0;
1022 ret = done_with_blob(blob, ctx);
1025 list_del(&blob->write_blobs_list);
1026 completed_blob_count++;
1030 return do_write_blobs_progress(&ctx->progress_data, completed_size,
1031 completed_blob_count, false);
1034 ERROR_WITH_ERRNO("Write error");
/* Obtain a fresh buffer from the chunk compressor into ctx->cur_chunk_buf,
 * draining finished compressed chunks to the output as needed. */
1039 prepare_chunk_buffer(struct write_blobs_ctx *ctx)
1041 /* While we are unable to get a new chunk buffer due to too many chunks
1042 * already outstanding, retrieve and write the next compressed chunk. */
1043 while (!(ctx->cur_chunk_buf =
1044 ctx->compressor->get_chunk_buffer(ctx->compressor)))
1052 bret = ctx->compressor->get_compression_result(ctx->compressor,
/* A result must be available whenever no buffer could be loaned. */
1056 wimlib_assert(bret);
1058 ret = write_chunk(ctx, cchunk, csize, usize);
1065 /* Process the next chunk of data to be written to a WIM resource. */
1067 write_blob_process_chunk(const void *chunk, size_t size, void *_ctx)
1069 struct write_blobs_ctx *ctx = _ctx;
1071 const u8 *chunkptr, *chunkend;
1073 wimlib_assert(size != 0);
1075 if (ctx->compressor == NULL) {
1076 /* Write chunk uncompressed. */
1077 ret = write_chunk(ctx, chunk, size, size);
1080 ctx->cur_read_blob_offset += size;
1084 /* Submit the chunk for compression, but take into account that the
1085 * @size the chunk was provided in may not correspond to the
1086 * @out_chunk_size being used for compression. */
1088 chunkend = chunkptr + size;
1090 size_t needed_chunk_size;
1091 size_t bytes_consumed;
/* Get a buffer for the next output chunk if we don't have one. */
1093 if (!ctx->cur_chunk_buf) {
1094 ret = prepare_chunk_buffer(ctx);
/* Solid mode always fills whole chunks; otherwise the final chunk of a
 * blob may be short. */
1099 if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1100 needed_chunk_size = ctx->out_chunk_size;
1102 needed_chunk_size = min(ctx->out_chunk_size,
1103 ctx->cur_chunk_buf_filled +
1104 (ctx->cur_read_blob_size -
1105 ctx->cur_read_blob_offset));
1108 bytes_consumed = min(chunkend - chunkptr,
1109 needed_chunk_size - ctx->cur_chunk_buf_filled);
1111 memcpy(&ctx->cur_chunk_buf[ctx->cur_chunk_buf_filled],
1112 chunkptr, bytes_consumed);
1114 chunkptr += bytes_consumed;
1115 ctx->cur_read_blob_offset += bytes_consumed;
1116 ctx->cur_chunk_buf_filled += bytes_consumed;
/* Hand a full output chunk off to the compressor. */
1118 if (ctx->cur_chunk_buf_filled == needed_chunk_size) {
1119 ctx->compressor->signal_chunk_filled(ctx->compressor,
1120 ctx->cur_chunk_buf_filled);
1121 ctx->cur_chunk_buf = NULL;
1122 ctx->cur_chunk_buf_filled = 0;
1124 } while (chunkptr != chunkend);
1128 /* Finish processing a blob for writing. It may not have been completely
1129 * written yet, as the chunk_compressor implementation may still have chunks
1130 * buffered or being compressed. */
1132 write_blob_end_read(struct blob_descriptor *blob, int status, void *_ctx)
1134 struct write_blobs_ctx *ctx = _ctx;
/* On success the whole blob must have been consumed. */
1136 wimlib_assert(ctx->cur_read_blob_offset == ctx->cur_read_blob_size || status);
1138 if (!blob->will_be_in_output_wim) {
1139 /* The blob was a duplicate. Now that its data has finished
1140 * being read, it is being discarded in favor of the duplicate
1141 * entry. It therefore is no longer needed, and we can fire the
1142 * DONE_WITH_FILE callback because the file will not be read
1145 * Note: we can't yet fire DONE_WITH_FILE for non-duplicate
1146 * blobs, since it needs to be possible to re-read the file if
1147 * it does not compress to less than its original size. */
1149 status = done_with_blob(blob, ctx);
1150 free_blob_descriptor(blob);
1151 } else if (!status && blob->unhashed && ctx->blob_table != NULL) {
1152 /* The blob was not a duplicate and was previously unhashed.
1153 * Since we passed COMPUTE_MISSING_BLOB_HASHES to
1154 * read_blob_list(), blob->hash is now computed and valid. So
1155 * turn this blob into a "hashed" blob. */
1156 list_del(&blob->unhashed_list);
1157 blob_table_insert(ctx->blob_table, blob);
1163 /* Compute statistics about a list of blobs that will be written.
1165 * Assumes the blobs are sorted such that all blobs located in each distinct WIM
1166 * (specified by WIMStruct) are together. */
/*
 * Fill in the WRITE_STREAMS progress totals (bytes, stream count, part
 * count, compression type) for the blobs in @blob_list, storing them in
 * @ctx->progress_data. Assumes blobs from the same source WIM are adjacent.
 * NOTE(review): some lines (e.g. where num_blobs/total_parts are
 * incremented) are elided from this view.
 */
1168 compute_blob_list_stats(struct list_head *blob_list,
1169 struct write_blobs_ctx *ctx)
1171 struct blob_descriptor *blob;
1172 u64 total_bytes = 0;
1174 u64 total_parts = 0;
/* Tracks the previously-seen source WIM so each distinct part is counted once. */
1175 WIMStruct *prev_wim_part = NULL;
1177 list_for_each_entry(blob, blob_list, write_blobs_list) {
1179 total_bytes += blob->size;
1180 if (blob->blob_location == BLOB_IN_WIM) {
1181 if (prev_wim_part != blob->rdesc->wim) {
1182 prev_wim_part = blob->rdesc->wim;
/* Publish the computed statistics into the progress structure. */
1187 ctx->progress_data.progress.write_streams.total_bytes = total_bytes;
1188 ctx->progress_data.progress.write_streams.total_streams = num_blobs;
1189 ctx->progress_data.progress.write_streams.completed_bytes = 0;
1190 ctx->progress_data.progress.write_streams.completed_streams = 0;
1191 ctx->progress_data.progress.write_streams.compression_type = ctx->out_ctype;
1192 ctx->progress_data.progress.write_streams.total_parts = total_parts;
1193 ctx->progress_data.progress.write_streams.completed_parts = 0;
1194 ctx->progress_data.next_progress = 0;
1197 /* Find blobs in @blob_list that can be copied to the output WIM in raw form
1198 * rather than compressed. Delete these blobs from @blob_list and move them to
1199 * @raw_copy_blobs. Return the total uncompressed size of the blobs that need
1200 * to be compressed. */
/*
 * Partition @blob_list: blobs whose raw compressed data can be copied
 * verbatim (per can_raw_copy()) are moved to @raw_copy_blobs; the rest
 * remain and their total uncompressed size is returned.
 */
1202 find_raw_copy_blobs(struct list_head *blob_list,
1203 int write_resource_flags,
1206 struct list_head *raw_copy_blobs)
1208 struct blob_descriptor *blob, *tmp;
1209 u64 num_bytes_to_compress = 0;
1211 INIT_LIST_HEAD(raw_copy_blobs);
1213 /* Initialize temporary raw_copy_ok flag. */
1214 list_for_each_entry(blob, blob_list, write_blobs_list)
1215 if (blob->blob_location == BLOB_IN_WIM)
1216 blob->rdesc->raw_copy_ok = 0;
1218 list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
/* A sibling blob sharing this resource descriptor was already
 * approved for raw copy, so this one goes with it. */
1219 if (blob->blob_location == BLOB_IN_WIM &&
1220 blob->rdesc->raw_copy_ok)
1222 list_move_tail(&blob->write_blobs_list,
1224 } else if (can_raw_copy(blob, write_resource_flags,
1225 out_ctype, out_chunk_size))
1227 blob->rdesc->raw_copy_ok = 1;
1228 list_move_tail(&blob->write_blobs_list,
/* Not raw-copyable: it will need to be (re)compressed. */
1231 num_bytes_to_compress += blob->size;
1235 return num_bytes_to_compress;
1238 /* Copy a raw compressed resource located in another WIM file to the WIM file
/*
 * Copy one raw compressed resource from its source WIM into @out_fd
 * without decompressing, then update the out_reshdr of every blob in the
 * resource that will be in the output WIM.
 */
1241 write_raw_copy_resource(struct wim_resource_descriptor *in_rdesc,
1242 struct filedes *out_fd)
1244 u64 cur_read_offset;
1245 u64 end_read_offset;
1246 u8 buf[BUFFER_SIZE];
1247 size_t bytes_to_read;
1249 struct filedes *in_fd;
1250 struct blob_descriptor *blob;
1251 u64 out_offset_in_wim;
1253 DEBUG("Copying raw compressed data (size_in_wim=%"PRIu64", "
1254 "uncompressed_size=%"PRIu64")",
1255 in_rdesc->size_in_wim, in_rdesc->uncompressed_size);
1257 /* Copy the raw data. */
1258 cur_read_offset = in_rdesc->offset_in_wim;
1259 end_read_offset = cur_read_offset + in_rdesc->size_in_wim;
1261 out_offset_in_wim = out_fd->offset;
/* Pipable resources carry a pwm_blob_hdr before the data; skip it on
 * input and account for it in the recorded output offset. */
1263 if (in_rdesc->is_pipable) {
1264 if (cur_read_offset < sizeof(struct pwm_blob_hdr))
1265 return WIMLIB_ERR_INVALID_PIPABLE_WIM;
1266 cur_read_offset -= sizeof(struct pwm_blob_hdr);
1267 out_offset_in_wim += sizeof(struct pwm_blob_hdr);
1269 in_fd = &in_rdesc->wim->in_fd;
1270 wimlib_assert(cur_read_offset != end_read_offset);
/* BUFFER_SIZE-chunked read/write loop until the resource is copied. */
1273 bytes_to_read = min(sizeof(buf), end_read_offset - cur_read_offset);
1275 ret = full_pread(in_fd, buf, bytes_to_read, cur_read_offset);
1279 ret = full_write(out_fd, buf, bytes_to_read);
1283 cur_read_offset += bytes_to_read;
1285 } while (cur_read_offset != end_read_offset);
/* Record the new location for each blob that shares this resource. */
1287 list_for_each_entry(blob, &in_rdesc->blob_list, rdesc_node) {
1288 if (blob->will_be_in_output_wim) {
1289 blob_set_out_reshdr_for_reuse(blob);
1290 if (in_rdesc->flags & WIM_RESHDR_FLAG_SOLID)
1291 blob->out_res_offset_in_wim = out_offset_in_wim;
1293 blob->out_reshdr.offset_in_wim = out_offset_in_wim;
1300 /* Copy a list of raw compressed resources located in other WIM file(s) to the
1301 * WIM file being written. */
/*
 * Raw-copy every resource referenced by the blobs in @raw_copy_blobs,
 * writing each underlying resource only once (several blobs may share a
 * solid resource), and report progress via @progress_data.
 */
1303 write_raw_copy_resources(struct list_head *raw_copy_blobs,
1304 struct filedes *out_fd,
1305 struct write_blobs_progress_data *progress_data)
1307 struct blob_descriptor *blob;
/* First pass: mark each resource as pending copy. */
1310 list_for_each_entry(blob, raw_copy_blobs, write_blobs_list)
1311 blob->rdesc->raw_copy_ok = 1;
1313 list_for_each_entry(blob, raw_copy_blobs, write_blobs_list) {
1314 if (blob->rdesc->raw_copy_ok) {
1315 /* Write each solid resource only one time. */
1316 ret = write_raw_copy_resource(blob->rdesc, out_fd);
/* Clear the flag so siblings sharing this resource skip it. */
1319 blob->rdesc->raw_copy_ok = 0;
1321 ret = do_write_blobs_progress(progress_data, blob->size,
1329 /* Wait for and write all chunks pending in the compressor. */
/*
 * Flush the compressor: signal any partially-filled chunk, then drain and
 * write every compressed chunk still pending. No-op when writing
 * uncompressed (ctx->compressor == NULL).
 */
1331 finish_remaining_chunks(struct write_blobs_ctx *ctx)
1338 if (ctx->compressor == NULL)
1341 if (ctx->cur_chunk_buf_filled != 0) {
1342 ctx->compressor->signal_chunk_filled(ctx->compressor,
1343 ctx->cur_chunk_buf_filled);
1346 while (ctx->compressor->get_compression_result(ctx->compressor, &cdata,
1349 ret = write_chunk(ctx, cdata, csize, usize);
/*
 * Drop zero-length blobs from @blob_list: they need no resource data, so
 * their output resource header is set to an all-zero location instead of
 * being written.
 */
1357 remove_empty_blobs(struct list_head *blob_list)
1359 struct blob_descriptor *blob, *tmp;
1361 list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
1362 wimlib_assert(blob->will_be_in_output_wim);
1363 if (blob->size == 0) {
1364 list_del(&blob->write_blobs_list);
1365 blob->out_reshdr.offset_in_wim = 0;
1366 blob->out_reshdr.size_in_wim = 0;
1367 blob->out_reshdr.uncompressed_size = 0;
1368 blob->out_reshdr.flags = filter_resource_flags(blob->flags);
/*
 * Prepare DONE_WITH_FILE bookkeeping: for each file-backed blob, count how
 * many of its inode's streams remain to be written so the callback can
 * fire once the last one completes.
 */
1374 init_done_with_file_info(struct list_head *blob_list)
1376 struct blob_descriptor *blob;
/* First pass: reset counters and flag which blobs participate. */
1378 list_for_each_entry(blob, blob_list, write_blobs_list) {
1379 if (blob_is_in_file(blob)) {
1380 blob->file_inode->num_remaining_streams = 0;
1381 blob->may_send_done_with_file = 1;
1383 blob->may_send_done_with_file = 0;
/* Second pass: tally remaining streams per inode. */
1387 list_for_each_entry(blob, blob_list, write_blobs_list)
1388 if (blob->may_send_done_with_file)
1389 blob->file_inode->num_remaining_streams++;
1393 * Write a list of blobs to the output WIM file.
1396 * The list of blobs to write, specified by a list of 'struct blob_descriptor' linked
1397 * by the 'write_blobs_list' member.
1400 * The file descriptor, opened for writing, to which to write the blobs.
1402 * @write_resource_flags
1403 * Flags to modify how the blobs are written:
1405 * WRITE_RESOURCE_FLAG_RECOMPRESS:
1406 * Force compression of all resources, even if they could otherwise
1407 * be re-used by copying the raw data, due to being located in a WIM
1408 * file with compatible compression parameters.
1410 * WRITE_RESOURCE_FLAG_PIPABLE:
1411 * Write the resources in the wimlib-specific pipable format, and
1412 * furthermore do so in such a way that no seeking backwards in
1413 * @out_fd will be performed (so it may be a pipe).
1415 * WRITE_RESOURCE_FLAG_SOLID:
1416 * Combine all the blobs into a single resource rather than writing
1417 * them in separate resources. This flag is only valid if the WIM
1418 * version number has been, or will be, set to WIM_VERSION_SOLID.
1419 * This flag may not be combined with WRITE_RESOURCE_FLAG_PIPABLE.
1422 * Compression format to use in the output resources, specified as one of
1423 * the WIMLIB_COMPRESSION_TYPE_* constants. WIMLIB_COMPRESSION_TYPE_NONE
1427 * Compression chunk size to use in the output resources. It must be a
1428 * valid chunk size for the specified compression format @out_ctype, unless
1429 * @out_ctype is WIMLIB_COMPRESSION_TYPE_NONE, in which case this parameter
1433 * Number of threads to use to compress data. If 0, a default number of
1434 * threads will be chosen. The number of threads still may be decreased
1435 * from the specified value if insufficient memory is detected.
1438 * If on-the-fly deduplication of unhashed blobs is desired, this parameter
1439 * must be pointer to the blob table for the WIMStruct on whose behalf the
1440 * blobs are being written. Otherwise, this parameter can be NULL.
1443 * If on-the-fly deduplication of unhashed blobs is desired, this parameter
1444 * can be a pointer to a context for blob filtering used to detect whether
1445 * the duplicate blob has been hard-filtered or not. If no blobs are
1446 * hard-filtered or no blobs are unhashed, this parameter can be NULL.
1448 * This function will write the blobs in @blob_list to resources in
1449 * consecutive positions in the output WIM file, or to a single solid resource
1450 * if WRITE_RESOURCE_FLAG_SOLID was specified in @write_resource_flags. In both
1451 * cases, the @out_reshdr of the `struct blob_descriptor' for each blob written will be
1452 * updated to specify its location, size, and flags in the output WIM. In the
1453 * solid resource case, WIM_RESHDR_FLAG_SOLID will be set in the @flags field of
1454 * each @out_reshdr, and furthermore @out_res_offset_in_wim and
1455 * @out_res_size_in_wim of each @out_reshdr will be set to the offset and size,
1456 * respectively, in the output WIM of the solid resource containing the
1457 * corresponding blob.
1459 * Each of the blobs to write may be in any location supported by the
1460 * resource-handling code (specifically, read_blob_list()), such as the contents
1461 * of external file that has been logically added to the output WIM, or a blob
1462 * in another WIM file that has been imported, or even a blob in the "same" WIM
1463 * file of which a modified copy is being written. In the case that a blob is
1464 * already in a WIM file and uses compatible compression parameters, by default
1465 * this function will re-use the raw data instead of decompressing it, then
1466 * recompressing it; however, with WRITE_RESOURCE_FLAG_RECOMPRESS
1467 * specified in @write_resource_flags, this is not done.
1469 * As a further requirement, this function requires that the
1470 * @will_be_in_output_wim member be set to 1 on all blobs in @blob_list as well
1471 * as any other blobs not in @blob_list that will be in the output WIM file, but
1472 * set to 0 on any other blobs in the output WIM's blob table or sharing a solid
1473 * resource with a blob in @blob_list. Still furthermore, if on-the-fly
1474 * deduplication of blobs is possible, then all blobs in @blob_list must also be
1475 * linked by @blob_table_list along with any other blobs that have
1476 * @will_be_in_output_wim set.
1478 * This function handles on-the-fly deduplication of blobs for which SHA-1
1479 * message digests have not yet been calculated. Such blobs may or may not need
1480 * to be written. If @blob_table is non-NULL, then each blob in @blob_list that
1481 * has @unhashed set but not @unique_size set is checksummed immediately before
1482 * it would otherwise be read for writing in order to determine if it is
1483 * identical to another blob already being written or one that would be filtered
1484 * out of the output WIM using blob_filtered() with the context @filter_ctx.
1485 * Each such duplicate blob will be removed from @blob_list, its reference count
1486 * transfered to the pre-existing duplicate blob, its memory freed, and will not
1487 * be written. Alternatively, if a blob in @blob_list is a duplicate with any
1488 * blob in @blob_table that has not been marked for writing or would not be
1489 * hard-filtered, it is freed and the pre-existing duplicate is written instead,
1490 * taking ownership of the reference count and slot in the @blob_table_list.
1492 * Returns 0 if every blob was either written successfully or did not need to be
1493 * written; otherwise returns a non-zero error code.
/*
 * Core blob-writing routine; see the long contract comment above. Writes
 * the blobs in @blob_list to @out_fd, either as separate resources, a
 * single solid resource (WRITE_RESOURCE_FLAG_SOLID), or raw copies where
 * the source data is reusable. Returns 0 or a WIMLIB_ERR_* code.
 * NOTE(review): several lines (error checks, returns, some arguments) are
 * elided from this view.
 */
1496 write_blob_list(struct list_head *blob_list,
1497 struct filedes *out_fd,
1498 int write_resource_flags,
1501 unsigned num_threads,
1502 struct blob_table *blob_table,
1503 struct filter_context *filter_ctx,
1504 wimlib_progress_func_t progfunc,
1508 struct write_blobs_ctx ctx;
1509 struct list_head raw_copy_blobs;
/* SOLID and PIPABLE are mutually exclusive. */
1511 wimlib_assert((write_resource_flags &
1512 (WRITE_RESOURCE_FLAG_SOLID |
1513 WRITE_RESOURCE_FLAG_PIPABLE)) !=
1514 (WRITE_RESOURCE_FLAG_SOLID |
1515 WRITE_RESOURCE_FLAG_PIPABLE));
1517 remove_empty_blobs(blob_list);
1519 if (list_empty(blob_list)) {
1520 DEBUG("No blobs to write.");
1524 /* If needed, set auxiliary information so that we can detect when the
1525 * library has finished using each external file. */
1526 if (unlikely(write_resource_flags & WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE))
1527 init_done_with_file_info(blob_list);
1529 memset(&ctx, 0, sizeof(ctx));
1531 ctx.out_fd = out_fd;
1532 ctx.blob_table = blob_table;
1533 ctx.out_ctype = out_ctype;
1534 ctx.out_chunk_size = out_chunk_size;
1535 ctx.write_resource_flags = write_resource_flags;
1536 ctx.filter_ctx = filter_ctx;
1539 * We normally sort the blobs to write by a "sequential" order that is
1540 * optimized for reading. But when using solid compression, we instead
1541 * sort the blobs by file extension and file name (when applicable; and
1542 * we don't do this for blobs from solid resources) so that similar
1543 * files are grouped together, which improves the compression ratio.
1544 * This is somewhat of a hack since a blob does not necessarily
1545 * correspond one-to-one with a filename, nor is there any guarantee
1546 * that two files with similar names or extensions are actually similar
1547 * in content. A potential TODO is to sort the blobs based on some
1548 * measure of similarity of their actual contents.
1551 ret = sort_blob_list_by_sequential_order(blob_list,
1552 offsetof(struct blob_descriptor,
1557 compute_blob_list_stats(blob_list, &ctx);
1559 if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID_SORT) {
1560 ret = sort_blob_list_for_solid_compression(blob_list);
/* Sorting failure is non-fatal; it only hurts the ratio. */
1562 WARNING("Failed to sort blobs for solid compression. Continuing anyways.");
1565 ctx.progress_data.progfunc = progfunc;
1566 ctx.progress_data.progctx = progctx;
1568 ctx.num_bytes_to_compress = find_raw_copy_blobs(blob_list,
1569 write_resource_flags,
1574 DEBUG("Writing blob list "
1575 "(offset = %"PRIu64", write_resource_flags=0x%08x, "
1576 "out_ctype=%d, out_chunk_size=%u, num_threads=%u, "
1577 "total_bytes=%"PRIu64", num_bytes_to_compress=%"PRIu64")",
1578 out_fd->offset, write_resource_flags,
1579 out_ctype, out_chunk_size, num_threads,
1580 ctx.progress_data.progress.write_streams.total_bytes,
1581 ctx.num_bytes_to_compress);
1583 if (ctx.num_bytes_to_compress == 0) {
1584 DEBUG("No compression needed; skipping to raw copy!");
1585 goto out_write_raw_copy_resources;
1588 /* Unless uncompressed output was required, allocate a chunk_compressor
1589 * to do compression. There are serial and parallel implementations of
1590 * the chunk_compressor interface. We default to parallel using the
1591 * specified number of threads, unless the upper bound on the number
1592 * bytes needing to be compressed is less than a heuristic value. */
1593 if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
1595 #ifdef ENABLE_MULTITHREADED_COMPRESSION
1596 if (ctx.num_bytes_to_compress > max(2000000, out_chunk_size)) {
1597 ret = new_parallel_chunk_compressor(out_ctype,
/* Parallel compressor creation failure falls back, not fails. */
1602 WARNING("Couldn't create parallel chunk compressor: %"TS".\n"
1603 " Falling back to single-threaded compression.",
1604 wimlib_get_error_string(ret));
1609 if (ctx.compressor == NULL) {
1610 ret = new_serial_chunk_compressor(out_ctype, out_chunk_size,
1613 goto out_destroy_context;
1618 ctx.progress_data.progress.write_streams.num_threads = ctx.compressor->num_threads;
1620 ctx.progress_data.progress.write_streams.num_threads = 1;
1622 DEBUG("Actually using %u threads",
1623 ctx.progress_data.progress.write_streams.num_threads);
1625 INIT_LIST_HEAD(&ctx.blobs_being_compressed);
1626 INIT_LIST_HEAD(&ctx.blobs_in_solid_resource);
1628 ret = call_progress(ctx.progress_data.progfunc,
1629 WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
1630 &ctx.progress_data.progress,
1631 ctx.progress_data.progctx);
1633 goto out_destroy_context;
/* In solid mode a single output resource spans all blobs. */
1635 if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1636 ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress);
1638 goto out_destroy_context;
1641 /* Read the list of blobs needing to be compressed, using the specified
1642 * callbacks to execute processing of the data. */
1644 struct read_blob_list_callbacks cbs = {
1645 .begin_blob = write_blob_begin_read,
1646 .begin_blob_ctx = &ctx,
1647 .consume_chunk = write_blob_process_chunk,
1648 .consume_chunk_ctx = &ctx,
1649 .end_blob = write_blob_end_read,
1650 .end_blob_ctx = &ctx,
1653 ret = read_blob_list(blob_list,
1654 offsetof(struct blob_descriptor, write_blobs_list),
1656 BLOB_LIST_ALREADY_SORTED |
1657 VERIFY_BLOB_HASHES |
1658 COMPUTE_MISSING_BLOB_HASHES);
1661 goto out_destroy_context;
1663 ret = finish_remaining_chunks(&ctx);
1665 goto out_destroy_context;
1667 if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1668 struct wim_reshdr reshdr;
1669 struct blob_descriptor *blob;
1672 ret = end_write_resource(&ctx, &reshdr);
1674 goto out_destroy_context;
1676 DEBUG("Ending solid resource: %lu %lu %lu.",
1677 reshdr.offset_in_wim,
1679 reshdr.uncompressed_size);
/* Distribute the solid resource's location to each member blob;
 * offset_in_wim becomes the blob's offset within the resource. */
1682 list_for_each_entry(blob, &ctx.blobs_in_solid_resource, write_blobs_list) {
1683 blob->out_reshdr.size_in_wim = blob->size;
1684 blob->out_reshdr.flags = filter_resource_flags(blob->flags);
1685 blob->out_reshdr.flags |= WIM_RESHDR_FLAG_SOLID;
1686 blob->out_reshdr.uncompressed_size = 0;
1687 blob->out_reshdr.offset_in_wim = offset_in_res;
1688 blob->out_res_offset_in_wim = reshdr.offset_in_wim;
1689 blob->out_res_size_in_wim = reshdr.size_in_wim;
1690 blob->out_res_uncompressed_size = reshdr.uncompressed_size;
1691 offset_in_res += blob->size;
1693 wimlib_assert(offset_in_res == reshdr.uncompressed_size);
1696 out_write_raw_copy_resources:
1697 /* Copy any compressed resources for which the raw data can be reused
1698 * without decompression. */
1699 ret = write_raw_copy_resources(&raw_copy_blobs, ctx.out_fd,
1700 &ctx.progress_data);
1702 out_destroy_context:
1703 FREE(ctx.chunk_csizes);
1705 ctx.compressor->destroy(ctx.compressor);
1706 DEBUG("Done (ret=%d)", ret);
/* for_blob_in_table() callback: nonzero iff @blob lives in a solid WIM
 * resource. @_ignore is unused. */
1711 is_blob_in_solid_resource(struct blob_descriptor *blob, void *_ignore)
1713 return blob_is_in_solid_wim_resource(blob);
/* Return nonzero iff any blob in @wim's blob table is stored in a solid
 * resource (relies on for_blob_in_table() stopping at first nonzero). */
1717 wim_has_solid_resources(WIMStruct *wim)
1719 return for_blob_in_table(wim->blob_table, is_blob_in_solid_resource, NULL);
/*
 * Wrapper around write_blob_list() that derives the resource flags,
 * compression type, and chunk size from @wim and @write_flags, enabling
 * solid mode automatically when appropriate.
 */
1723 wim_write_blob_list(WIMStruct *wim,
1724 struct list_head *blob_list,
1726 unsigned num_threads,
1727 struct filter_context *filter_ctx)
1731 int write_resource_flags;
1733 write_resource_flags = write_flags_to_resource_flags(write_flags);
1735 /* wimlib v1.7.0: create a solid WIM file by default if the WIM version
1736 * has been set to WIM_VERSION_SOLID and at least one blob in the WIM's
1737 * blob table is located in a solid resource (may be the same WIM, or a
1738 * different one in the case of export). */
1739 if (wim->hdr.wim_version == WIM_VERSION_SOLID &&
1740 wim_has_solid_resources(wim))
1742 write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID;
/* Solid mode uses its own compression parameters. */
1745 if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
1746 out_chunk_size = wim->out_solid_chunk_size;
1747 out_ctype = wim->out_solid_compression_type;
1749 out_chunk_size = wim->out_chunk_size;
1750 out_ctype = wim->out_compression_type;
1753 return write_blob_list(blob_list,
1755 write_resource_flags,
/*
 * Write a single blob as its own (non-solid) resource by wrapping it in a
 * one-element list and delegating to write_blob_list().
 */
1766 write_wim_resource(struct blob_descriptor *blob,
1767 struct filedes *out_fd,
1770 int write_resource_flags)
1772 LIST_HEAD(blob_list);
1773 list_add(&blob->write_blobs_list, &blob_list);
1774 blob->will_be_in_output_wim = 1;
1775 return write_blob_list(&blob_list,
/* Force non-solid: a lone blob never goes in a solid resource here. */
1777 write_resource_flags & ~WRITE_RESOURCE_FLAG_SOLID,
/*
 * Write an in-memory buffer as a WIM resource. Builds a temporary blob
 * descriptor around @buf, writes it via write_wim_resource(), then copies
 * the resulting resource header (and hash, when requested) back out.
 */
1788 write_wim_resource_from_buffer(const void *buf, size_t buf_size,
1789 int reshdr_flags, struct filedes *out_fd,
1792 struct wim_reshdr *out_reshdr,
1794 int write_resource_flags)
1797 struct blob_descriptor *blob;
1799 /* Set up a temporary blob descriptor to provide to
1800 * write_wim_resource(). */
1802 blob = new_blob_descriptor();
1804 return WIMLIB_ERR_NOMEM;
1806 blob->blob_location = BLOB_IN_ATTACHED_BUFFER;
1807 blob->attached_buffer = (void*)buf;
1808 blob->size = buf_size;
1809 blob->flags = reshdr_flags;
/* Pipable output embeds the hash up front, so compute it now. */
1811 if (write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
1812 sha1_buffer(buf, buf_size, blob->hash);
1818 ret = write_wim_resource(blob, out_fd, out_ctype, out_chunk_size,
1819 write_resource_flags);
1823 copy_reshdr(out_reshdr, &blob->out_reshdr);
1826 copy_hash(hash, blob->hash);
/* Detach the caller's buffer before freeing the descriptor. */
1829 blob->blob_location = BLOB_NONEXISTENT;
1830 free_blob_descriptor(blob);
/* Hash table keyed on blob size, used to flag blobs with unique sizes
 * (candidates that cannot be duplicates by size alone).
 * NOTE(review): remaining fields of this struct are elided from view. */
1834 struct blob_size_table {
1835 struct hlist_head *array;
/* Allocate a zeroed bucket array of @capacity entries for @tab.
 * Returns 0 or WIMLIB_ERR_NOMEM. */
1841 init_blob_size_table(struct blob_size_table *tab, size_t capacity)
1843 tab->array = CALLOC(capacity, sizeof(tab->array[0]));
1844 if (tab->array == NULL)
1845 return WIMLIB_ERR_NOMEM;
1846 tab->num_entries = 0;
1847 tab->capacity = capacity;
/* Free the resources held by @tab (body elided from this view). */
1852 destroy_blob_size_table(struct blob_size_table *tab)
/*
 * for_blob_in_table() callback: insert @blob into the size table @_tab.
 * If another blob of the same size already exists, both are marked
 * non-unique; otherwise @blob starts out unique_size = 1.
 */
1858 blob_size_table_insert(struct blob_descriptor *blob, void *_tab)
1860 struct blob_size_table *tab = _tab;
1862 struct blob_descriptor *same_size_blob;
1863 struct hlist_node *tmp;
1865 pos = hash_u64(blob->size) % tab->capacity;
1866 blob->unique_size = 1;
1867 hlist_for_each_entry(same_size_blob, tmp, &tab->array[pos], hash_list_2) {
1868 if (same_size_blob->size == blob->size) {
1869 blob->unique_size = 0;
1870 same_size_blob->unique_size = 0;
1875 hlist_add_head(&blob->hash_list_2, &tab->array[pos]);
/* Context for collecting the blobs referenced by the image(s) being
 * written. NOTE(review): some fields are elided from this view. */
1880 struct find_blobs_ctx {
1883 struct list_head blob_list;
1884 struct blob_size_table blob_size_tab;
/*
 * Mark @blob for inclusion in the output WIM, appending it to @blob_list
 * on first reference, and add @nref to its output reference count.
 */
1888 reference_blob_for_write(struct blob_descriptor *blob,
1889 struct list_head *blob_list, u32 nref)
1891 if (!blob->will_be_in_output_wim) {
1892 blob->out_refcnt = 0;
1893 list_add_tail(&blob->write_blobs_list, blob_list);
1894 blob->will_be_in_output_wim = 1;
1896 blob->out_refcnt += nref;
/* for_blob_in_table() callback: reference @blob for write with its full
 * existing reference count (resets will_be_in_output_wim first so the
 * blob is always re-added to the list). */
1900 fully_reference_blob_for_write(struct blob_descriptor *blob, void *_blob_list)
1902 struct list_head *blob_list = _blob_list;
1903 blob->will_be_in_output_wim = 0;
1904 reference_blob_for_write(blob, blob_list, blob->refcnt);
/*
 * Reference every blob named by @inode's streams, counting one reference
 * per hard link. A stream whose hash is nonzero but has no blob in
 * @table is an error (RESOURCE_NOT_FOUND).
 */
1909 inode_find_blobs_to_reference(const struct wim_inode *inode,
1910 const struct blob_table *table,
1911 struct list_head *blob_list)
1913 wimlib_assert(inode->i_nlink > 0);
1915 for (unsigned i = 0; i < inode->i_num_streams; i++) {
1916 struct blob_descriptor *blob;
1918 blob = stream_blob(&inode->i_streams[i], table);
1920 reference_blob_for_write(blob, blob_list, inode->i_nlink);
/* Zero hash means "no data"; anything else must resolve. */
1921 else if (!is_zero_hash(stream_hash(&inode->i_streams[i])))
1922 return WIMLIB_ERR_RESOURCE_NOT_FOUND;
/* for_blob_in_table() callback: clear the will_be_in_output_wim flag. */
1928 do_blob_set_not_in_output_wim(struct blob_descriptor *blob, void *_ignore)
1930 blob->will_be_in_output_wim = 0;
/*
 * for_image() callback: reference all blobs used by the current image's
 * inodes, accumulating them into the list passed via wim->private.
 */
1935 image_find_blobs_to_reference(WIMStruct *wim)
1937 struct wim_image_metadata *imd;
1938 struct wim_inode *inode;
1939 struct blob_descriptor *blob;
1940 struct list_head *blob_list;
1943 imd = wim_get_current_image_metadata(wim);
/* Unhashed blobs start unmarked; inode walking re-marks them. */
1945 image_for_each_unhashed_blob(blob, imd)
1946 blob->will_be_in_output_wim = 0;
1948 blob_list = wim->private;
1949 image_for_each_inode(inode, imd) {
1950 ret = inode_find_blobs_to_reference(inode,
/*
 * Build the unfiltered list of blobs that would appear in the output WIM
 * for @image. Fast path (@blobs_ok): take the whole blob table plus all
 * per-image unhashed blobs as-is. Slow path: walk the image metadata and
 * reference only what is actually used.
 */
1960 prepare_unfiltered_list_of_blobs_in_output_wim(WIMStruct *wim,
1963 struct list_head *blob_list_ret)
1967 INIT_LIST_HEAD(blob_list_ret);
1969 if (blobs_ok && (image == WIMLIB_ALL_IMAGES ||
1970 (image == 1 && wim->hdr.image_count == 1)))
1972 /* Fast case: Assume that all blobs are being written and that
1973 * the reference counts are correct. */
1974 struct blob_descriptor *blob;
1975 struct wim_image_metadata *imd;
1978 for_blob_in_table(wim->blob_table,
1979 fully_reference_blob_for_write,
1982 for (i = 0; i < wim->hdr.image_count; i++) {
1983 imd = wim->image_metadata[i];
1984 image_for_each_unhashed_blob(blob, imd)
1985 fully_reference_blob_for_write(blob, blob_list_ret);
1988 /* Slow case: Walk through the images being written and
1989 * determine the blobs referenced. */
1990 for_blob_in_table(wim->blob_table,
1991 do_blob_set_not_in_output_wim, NULL);
/* wim->private carries the result list into the per-image callback. */
1992 wim->private = blob_list_ret;
1993 ret = for_image(wim, image, image_find_blobs_to_reference);
/* Context pairing the size table with the filter state for the
 * insert_other_if_hard_filtered() callback below. */
2001 struct insert_other_if_hard_filtered_ctx {
2002 struct blob_size_table *tab;
2003 struct filter_context *filter_ctx;
/* for_blob_in_table() callback: add hard-filtered, not-to-be-written
 * blobs to the size table so size uniqueness accounts for them too. */
2007 insert_other_if_hard_filtered(struct blob_descriptor *blob, void *_ctx)
2009 struct insert_other_if_hard_filtered_ctx *ctx = _ctx;
2011 if (!blob->will_be_in_output_wim &&
2012 blob_hard_filtered(blob, ctx->filter_ctx))
2013 blob_size_table_insert(blob, ctx->tab);
/*
 * Set @unique_size on each blob in @blob_list that has a size unlike any
 * other blob in the list (and unlike any hard-filtered blob in @lt, when
 * hard filtering is possible). Uses a temporary size-keyed hash table.
 */
2018 determine_blob_size_uniquity(struct list_head *blob_list,
2019 struct blob_table *lt,
2020 struct filter_context *filter_ctx)
2023 struct blob_size_table tab;
2024 struct blob_descriptor *blob;
/* 9001: fixed prime-ish bucket count for the temporary table. */
2026 ret = init_blob_size_table(&tab, 9001);
2030 if (may_hard_filter_blobs(filter_ctx)) {
2031 struct insert_other_if_hard_filtered_ctx ctx = {
2033 .filter_ctx = filter_ctx,
2035 for_blob_in_table(lt, insert_other_if_hard_filtered, &ctx);
2038 list_for_each_entry(blob, blob_list, write_blobs_list)
2039 blob_size_table_insert(blob, &tab);
2041 destroy_blob_size_table(&tab);
/*
 * Remove filtered blobs from @blob_list. Hard-filtered blobs are also
 * dropped from the blob-table list and unmarked for output; soft-filtered
 * blobs are only removed from the write list.
 * NOTE(review): the branch structure is partially elided in this view.
 */
2046 filter_blob_list_for_write(struct list_head *blob_list,
2047 struct filter_context *filter_ctx)
2049 struct blob_descriptor *blob, *tmp;
2051 list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
2052 int status = blob_filtered(blob, filter_ctx);
2059 /* Soft filtered. */
2061 /* Hard filtered. */
2062 blob->will_be_in_output_wim = 0;
2063 list_del(&blob->blob_table_list);
2065 list_del(&blob->write_blobs_list);
2071 * prepare_blob_list_for_write() -
2073 * Prepare the list of blobs to write for writing a WIM containing the specified
2074 * image(s) with the specified write flags.
2077 * The WIMStruct on whose behalf the write is occurring.
2080 * Image(s) from the WIM to write; may be WIMLIB_ALL_IMAGES.
2083 * WIMLIB_WRITE_FLAG_* flags for the write operation:
2085 * STREAMS_OK: For writes of all images, assume that all blobs in the blob
2086 * table of @wim and the per-image lists of unhashed blobs should be taken
2087 * as-is, and image metadata should not be searched for references. This
2088 * does not exclude filtering with OVERWRITE and SKIP_EXTERNAL_WIMS, below.
2090 * OVERWRITE: Blobs already present in @wim shall not be returned in
2093 * SKIP_EXTERNAL_WIMS: Blobs already present in a WIM file, but not @wim,
2094 * shall be returned in neither @blob_list_ret nor @blob_table_list_ret.
2097 * List of blobs, linked by write_blobs_list, that need to be written will
2100 * Note that this function assumes that unhashed blobs will be written; it
2101 * does not take into account that they may become duplicates when actually
2104 * @blob_table_list_ret
2105 * List of blobs, linked by blob_table_list, that need to be included in
2106 * the WIM's blob table will be returned here. This will be a superset of
2107 * the blobs in @blob_list_ret.
2109 * This list will be a proper superset of @blob_list_ret if and only if
2110 * WIMLIB_WRITE_FLAG_OVERWRITE was specified in @write_flags and some of
2111 * the blobs that would otherwise need to be written were already located
2114 * All blobs in this list will have @out_refcnt set to the number of
2115 * references to the blob in the output WIM. If
2116 * WIMLIB_WRITE_FLAG_STREAMS_OK was specified in @write_flags, @out_refcnt
2117 * may be as low as 0.
2120 * A context for queries of blob filter status with blob_filtered() is
2121 * returned in this location.
2123 * In addition, @will_be_in_output_wim will be set to 1 in all blobs inserted
2124 * into @blob_table_list_ret and to 0 in all blobs in the blob table of @wim not
2125 * inserted into @blob_table_list_ret.
2127 * Still furthermore, @unique_size will be set to 1 on all blobs in
2128 * @blob_list_ret that have unique size among all blobs in @blob_list_ret and
2129 * among all blobs in the blob table of @wim that are ineligible for being
2130 * written due to filtering.
2132 * Returns 0 on success; nonzero on read error, memory allocation error, or
/*
 * See the contract comment above: build @blob_list_ret (blobs to write),
 * @blob_table_list_ret (blobs for the output blob table), and
 * @filter_ctx_ret, then apply size-uniquity marking and filtering.
 */
2136 prepare_blob_list_for_write(WIMStruct *wim, int image,
2138 struct list_head *blob_list_ret,
2139 struct list_head *blob_table_list_ret,
2140 struct filter_context *filter_ctx_ret)
2143 struct blob_descriptor *blob;
2145 filter_ctx_ret->write_flags = write_flags;
2146 filter_ctx_ret->wim = wim;
2148 ret = prepare_unfiltered_list_of_blobs_in_output_wim(
2151 write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK,
/* Initially the blob-table list mirrors the write list; filtering may
 * later remove entries from one or both. */
2156 INIT_LIST_HEAD(blob_table_list_ret);
2157 list_for_each_entry(blob, blob_list_ret, write_blobs_list)
2158 list_add_tail(&blob->blob_table_list, blob_table_list_ret);
2160 ret = determine_blob_size_uniquity(blob_list_ret, wim->blob_table,
2165 if (may_filter_blobs(filter_ctx_ret))
2166 filter_blob_list_for_write(blob_list_ret, filter_ctx_ret);
/*
 * Write the file (non-metadata) blobs for @image. Normally the blob list
 * is derived from the image(s); @blob_list_override (used by
 * wimlib_split()) supplies a pre-built list with existing refcounts.
 */
2172 write_file_blobs(WIMStruct *wim, int image, int write_flags,
2173 unsigned num_threads,
2174 struct list_head *blob_list_override,
2175 struct list_head *blob_table_list_ret)
2178 struct list_head _blob_list;
2179 struct list_head *blob_list;
2180 struct blob_descriptor *blob;
2181 struct filter_context _filter_ctx;
2182 struct filter_context *filter_ctx;
2184 if (blob_list_override == NULL) {
2185 /* Normal case: prepare blob list from image(s) being written.
2187 blob_list = &_blob_list;
2188 filter_ctx = &_filter_ctx;
2189 ret = prepare_blob_list_for_write(wim, image, write_flags,
2191 blob_table_list_ret,
2196 /* Currently only as a result of wimlib_split() being called:
2197 * use blob list already explicitly provided. Use existing
2198 * reference counts. */
2199 blob_list = blob_list_override;
2201 INIT_LIST_HEAD(blob_table_list_ret);
2202 list_for_each_entry(blob, blob_list, write_blobs_list) {
2203 blob->out_refcnt = blob->refcnt;
2204 blob->will_be_in_output_wim = 1;
2205 blob->unique_size = 0;
2206 list_add_tail(&blob->blob_table_list, blob_table_list_ret);
2210 return wim_write_blob_list(wim,
/*
 * Write the metadata resource for each image being written, bracketed by
 * WRITE_METADATA_BEGIN/END progress messages. Modified images get a newly
 * built resource; unmodified images are re-used in place (OVERWRITE) or
 * copied from the source WIM.
 */
2218 write_metadata_blobs(WIMStruct *wim, int image, int write_flags)
2223 int write_resource_flags;
2225 if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA) {
2226 DEBUG("Not writing any metadata resources.");
2230 write_resource_flags = write_flags_to_resource_flags(write_flags);
/* Metadata resources are never stored solid. */
2232 write_resource_flags &= ~WRITE_RESOURCE_FLAG_SOLID;
2234 DEBUG("Writing metadata resources (offset=%"PRIu64")",
2235 wim->out_fd.offset);
2237 ret = call_progress(wim->progfunc,
2238 WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN,
2239 NULL, wim->progctx);
2243 if (image == WIMLIB_ALL_IMAGES) {
2245 end_image = wim->hdr.image_count;
2247 start_image = image;
2251 for (int i = start_image; i <= end_image; i++) {
2252 struct wim_image_metadata *imd;
/* Image numbers are 1-based; the metadata array is 0-based. */
2254 imd = wim->image_metadata[i - 1];
2255 /* Build a new metadata resource only if image was modified from
2256 * the original (or was newly added). Otherwise just copy the
2258 if (imd->modified) {
2259 DEBUG("Image %u was modified; building and writing new "
2260 "metadata resource", i);
2261 ret = write_metadata_resource(wim, i,
2262 write_resource_flags);
2263 } else if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
2264 DEBUG("Image %u was not modified; re-using existing "
2265 "metadata resource.", i);
2266 blob_set_out_reshdr_for_reuse(imd->metadata_blob);
2269 DEBUG("Image %u was not modified; copying existing "
2270 "metadata resource.", i);
2271 ret = write_wim_resource(imd->metadata_blob,
2273 wim->out_compression_type,
2274 wim->out_chunk_size,
2275 write_resource_flags);
2281 return call_progress(wim->progfunc,
2282 WIMLIB_PROGRESS_MSG_WRITE_METADATA_END,
2283 NULL, wim->progctx);
/*
 * Open the file at @path for writing the output WIM and initialize
 * @wim->out_fd with the resulting descriptor.  @open_flags is OR'ed with
 * O_BINARY; the file is created with mode 0644 if needed.  Returns
 * WIMLIB_ERR_OPEN if the open fails.
 */
2287 open_wim_writable(WIMStruct *wim, const tchar *path, int open_flags)
2290 DEBUG("Opening \"%"TS"\" for writing.", path);
2292 raw_fd = topen(path, open_flags | O_BINARY, 0644);
2294 ERROR_WITH_ERRNO("Failed to open \"%"TS"\" for writing", path);
2295 return WIMLIB_ERR_OPEN;
/* Wrap the raw fd in the filedes abstraction (tracks offset etc.). */
2297 filedes_init(&wim->out_fd, raw_fd);
/*
 * Close @wim->out_fd, unless the caller supplied the file descriptor
 * (WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR), in which case the descriptor is left
 * open for the caller.  Either way the descriptor is marked invalid so it
 * cannot be closed twice.  Returns WIMLIB_ERR_WRITE if close fails.
 */
2302 close_wim_writable(WIMStruct *wim, int write_flags)
2306 if (!(write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)) {
2307 DEBUG("Closing WIM file.");
2308 if (filedes_valid(&wim->out_fd))
2309 if (filedes_close(&wim->out_fd))
2310 ret = WIMLIB_ERR_WRITE;
/* Invalidate unconditionally so later code never re-uses a stale fd. */
2312 filedes_invalidate(&wim->out_fd);
/*
 * qsort()-style comparator ordering blob descriptors by their location in
 * the output WIM, so that blob table entries can be written in on-disk
 * order.  Blobs inside solid resources are grouped by the containing solid
 * resource's offset; plain blobs compare by their own resource offset.
 */
2317 cmp_blobs_by_out_rdesc(const void *p1, const void *p2)
2319 const struct blob_descriptor *blob1, *blob2;
2321 blob1 = *(const struct blob_descriptor**)p1;
2322 blob2 = *(const struct blob_descriptor**)p2;
2324 if (blob1->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) {
2325 if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) {
/* Both solid: primary key is the containing solid resource's offset. */
2326 if (blob1->out_res_offset_in_wim != blob2->out_res_offset_in_wim)
2327 return cmp_u64(blob1->out_res_offset_in_wim,
2328 blob2->out_res_offset_in_wim);
2333 if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID)
/* Neither solid: compare each blob's own offset within the WIM. */
2336 return cmp_u64(blob1->out_reshdr.offset_in_wim,
2337 blob2->out_reshdr.offset_in_wim);
/*
 * Write the blob (lookup) table of the output WIM.
 *
 * Steps: (1) when overwriting in place, re-use the on-disk resource headers
 * of blobs already stored in @wim; (2) sort the entries into on-disk order;
 * (3) unless NO_METADATA, prepend entries for the metadata resources of the
 * image(s) being written; (4) serialize the table, returning its resource
 * header through @out_reshdr.
 */
2341 write_blob_table(WIMStruct *wim, int image, int write_flags,
2342 struct wim_reshdr *out_reshdr,
2343 struct list_head *blob_table_list)
2347 /* Set output resource metadata for blobs already present in WIM. */
2348 if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
2349 struct blob_descriptor *blob;
2350 list_for_each_entry(blob, blob_table_list, blob_table_list) {
2351 if (blob->blob_location == BLOB_IN_WIM &&
2352 blob->rdesc->wim == wim)
2354 blob_set_out_reshdr_for_reuse(blob);
/* Order the entries the same way the resources appear on disk. */
2359 ret = sort_blob_list(blob_table_list,
2360 offsetof(struct blob_descriptor, blob_table_list),
2361 cmp_blobs_by_out_rdesc);
2365 /* Add entries for metadata resources. */
2366 if (!(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)) {
2370 if (image == WIMLIB_ALL_IMAGES) {
2372 end_image = wim->hdr.image_count;
2374 start_image = image;
2378 /* Push metadata blob table entries onto the front of the list
2379 * in reverse order, so that they're written in order.
2381 for (int i = end_image; i >= start_image; i--) {
2382 struct blob_descriptor *metadata_blob;
2384 metadata_blob = wim->image_metadata[i - 1]->metadata_blob;
2385 wimlib_assert(metadata_blob->out_reshdr.flags & WIM_RESHDR_FLAG_METADATA);
2386 metadata_blob->out_refcnt = 1;
2387 list_add(&metadata_blob->blob_table_list, blob_table_list);
2391 return write_blob_table_from_blob_list(blob_table_list,
2393 wim->hdr.part_number,
2395 write_flags_to_resource_flags(write_flags));
2401 * Finish writing a WIM file: write the blob table, xml data, and integrity
2402 * table, then overwrite the WIM header. By default, closes the WIM file
2403 * descriptor (@wim->out_fd) if successful.
2405 * write_flags is a bitwise OR of the following:
2407 * (public) WIMLIB_WRITE_FLAG_CHECK_INTEGRITY:
2408 * Include an integrity table.
2410 * (public) WIMLIB_WRITE_FLAG_FSYNC:
2411 * fsync() the output file before closing it.
2413 * (public) WIMLIB_WRITE_FLAG_PIPABLE:
2414 * Writing a pipable WIM, possibly to a pipe; include pipable WIM
2415 * blob headers before the blob table and XML data, and also write
2416 * the WIM header at the end instead of seeking to the beginning.
2417 * Can't be combined with WIMLIB_WRITE_FLAG_CHECK_INTEGRITY.
2419 * (private) WIMLIB_WRITE_FLAG_NO_BLOB_TABLE:
2420 * Don't write the blob table.
2422 * (private) WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML:
2423 * After writing the XML data but before writing the integrity
2424 * table, write a temporary WIM header and flush the file
2425 * descriptor so that the WIM is less likely to become corrupted
2426 * upon abrupt program termination.
2427 * (private) WIMLIB_WRITE_FLAG_HEADER_AT_END:
2428 * Instead of overwriting the WIM header at the beginning of the
2429 * file, simply append it to the end of the file. (Used when
2431 * (private) WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR:
2432 * Do not close the file descriptor @wim->out_fd on either success
2434 * (private) WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES:
2435 * Use the existing <TOTALBYTES> stored in the in-memory XML
2436 * information, rather than setting it to the offset of the XML
2437 * data being written.
2438 * (private) WIMLIB_WRITE_FLAG_OVERWRITE
2439 * The existing WIM file is being updated in-place. The entries
2440 * from its integrity table may be re-used.
/*
 * Finish writing the output WIM: write the blob table (unless suppressed),
 * the XML data, and the optional integrity table, then rewrite the WIM
 * header (at the start of the file, or appended at the end for pipable
 * WIMs).  See the flag descriptions in the comment preceding this function
 * in the original file.  On failure paths, @old_integrity_table (read
 * earlier for re-use during in-place overwrites) is freed before returning.
 */
2443 finish_write(WIMStruct *wim, int image, int write_flags,
2444 struct list_head *blob_table_list)
2448 int write_resource_flags;
2449 off_t old_blob_table_end = 0;
2450 off_t new_blob_table_end;
2452 struct integrity_table *old_integrity_table = NULL;
2454 DEBUG("image=%d, write_flags=%08x", image, write_flags);
2456 write_resource_flags = write_flags_to_resource_flags(write_flags);
2458 /* In the WIM header, there is room for the resource entry for a
2459 * metadata resource labeled as the "boot metadata". This entry should
2460 * be zeroed out if there is no bootable image (boot_idx 0). Otherwise,
2461 * it should be a copy of the resource entry for the image that is
2462 * marked as bootable. This is not well documented... */
2463 if (wim->hdr.boot_idx == 0) {
2464 zero_reshdr(&wim->hdr.boot_metadata_reshdr);
2466 copy_reshdr(&wim->hdr.boot_metadata_reshdr,
2467 &wim->image_metadata[
2468 wim->hdr.boot_idx - 1]->metadata_blob->out_reshdr);
2471 /* If overwriting the WIM file containing an integrity table in-place,
2472 * we'd like to re-use the information in the old integrity table
2473 * instead of recalculating it. But we might overwrite the old
2474 * integrity table when we expand the XML data. Read it into memory
2476 if ((write_flags & (WIMLIB_WRITE_FLAG_OVERWRITE |
2477 WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)) ==
2478 (WIMLIB_WRITE_FLAG_OVERWRITE |
2479 WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
2480 && wim_has_integrity_table(wim))
2482 old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
2483 wim->hdr.blob_table_reshdr.size_in_wim;
2484 (void)read_integrity_table(wim,
2485 old_blob_table_end - WIM_HEADER_DISK_SIZE,
2486 &old_integrity_table);
2487 /* If we couldn't read the old integrity table, we can still
2488 * re-calculate the full integrity table ourselves. Hence the
2489 * ignoring of the return value. */
2492 /* Write blob table. */
2493 if (!(write_flags & WIMLIB_WRITE_FLAG_NO_BLOB_TABLE)) {
2494 ret = write_blob_table(wim, image, write_flags,
2495 &wim->hdr.blob_table_reshdr,
2498 free_integrity_table(old_integrity_table);
2503 /* Write XML data. */
/* <TOTALBYTES> normally records the size of everything before the XML data;
 * USE_EXISTING_TOTALBYTES keeps the value already stored in memory. */
2504 xml_totalbytes = wim->out_fd.offset;
2505 if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES)
2506 xml_totalbytes = WIM_TOTALBYTES_USE_EXISTING;
2507 ret = write_wim_xml_data(wim, image, xml_totalbytes,
2508 &wim->hdr.xml_data_reshdr,
2509 write_resource_flags);
2511 free_integrity_table(old_integrity_table);
2515 /* Write integrity table (optional). */
2516 if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2517 if (write_flags & WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) {
/* Write a temporary header (marked WRITE_IN_PROGRESS, integrity entry
 * zeroed) so an abrupt termination during the potentially long integrity
 * computation is less likely to leave a corrupt WIM. */
2518 struct wim_header checkpoint_hdr;
2519 memcpy(&checkpoint_hdr, &wim->hdr, sizeof(struct wim_header));
2520 zero_reshdr(&checkpoint_hdr.integrity_table_reshdr);
2521 checkpoint_hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2522 ret = write_wim_header_at_offset(&checkpoint_hdr,
2525 free_integrity_table(old_integrity_table);
2530 new_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
2531 wim->hdr.blob_table_reshdr.size_in_wim;
2533 ret = write_integrity_table(wim,
2536 old_integrity_table);
2537 free_integrity_table(old_integrity_table);
2541 /* No integrity table. */
2542 zero_reshdr(&wim->hdr.integrity_table_reshdr);
2545 /* Now that all information in the WIM header has been determined, the
2546 * preliminary header written earlier can be overwritten, the header of
2547 * the existing WIM file can be overwritten, or the final header can be
2548 * written to the end of the pipable WIM. */
2549 wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2551 if (write_flags & WIMLIB_WRITE_FLAG_HEADER_AT_END)
2552 hdr_offset = wim->out_fd.offset;
2553 DEBUG("Writing new header @ %"PRIu64".", hdr_offset);
2554 ret = write_wim_header_at_offset(&wim->hdr, &wim->out_fd, hdr_offset);
2558 /* Possibly sync file data to disk before closing. On POSIX systems, it
2559 * is necessary to do this before using rename() to overwrite an
2560 * existing file with a new file. Otherwise, data loss would occur if
2561 * the system is abruptly terminated when the metadata for the rename
2562 * operation has been written to disk, but the new file data has not.
2564 if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
2565 DEBUG("Syncing WIM file.");
2566 if (fsync(wim->out_fd.fd)) {
2567 ERROR_WITH_ERRNO("Error syncing data to WIM file");
2568 return WIMLIB_ERR_WRITE;
2572 if (close_wim_writable(wim, write_flags)) {
2573 ERROR_WITH_ERRNO("Failed to close the output WIM file");
2574 return WIMLIB_ERR_WRITE;
2580 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
2582 /* Set advisory lock on WIM file (if not already done so) */
2584 lock_wim_for_append(WIMStruct *wim)
/* Already locked by an earlier call: nothing to do. */
2586 if (wim->locked_for_append)
/* Try to take an exclusive, non-blocking advisory lock on the input file. */
2588 if (!flock(wim->in_fd.fd, LOCK_EX | LOCK_NB)) {
2589 wim->locked_for_append = 1;
/* EWOULDBLOCK means another process holds the lock; other errno values take
 * a different path whose code is not visible in this excerpt. */
2592 if (errno != EWOULDBLOCK)
2594 return WIMLIB_ERR_ALREADY_LOCKED;
2597 /* Remove advisory lock on WIM file (if present) */
2599 unlock_wim_for_append(WIMStruct *wim)
2601 if (wim->locked_for_append) {
/* Drop the advisory flock() taken by lock_wim_for_append(). */
2602 flock(wim->in_fd.fd, LOCK_UN);
2603 wim->locked_for_append = 0;
2609 * write_pipable_wim():
2611 * Perform the intermediate stages of creating a "pipable" WIM (i.e. a WIM
2612 * capable of being applied from a pipe).
2614 * Pipable WIMs are a wimlib-specific modification of the WIM format such that
2615 * images can be applied from them sequentially when the file data is sent over
2616 * a pipe. In addition, a pipable WIM can be written sequentially to a pipe.
2617 * The modifications made to the WIM format for pipable WIMs are:
2619 * - Magic characters in header are "WLPWM\0\0\0" (wimlib pipable WIM) instead
2620 * of "MSWIM\0\0\0". This lets wimlib know that the WIM is pipable and also
2621 * stops other software from trying to read the file as a normal WIM.
2623 * - The header at the beginning of the file does not contain all the normal
2624 * information; in particular it will have all 0's for the blob table and XML
2625 * data resource entries. This is because this information cannot be
2626 * determined until the blob table and XML data have been written.
2627 * Consequently, wimlib will write the full header at the very end of the
2628 * file. The header at the end, however, is only used when reading the WIM
2629 * from a seekable file (not a pipe).
2631 * - An extra copy of the XML data is placed directly after the header. This
2632 * allows image names and sizes to be determined at an appropriate time when
2633 * reading the WIM from a pipe. This copy of the XML data is ignored if the
2634 * WIM is read from a seekable file (not a pipe).
2636 * - The format of resources, or blobs, has been modified to allow them to be
2637 * used before the "blob table" has been read. Each blob is prefixed with a
2638 * `struct pwm_blob_hdr' that is basically an abbreviated form of `struct
2639 * blob_descriptor_disk' that only contains the SHA-1 message digest,
2640 * uncompressed blob size, and flags that indicate whether the blob is
2641 * compressed. The data of uncompressed blobs then follows literally, while
2642 * the data of compressed blobs follows in a modified format. Compressed
2643 * blobs do not begin with a chunk table, since the chunk table cannot be
2644 * written until all chunks have been compressed. Instead, each compressed
2645 * chunk is prefixed by a `struct pwm_chunk_hdr' that gives its size.
2646 * Furthermore, the chunk table is written at the end of the resource instead
2647 * of the start. Note: chunk offsets are given in the chunk table as if the
2648 * `struct pwm_chunk_hdr's were not present; also, the chunk table is only
2649 * used if the WIM is being read from a seekable file (not a pipe).
2651 * - Metadata blobs always come before non-metadata blobs. (This does not by
2652 * itself constitute an incompatibility with normal WIMs, since this is valid
2655 * - At least up to the end of the blobs, all components must be packed as
2656 * tightly as possible; there cannot be any "holes" in the WIM. (This does
2657 * not by itself constitute an incompatibility with normal WIMs, since this
2658 * is valid in normal WIMs.)
2660 * Note: the blob table, XML data, and header at the end are not used when
2661 * applying from a pipe. They exist to support functionality such as image
2662 * application and export when the WIM is *not* read from a pipe.
2664 * Layout of pipable WIM:
2666 * ---------+----------+--------------------+----------------+--------------+-----------+--------+
2667 * | Header | XML data | Metadata resources | File resources | Blob table | XML data | Header |
2668 * ---------+----------+--------------------+----------------+--------------+-----------+--------+
2670 * Layout of normal WIM:
2672 * +--------+-----------------------------+-------------------------+
2673 * | Header | File and metadata resources | Blob table | XML data |
2674 * +--------+-----------------------------+-------------------------+
2676 * An optional integrity table can follow the final XML data in both normal and
2677 * pipable WIMs. However, due to implementation details, wimlib currently can
2678 * only include an integrity table in a pipable WIM when writing it to a
2679 * seekable file (not a pipe).
2681 * Do note that since pipable WIMs are not supported by Microsoft's software,
2682 * wimlib does not create them unless explicitly requested (with
2683 * WIMLIB_WRITE_FLAG_PIPABLE) and as stated above they use different magic
2684 * characters to identify the file.
/*
 * Perform the intermediate stages of writing a pipable WIM (see the long
 * format description in the comment preceding this function): pre-hash any
 * unhashed blobs, write the extra XML data copy that follows the header,
 * then write metadata resources followed by file blobs.  The blob table,
 * final XML data, and trailing header are written later by finish_write().
 */
2687 write_pipable_wim(WIMStruct *wim, int image, int write_flags,
2688 unsigned num_threads,
2689 struct list_head *blob_list_override,
2690 struct list_head *blob_table_list_ret)
2693 struct wim_reshdr xml_reshdr;
2695 WARNING("Creating a pipable WIM, which will "
2697 " with Microsoft's software (wimgapi/imagex/Dism).");
2699 /* At this point, the header at the beginning of the file has already
2702 /* For efficiency, when wimlib adds an image to the WIM with
2703 * wimlib_add_image(), the SHA-1 message digests of files is not
2704 * calculated; instead, they are calculated while the files are being
2705 * written. However, this does not work when writing a pipable WIM,
2706 * since when writing a blob to a pipable WIM, its SHA-1 message digest
2707 * needs to be known before the blob data is written. Therefore, before
2708 * getting much farther, we need to pre-calculate the SHA-1 message
2709 * digests of all blobs that will be written. */
2710 ret = wim_checksum_unhashed_blobs(wim);
2714 /* Write extra copy of the XML data. */
2715 ret = write_wim_xml_data(wim, image, WIM_TOTALBYTES_OMIT,
2716 &xml_reshdr, WRITE_RESOURCE_FLAG_PIPABLE);
2720 /* Write metadata resources for the image(s) being included in the
2722 ret = write_metadata_blobs(wim, image, write_flags);
2726 /* Write blobs needed for the image(s) being included in the output WIM,
2727 * or blobs needed for the split WIM part. */
2728 return write_file_blobs(wim, image, write_flags,
2729 num_threads, blob_list_override,
2730 blob_table_list_ret);
2732 /* The blob table, XML data, and header at end are handled by
2733 * finish_write(). */
2736 /* Write a standalone WIM or split WIM (SWM) part to a new file or to a file
2739 write_wim_part(WIMStruct *wim,
2740 const void *path_or_fd,
2743 unsigned num_threads,
2744 unsigned part_number,
2745 unsigned total_parts,
2746 struct list_head *blob_list_override,
2750 struct wim_header hdr_save;
2751 struct list_head blob_table_list;
2753 if (total_parts == 1)
2754 DEBUG("Writing standalone WIM.");
2756 DEBUG("Writing split WIM part %u/%u", part_number, total_parts);
2757 if (image == WIMLIB_ALL_IMAGES)
2758 DEBUG("Including all images.");
2760 DEBUG("Including image %d only.", image);
2761 if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)
2762 DEBUG("File descriptor: %d", *(const int*)path_or_fd);
2764 DEBUG("Path: \"%"TS"\"", (const tchar*)path_or_fd);
2765 DEBUG("Write flags: 0x%08x", write_flags);
2767 if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
2768 DEBUG("\tCHECK_INTEGRITY");
2770 if (write_flags & WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)
2771 DEBUG("\tNO_CHECK_INTEGRITY");
2773 if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2776 if (write_flags & WIMLIB_WRITE_FLAG_NOT_PIPABLE)
2777 DEBUG("\tNOT_PIPABLE");
2779 if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
2780 DEBUG("\tRECOMPRESS");
2782 if (write_flags & WIMLIB_WRITE_FLAG_FSYNC)
2785 if (write_flags & WIMLIB_WRITE_FLAG_REBUILD)
2788 if (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE)
2789 DEBUG("\tSOFT_DELETE");
2791 if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
2792 DEBUG("\tIGNORE_READONLY_FLAG");
2794 if (write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS)
2795 DEBUG("\tSKIP_EXTERNAL_WIMS");
2797 if (write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK)
2798 DEBUG("\tSTREAMS_OK");
2800 if (write_flags & WIMLIB_WRITE_FLAG_RETAIN_GUID)
2801 DEBUG("\tRETAIN_GUID");
2803 if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
2806 if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)
2807 DEBUG("\tFILE_DESCRIPTOR");
2809 if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)
2810 DEBUG("\tNO_METADATA");
2812 if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES)
2813 DEBUG("\tUSE_EXISTING_TOTALBYTES");
2815 if (num_threads == 0)
2816 DEBUG("Number of threads: autodetect");
2818 DEBUG("Number of threads: %u", num_threads);
2819 DEBUG("Progress function: %s", (wim->progfunc ? "yes" : "no"));
2820 DEBUG("Blob list: %s", (blob_list_override ? "specified" : "autodetect"));
2821 DEBUG("GUID: %s", (write_flags &
2822 WIMLIB_WRITE_FLAG_RETAIN_GUID) ? "retain"
2823 : guid ? "explicit" : "generate new");
2825 /* Internally, this is always called with a valid part number and total
2827 wimlib_assert(total_parts >= 1);
2828 wimlib_assert(part_number >= 1 && part_number <= total_parts);
2830 /* A valid image (or all images) must be specified. */
2831 if (image != WIMLIB_ALL_IMAGES &&
2832 (image < 1 || image > wim->hdr.image_count))
2833 return WIMLIB_ERR_INVALID_IMAGE;
2835 /* If we need to write metadata resources, make sure the ::WIMStruct has
2836 * the needed information attached (e.g. is not a resource-only WIM,
2837 * such as a non-first part of a split WIM). */
2838 if (!wim_has_metadata(wim) &&
2839 !(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA))
2840 return WIMLIB_ERR_METADATA_NOT_FOUND;
2842 /* Check for contradictory flags. */
2843 if ((write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2844 WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2845 == (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2846 WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))
2847 return WIMLIB_ERR_INVALID_PARAM;
2849 if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2850 WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2851 == (WIMLIB_WRITE_FLAG_PIPABLE |
2852 WIMLIB_WRITE_FLAG_NOT_PIPABLE))
2853 return WIMLIB_ERR_INVALID_PARAM;
2855 /* Save previous header, then start initializing the new one. */
2856 memcpy(&hdr_save, &wim->hdr, sizeof(struct wim_header));
2858 /* Set default integrity, pipable, and solid flags. */
2859 if (!(write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2860 WIMLIB_WRITE_FLAG_NOT_PIPABLE)))
2861 if (wim_is_pipable(wim)) {
2862 DEBUG("WIM is pipable; default to PIPABLE.");
2863 write_flags |= WIMLIB_WRITE_FLAG_PIPABLE;
2866 if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
2867 WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)))
2868 if (wim_has_integrity_table(wim)) {
2869 DEBUG("Integrity table present; default to CHECK_INTEGRITY.");
2870 write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
2873 if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE |
2874 WIMLIB_WRITE_FLAG_SOLID))
2875 == (WIMLIB_WRITE_FLAG_PIPABLE |
2876 WIMLIB_WRITE_FLAG_SOLID))
2878 ERROR("Cannot specify both PIPABLE and SOLID!");
2879 return WIMLIB_ERR_INVALID_PARAM;
2882 /* Set appropriate magic number. */
2883 if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)
2884 wim->hdr.magic = PWM_MAGIC;
2886 wim->hdr.magic = WIM_MAGIC;
2888 /* Set appropriate version number. */
2889 if ((write_flags & WIMLIB_WRITE_FLAG_SOLID) ||
2890 wim->out_compression_type == WIMLIB_COMPRESSION_TYPE_LZMS)
2891 wim->hdr.wim_version = WIM_VERSION_SOLID;
2893 wim->hdr.wim_version = WIM_VERSION_DEFAULT;
2895 /* Clear header flags that will be set automatically. */
2896 wim->hdr.flags &= ~(WIM_HDR_FLAG_METADATA_ONLY |
2897 WIM_HDR_FLAG_RESOURCE_ONLY |
2898 WIM_HDR_FLAG_SPANNED |
2899 WIM_HDR_FLAG_WRITE_IN_PROGRESS);
2901 /* Set SPANNED header flag if writing part of a split WIM. */
2902 if (total_parts != 1)
2903 wim->hdr.flags |= WIM_HDR_FLAG_SPANNED;
2905 /* Set part number and total parts of split WIM. This will be 1 and 1
2906 * if the WIM is standalone. */
2907 wim->hdr.part_number = part_number;
2908 wim->hdr.total_parts = total_parts;
2910 /* Set compression type if different. */
2911 if (wim->compression_type != wim->out_compression_type) {
2912 ret = set_wim_hdr_cflags(wim->out_compression_type, &wim->hdr);
2913 wimlib_assert(ret == 0);
2916 /* Set chunk size if different. */
2917 wim->hdr.chunk_size = wim->out_chunk_size;
2920 if (!(write_flags & WIMLIB_WRITE_FLAG_RETAIN_GUID)) {
2922 memcpy(wim->hdr.guid, guid, WIMLIB_GUID_LEN);
2924 randomize_byte_array(wim->hdr.guid, WIMLIB_GUID_LEN);
2927 /* Clear references to resources that have not been written yet. */
2928 zero_reshdr(&wim->hdr.blob_table_reshdr);
2929 zero_reshdr(&wim->hdr.xml_data_reshdr);
2930 zero_reshdr(&wim->hdr.boot_metadata_reshdr);
2931 zero_reshdr(&wim->hdr.integrity_table_reshdr);
2933 /* Set image count and boot index correctly for single image writes. */
2934 if (image != WIMLIB_ALL_IMAGES) {
2935 wim->hdr.image_count = 1;
2936 if (wim->hdr.boot_idx == image)
2937 wim->hdr.boot_idx = 1;
2939 wim->hdr.boot_idx = 0;
2942 /* Split WIMs can't be bootable. */
2943 if (total_parts != 1)
2944 wim->hdr.boot_idx = 0;
2946 /* Initialize output file descriptor. */
2947 if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) {
2948 /* File descriptor was explicitly provided. Return error if
2949 * file descriptor is not seekable, unless writing a pipable WIM
2951 wim->out_fd.fd = *(const int*)path_or_fd;
2952 wim->out_fd.offset = 0;
2953 if (!filedes_is_seekable(&wim->out_fd)) {
2954 ret = WIMLIB_ERR_INVALID_PARAM;
2955 if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2956 goto out_restore_hdr;
2957 if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
2958 ERROR("Can't include integrity check when "
2959 "writing pipable WIM to pipe!");
2960 goto out_restore_hdr;
2965 /* Filename of WIM to write was provided; open file descriptor
2967 ret = open_wim_writable(wim, (const tchar*)path_or_fd,
2968 O_TRUNC | O_CREAT | O_RDWR);
2970 goto out_restore_hdr;
2973 /* Write initial header. This is merely a "dummy" header since it
2974 * doesn't have all the information yet, so it will be overwritten later
2975 * (unless writing a pipable WIM). */
2976 if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
2977 wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2978 ret = write_wim_header(&wim->hdr, &wim->out_fd);
2979 wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
2981 goto out_restore_hdr;
2983 /* Write metadata resources and blobs. */
2984 if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE)) {
2985 /* Default case: create a normal (non-pipable) WIM. */
2986 ret = write_file_blobs(wim, image, write_flags,
2991 goto out_restore_hdr;
2993 ret = write_metadata_blobs(wim, image, write_flags);
2995 goto out_restore_hdr;
2997 /* Non-default case: create pipable WIM. */
2998 ret = write_pipable_wim(wim, image, write_flags, num_threads,
3002 goto out_restore_hdr;
3003 write_flags |= WIMLIB_WRITE_FLAG_HEADER_AT_END;
3007 /* Write blob table, XML data, and (optional) integrity table. */
3008 ret = finish_write(wim, image, write_flags, &blob_table_list);
3010 memcpy(&wim->hdr, &hdr_save, sizeof(struct wim_header));
3011 (void)close_wim_writable(wim, write_flags);
3012 DEBUG("ret=%d", ret);
3016 /* Write a standalone WIM to a file or file descriptor. */
/*
 * Convenience wrapper: write a standalone (non-split) WIM by delegating to
 * write_wim_part() with part 1 of 1 and no blob-list override.
 */
3018 write_standalone_wim(WIMStruct *wim, const void *path_or_fd,
3019 int image, int write_flags, unsigned num_threads)
3021 return write_wim_part(wim, path_or_fd, image, write_flags,
3022 num_threads, 1, 1, NULL, NULL);
3025 /* API function documented in wimlib.h */
/*
 * Public API: write @wim to the file at @path.  Rejects write flags outside
 * the public mask and a NULL/empty path, then delegates to
 * write_standalone_wim().
 */
3027 wimlib_write(WIMStruct *wim, const tchar *path,
3028 int image, int write_flags, unsigned num_threads)
3030 if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
3031 return WIMLIB_ERR_INVALID_PARAM;
3033 if (path == NULL || path[0] == T('\0'))
3034 return WIMLIB_ERR_INVALID_PARAM;
3036 return write_standalone_wim(wim, path, image, write_flags, num_threads);
3039 /* API function documented in wimlib.h */
/*
 * Public API: write @wim to an already-open file descriptor @fd.  Validates
 * the public flag mask (and, per the second INVALID_PARAM return below,
 * another condition whose guard is not visible in this excerpt — presumably
 * a bad fd; confirm against the full source), then sets the internal
 * FILE_DESCRIPTOR flag and delegates to write_standalone_wim().
 */
3041 wimlib_write_to_fd(WIMStruct *wim, int fd,
3042 int image, int write_flags, unsigned num_threads)
3044 if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
3045 return WIMLIB_ERR_INVALID_PARAM;
3048 return WIMLIB_ERR_INVALID_PARAM;
3050 write_flags |= WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR;
3052 return write_standalone_wim(wim, &fd, image, write_flags, num_threads);
/* Scan every image's metadata for the 'modified' flag; by its name this
 * reports whether any image was modified (the return statements fall
 * outside this excerpt). */
3056 any_images_modified(WIMStruct *wim)
3058 for (int i = 0; i < wim->hdr.image_count; i++)
3059 if (wim->image_metadata[i]->modified)
/*
 * for_blob_in_table() callback: verify that a blob stored in this WIM does
 * not extend past the end offset passed via wim->private.  Returns
 * WIMLIB_ERR_RESOURCE_ORDER if it does (such a blob would be clobbered by
 * an in-place overwrite).
 */
3065 check_resource_offset(struct blob_descriptor *blob, void *_wim)
3067 const WIMStruct *wim = _wim;
3068 off_t end_offset = *(const off_t*)wim->private;
3070 if (blob->blob_location == BLOB_IN_WIM &&
3071 blob->rdesc->wim == wim &&
3072 blob->rdesc->offset_in_wim + blob->rdesc->size_in_wim > end_offset)
3073 return WIMLIB_ERR_RESOURCE_ORDER;
3077 /* Make sure no file or metadata resources are located after the XML data (or
3078 * integrity table if present)--- otherwise we can't safely overwrite the WIM in
3079 * place and we return WIMLIB_ERR_RESOURCE_ORDER. */
3081 check_resource_offsets(WIMStruct *wim, off_t end_offset)
3086 wim->private = &end_offset;
3087 ret = for_blob_in_table(wim->blob_table, check_resource_offset, wim);
3091 for (i = 0; i < wim->hdr.image_count; i++) {
3092 ret = check_resource_offset(wim->image_metadata[i]->metadata_blob, wim);
3100 * Overwrite a WIM, possibly appending new resources to it.
3102 * A WIM looks like (or is supposed to look like) the following:
3104 * Header (212 bytes)
3105 * Resources for metadata and files (variable size)
3106 * Blob table (variable size)
3107 * XML data (variable size)
3108 * Integrity table (optional) (variable size)
3110 * If we are not adding any new files or metadata, then the blob table is
3111 * unchanged--- so we only need to overwrite the XML data, integrity table, and
3112 * header. This operation is potentially unsafe if the program is abruptly
3113 * terminated while the XML data or integrity table are being overwritten, but
3114 * before the new header has been written. To partially alleviate this problem,
3115 * a special flag (WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) is passed to
3116 * finish_write() to cause a temporary WIM header to be written after the XML
3117 * data has been written. This may prevent the WIM from becoming corrupted if
3118 * the program is terminated while the integrity table is being calculated (but
3119 * no guarantees, due to write re-ordering...).
3121 * If we are adding new blobs, including new file data as well as any metadata
3122 * for any new images, then the blob table needs to be changed, and those blobs
3123 * need to be written. In this case, we try to perform a safe update of the WIM
3124 * file by writing the blobs *after* the end of the previous WIM, then writing
3125 * the new blob table, XML data, and (optionally) integrity table following the
3126 * new blobs. This will produce a layout like the following:
3128 * Header (212 bytes)
3129 * (OLD) Resources for metadata and files (variable size)
3130 * (OLD) Blob table (variable size)
3131 * (OLD) XML data (variable size)
3132 * (OLD) Integrity table (optional) (variable size)
3133 * (NEW) Resources for metadata and files (variable size)
3134 * (NEW) Blob table (variable size)
3135 * (NEW) XML data (variable size)
3136 * (NEW) Integrity table (optional) (variable size)
3138 * At all points, the WIM is valid as nothing points to the new data yet. Then,
3139 * the header is overwritten to point to the new blob table, XML data, and
3140 * integrity table, to produce the following layout:
3142 * Header (212 bytes)
3143 * Resources for metadata and files (variable size)
3144 * Nothing (variable size)
3145 * Resources for metadata and files (variable size)
3146 * Blob table (variable size)
3147 * XML data (variable size)
3148 * Integrity table (optional) (variable size)
3150 * This method allows an image to be appended to a large WIM very quickly, and
3151 * is crash-safe except in the case of write re-ordering, but the disadvantage
3152 * is that a small hole is left in the WIM where the old blob table, xml data,
3153 * and integrity table were. (These usually only take up a small amount of
3154 * space compared to the blobs, however.)
3157 overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads)
3161 u64 old_blob_table_end, old_xml_begin, old_xml_end;
3162 struct wim_header hdr_save;
3163 struct list_head blob_list;
3164 struct list_head blob_table_list;
3165 struct filter_context filter_ctx;
3167 DEBUG("Overwriting `%"TS"' in-place", wim->filename);
3169 /* Save original header so it can be restored in case of error */
3170 memcpy(&hdr_save, &wim->hdr, sizeof(struct wim_header));
3172 /* Set default integrity flag. */
3173 if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
3174 WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY)))
3175 if (wim_has_integrity_table(wim))
3176 write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY;
3178 /* Set WIM version if writing solid resources. */
3179 if (write_flags & WIMLIB_WRITE_FLAG_SOLID)
3180 wim->hdr.wim_version = WIM_VERSION_SOLID;
3182 /* Set additional flags for overwrite. */
3183 write_flags |= WIMLIB_WRITE_FLAG_OVERWRITE |
3184 WIMLIB_WRITE_FLAG_STREAMS_OK;
3186 /* Make sure there is no data after the XML data, except possibly an
3187 * integrity table. If this were the case, then this data would be
3189 old_xml_begin = wim->hdr.xml_data_reshdr.offset_in_wim;
3190 old_xml_end = old_xml_begin + wim->hdr.xml_data_reshdr.size_in_wim;
3191 old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
3192 wim->hdr.blob_table_reshdr.size_in_wim;
3193 if (wim->hdr.integrity_table_reshdr.offset_in_wim != 0 &&
3194 wim->hdr.integrity_table_reshdr.offset_in_wim < old_xml_end) {
3195 WARNING("Didn't expect the integrity table to be before the XML data");
3196 ret = WIMLIB_ERR_RESOURCE_ORDER;
3197 goto out_restore_memory_hdr;
3200 if (old_blob_table_end > old_xml_begin) {
3201 WARNING("Didn't expect the blob table to be after the XML data");
3202 ret = WIMLIB_ERR_RESOURCE_ORDER;
3203 goto out_restore_memory_hdr;
3206 /* Set @old_wim_end, which indicates the point beyond which we don't
3207 * allow any file and metadata resources to appear without returning
3208 * WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we would otherwise
3209 * overwrite these resources). */
3210 if (!wim->image_deletion_occurred && !any_images_modified(wim)) {
3211 /* If no images have been modified and no images have been
3212 * deleted, a new blob table does not need to be written. We
3213 * shall write the new XML data and optional integrity table
3214 * immediately after the blob table. Note that this may
3215 * overwrite an existing integrity table. */
3216 DEBUG("Skipping writing blob table "
3217 "(no images modified or deleted)");
3218 old_wim_end = old_blob_table_end;
3219 write_flags |= WIMLIB_WRITE_FLAG_NO_BLOB_TABLE |
3220 WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML;
3221 } else if (wim->hdr.integrity_table_reshdr.offset_in_wim != 0) {
3222 /* Old WIM has an integrity table; begin writing new blobs after
3224 old_wim_end = wim->hdr.integrity_table_reshdr.offset_in_wim +
3225 wim->hdr.integrity_table_reshdr.size_in_wim;
3227 /* No existing integrity table; begin writing new blobs after
3228 * the old XML data. */
3229 old_wim_end = old_xml_end;
3232 ret = check_resource_offsets(wim, old_wim_end);
3234 goto out_restore_memory_hdr;
3236 ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES, write_flags,
3237 &blob_list, &blob_table_list,
3240 goto out_restore_memory_hdr;
3242 ret = open_wim_writable(wim, wim->filename, O_RDWR);
3244 goto out_restore_memory_hdr;
3246 ret = lock_wim_for_append(wim);
3250 /* Set WIM_HDR_FLAG_WRITE_IN_PROGRESS flag in header. */
3251 wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
3252 ret = write_wim_header_flags(wim->hdr.flags, &wim->out_fd);
3254 ERROR_WITH_ERRNO("Error updating WIM header flags");
3255 goto out_unlock_wim;
3258 if (filedes_seek(&wim->out_fd, old_wim_end) == -1) {
3259 ERROR_WITH_ERRNO("Can't seek to end of WIM");
3260 ret = WIMLIB_ERR_WRITE;
3261 goto out_restore_physical_hdr;
3264 ret = wim_write_blob_list(wim, &blob_list, write_flags,
3265 num_threads, &filter_ctx);
3269 ret = write_metadata_blobs(wim, WIMLIB_ALL_IMAGES, write_flags);
3273 ret = finish_write(wim, WIMLIB_ALL_IMAGES, write_flags,
3278 unlock_wim_for_append(wim);
3282 if (!(write_flags & WIMLIB_WRITE_FLAG_NO_BLOB_TABLE)) {
3283 WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)",
3284 wim->filename, old_wim_end);
3285 /* Return value of ftruncate() is ignored because this is
3286 * already an error path. */
3287 (void)ftruncate(wim->out_fd.fd, old_wim_end);
3289 out_restore_physical_hdr:
3290 (void)write_wim_header_flags(hdr_save.flags, &wim->out_fd);
3292 unlock_wim_for_append(wim);
3294 (void)close_wim_writable(wim, write_flags);
3295 out_restore_memory_hdr:
3296 memcpy(&wim->hdr, &hdr_save, sizeof(struct wim_header));
/*
 * Rebuild the entire WIM: write all images of @wim to a temporary file
 * created in the same directory as wim->filename, then rename the
 * temporary file over the original WIM file.
 *
 * Returns 0 on success, or a WIMLIB_ERR_* code on failure (e.g.
 * WIMLIB_ERR_RENAME if the final rename fails).
 *
 * NOTE(review): several lines of this function (e.g. the declaration of
 * 'ret' and some error-path checks) are not visible in this chunk of the
 * file; the comments below describe only what the visible code does.
 */
3301 overwrite_wim_via_tmpfile(WIMStruct *wim, int write_flags, unsigned num_threads)
3303 size_t wim_name_len;
3306 DEBUG("Overwriting `%"TS"' via a temporary file", wim->filename);
3308 /* Write the WIM to a temporary file in the same directory as the
/* Temporary file name = original filename + 9 random alphanumeric
 * characters + terminating null (hence the 10 extra tchars). */
3310 wim_name_len = tstrlen(wim->filename);
3311 tchar tmpfile[wim_name_len + 10];
3312 tmemcpy(tmpfile, wim->filename, wim_name_len);
3313 randomize_char_array_with_alnum(tmpfile + wim_name_len, 9);
3314 tmpfile[wim_name_len + 9] = T('\0');
/* Write all images to the temporary file.  FSYNC so the data reaches
 * disk before the rename replaces the original file; RETAIN_GUID so
 * the rewritten WIM keeps the same GUID as the original. */
3316 ret = wimlib_write(wim, tmpfile, WIMLIB_ALL_IMAGES,
3318 WIMLIB_WRITE_FLAG_FSYNC |
3319 WIMLIB_WRITE_FLAG_RETAIN_GUID,
/* Close the file descriptor open on the original WIM, if any, before
 * replacing the file.  (Presumably needed on Windows, where a file
 * cannot be replaced while it is open -- TODO confirm.) */
3326 if (filedes_valid(&wim->in_fd)) {
3327 filedes_close(&wim->in_fd);
3328 filedes_invalidate(&wim->in_fd);
3331 /* Rename the new WIM file to the original WIM file. Note: on Windows
3332 * this actually calls win32_rename_replacement(), not _wrename(), so
3333 * that removing the existing destination file can be handled. */
3334 DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, wim->filename);
3335 ret = trename(tmpfile, wim->filename);
3337 ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'",
3338 tmpfile, wim->filename);
3345 return WIMLIB_ERR_RENAME;
/* Report the completed rename to the library user's progress function;
 * its return value becomes this function's final result. */
3348 union wimlib_progress_info progress;
3349 progress.rename.from = tmpfile;
3350 progress.rename.to = wim->filename;
3351 return call_progress(wim->progfunc, WIMLIB_PROGRESS_MSG_RENAME,
3352 &progress, wim->progctx);
3355 /* Determine if the specified WIM file may be updated by appending in-place
3356 * rather than writing and replacing it with an entirely new file. */
/* Each check below disqualifies in-place overwrite; the function
 * succeeds only if none of them applies.  (NOTE(review): the individual
 * 'return' statements are not visible in this chunk of the file.) */
3358 can_overwrite_wim_inplace(const WIMStruct *wim, int write_flags)
3360 /* REBUILD flag forces full rebuild. */
3361 if (write_flags & WIMLIB_WRITE_FLAG_REBUILD)
3364 /* Image deletions cause full rebuild by default. */
/* ...unless the caller passed WIMLIB_WRITE_FLAG_SOFT_DELETE. */
3365 if (wim->image_deletion_occurred &&
3366 !(write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
3369 /* Pipable WIMs cannot be updated in place, nor can a non-pipable WIM be
3370 * turned into a pipable WIM in-place. */
3371 if (wim_is_pipable(wim) || (write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
3374 /* The default compression type and compression chunk size selected for
3375 * the output WIM must be the same as those currently used for the WIM.
3377 if (wim->compression_type != wim->out_compression_type)
3379 if (wim->chunk_size != wim->out_chunk_size)
3385 /* API function documented in wimlib.h */
/* NOTE(review): a few lines of this function (e.g. the filename-presence
 * check that precedes the WIMLIB_ERR_NO_FILENAME return, and the
 * declarations of 'ret'/'orig_hdr_flags') are not visible in this chunk. */
3387 wimlib_overwrite(WIMStruct *wim, int write_flags, unsigned num_threads)
/* Reject any flags outside the public write-flag mask. */
3392 if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC)
3393 return WIMLIB_ERR_INVALID_PARAM;
/* Overwriting requires the WIMStruct to be backed by an on-disk file. */
3396 return WIMLIB_ERR_NO_FILENAME;
/* Check that the WIM may be modified.  If the caller passed
 * WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG, temporarily clear the
 * header's READONLY flag so can_modify_wim() does not reject the WIM
 * on that basis; the original header flags are restored right after. */
3398 orig_hdr_flags = wim->hdr.flags;
3399 if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
3400 wim->hdr.flags &= ~WIM_HDR_FLAG_READONLY;
3401 ret = can_modify_wim(wim);
3402 wim->hdr.flags = orig_hdr_flags;
/* Prefer the in-place append when permitted; fall back to a full
 * rebuild via a temporary file when in-place overwrite is not possible
 * or fails with WIMLIB_ERR_RESOURCE_ORDER. */
3406 if (can_overwrite_wim_inplace(wim, write_flags)) {
3407 ret = overwrite_wim_inplace(wim, write_flags, num_threads);
3408 if (ret != WIMLIB_ERR_RESOURCE_ORDER)
3410 WARNING("Falling back to re-building entire WIM");
3412 return overwrite_wim_via_tmpfile(wim, write_flags, num_threads);