X-Git-Url: https://wimlib.net/git/?p=wimlib;a=blobdiff_plain;f=src%2Fwrite.c;h=c1d56de3001d1a37d166d22ec6daa9ee6dce163e;hp=4ae01c6e36abe00f710ccf00859e5d9573052e6e;hb=c86d5f74e13e5aeff57aff84f7d65264607a29ac;hpb=7cfb9777313e1a2f60a49d6b9ef87910f27f1a51 diff --git a/src/write.c b/src/write.c index 4ae01c6e..c1d56de3 100644 --- a/src/write.c +++ b/src/write.c @@ -6,22 +6,20 @@ */ /* - * Copyright (C) 2012, 2013 Eric Biggers + * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers * - * This file is part of wimlib, a library for working with WIM files. + * This file is free software; you can redistribute it and/or modify it under + * the terms of the GNU Lesser General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) any + * later version. * - * wimlib is free software; you can redistribute it and/or modify it under the - * terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. - * - * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR - * A PARTICULAR PURPOSE. See the GNU General Public License for more + * This file is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. * - * You should have received a copy of the GNU General Public License - * along with wimlib; if not, see http://www.gnu.org/licenses/. + * You should have received a copy of the GNU Lesser General Public License + * along with this file; if not, see http://www.gnu.org/licenses/. */ #ifdef HAVE_CONFIG_H @@ -34,1801 +32,1703 @@ # include #endif +#include +#include +#include +#include + +#include "wimlib/alloca.h" +#include "wimlib/assert.h" +#include "wimlib/blob_table.h" +#include "wimlib/chunk_compressor.h" #include "wimlib/endianness.h" #include "wimlib/error.h" #include "wimlib/file_io.h" #include "wimlib/header.h" +#include "wimlib/inode.h" #include "wimlib/integrity.h" -#include "wimlib/lookup_table.h" #include "wimlib/metadata.h" +#include "wimlib/paths.h" +#include "wimlib/progress.h" #include "wimlib/resource.h" +#include "wimlib/solid.h" +#include "wimlib/win32.h" /* win32_rename_replacement() */ #include "wimlib/write.h" #include "wimlib/xml.h" -#ifdef __WIN32__ -# include "wimlib/win32.h" /* win32_get_number_of_processors() */ -#endif -#ifdef ENABLE_MULTITHREADED_COMPRESSION -# include -#endif +/* wimlib internal flags used when writing resources. 
*/ +#define WRITE_RESOURCE_FLAG_RECOMPRESS 0x00000001 +#define WRITE_RESOURCE_FLAG_PIPABLE 0x00000002 +#define WRITE_RESOURCE_FLAG_SOLID 0x00000004 +#define WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE 0x00000008 +#define WRITE_RESOURCE_FLAG_SOLID_SORT 0x00000010 -#include -#include -#include -#include -#include +static int +write_flags_to_resource_flags(int write_flags) +{ + int write_resource_flags = 0; -#ifdef HAVE_ALLOCA_H -# include -#endif + if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS) + write_resource_flags |= WRITE_RESOURCE_FLAG_RECOMPRESS; + if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE) + write_resource_flags |= WRITE_RESOURCE_FLAG_PIPABLE; -#ifndef __WIN32__ -# include /* for `struct iovec' */ -#endif + if (write_flags & WIMLIB_WRITE_FLAG_SOLID) + write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID; + + if (write_flags & WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES) + write_resource_flags |= WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE; + + if ((write_flags & (WIMLIB_WRITE_FLAG_SOLID | + WIMLIB_WRITE_FLAG_NO_SOLID_SORT)) == + WIMLIB_WRITE_FLAG_SOLID) + write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID_SORT; + + return write_resource_flags; +} + +struct filter_context { + int write_flags; + WIMStruct *wim; +}; + +/* + * Determine whether the specified blob should be filtered out from the write. + * + * Return values: + * + * < 0 : The blob should be hard-filtered; that is, not included in the output + * WIM file at all. + * 0 : The blob should not be filtered out. + * > 0 : The blob should be soft-filtered; that is, it already exists in the + * WIM file and may not need to be written again. + */ +static int +blob_filtered(const struct blob_descriptor *blob, + const struct filter_context *ctx) +{ + int write_flags; + WIMStruct *wim; + + if (ctx == NULL) + return 0; + + write_flags = ctx->write_flags; + wim = ctx->wim; + + if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE && + blob->blob_location == BLOB_IN_WIM && + blob->rdesc->wim == wim) + return 1; + + if (write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS && + blob->blob_location == BLOB_IN_WIM && + blob->rdesc->wim != wim) + return -1; + + return 0; +} + +static bool +blob_hard_filtered(const struct blob_descriptor *blob, + struct filter_context *ctx) +{ + return blob_filtered(blob, ctx) < 0; +} + +static inline int +may_soft_filter_blobs(const struct filter_context *ctx) +{ + if (ctx == NULL) + return 0; + return ctx->write_flags & WIMLIB_WRITE_FLAG_OVERWRITE; +} + +static inline int +may_hard_filter_blobs(const struct filter_context *ctx) +{ + if (ctx == NULL) + return 0; + return ctx->write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS; +} + +static inline int +may_filter_blobs(const struct filter_context *ctx) +{ + return (may_soft_filter_blobs(ctx) || may_hard_filter_blobs(ctx)); +} /* Return true if the specified resource is compressed and the compressed data * can be reused with the specified output parameters. 
*/ static bool -can_raw_copy(const struct wim_lookup_table_entry *lte, +can_raw_copy(const struct blob_descriptor *blob, int write_resource_flags, int out_ctype, u32 out_chunk_size) { - if (lte->resource_location != RESOURCE_IN_WIM) - return false; - if (lte->rspec->flags & WIM_RESHDR_FLAG_PACKED_STREAMS) + const struct wim_resource_descriptor *rdesc; + + if (write_resource_flags & WRITE_RESOURCE_FLAG_RECOMPRESS) return false; + if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE) return false; - if (lte->rspec->wim->compression_type != out_ctype) - return false; - if (lte->rspec->wim->chunk_size != out_chunk_size) + + if (blob->blob_location != BLOB_IN_WIM) return false; - return true; -} + rdesc = blob->rdesc; -/* Return true if the specified resource must be recompressed when the specified - * output parameters are used. */ -static bool -must_compress_stream(const struct wim_lookup_table_entry *lte, - int write_resource_flags, int out_ctype, u32 out_chunk_size) -{ - return (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE - && ((write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS) - || !can_raw_copy(lte, write_resource_flags, - out_ctype, out_chunk_size))); -} - -static unsigned -compress_chunk(const void * uncompressed_data, - unsigned uncompressed_len, - void *compressed_data, - int out_ctype, - struct wimlib_lzx_context *comp_ctx) -{ - switch (out_ctype) { - case WIMLIB_COMPRESSION_TYPE_XPRESS: - return wimlib_xpress_compress(uncompressed_data, - uncompressed_len, - compressed_data); - case WIMLIB_COMPRESSION_TYPE_LZX: - return wimlib_lzx_compress2(uncompressed_data, - uncompressed_len, - compressed_data, - comp_ctx); - case WIMLIB_COMPRESSION_TYPE_LZMS: - /* TODO */ - WARNING("LZMS compression not yet implemented!"); - return 0; + if (rdesc->is_pipable != !!(write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)) + return false; - default: - wimlib_assert(0); - return 0; + if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) { + /* Normal compressed resource: Must use same compression type + * and chunk size. */ + return (rdesc->compression_type == out_ctype && + rdesc->chunk_size == out_chunk_size); } -} -/* Chunk table that's located at the beginning of each compressed resource in - * the WIM. (This is not the on-disk format; the on-disk format just has an - * array of offsets.) */ -struct chunk_table { - u64 original_resource_size; - u64 num_chunks; - u64 table_disk_size; - unsigned bytes_per_chunk_entry; - void *cur_offset_p; - union { - u32 cur_offset_u32; - u64 cur_offset_u64; - }; - /* Beginning of chunk offsets, in either 32-bit or 64-bit little endian - * integers, including the first offset of 0, which will not be written. - * */ - u8 offsets[] _aligned_attribute(8); -}; + if ((rdesc->flags & WIM_RESHDR_FLAG_SOLID) && + (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)) + { + /* Solid resource: Such resources may contain multiple blobs, + * and in general only a subset of them need to be written. As + * a heuristic, re-use the raw data if more than two-thirds the + * uncompressed size is being written. */ -/* Allocate and initializes a chunk table, then reserve space for it in the - * output file unless writing a pipable resource. 
*/ -static int -begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte, - struct filedes *out_fd, - u32 out_chunk_size, - struct chunk_table **chunk_tab_ret, - int resource_flags) -{ - u64 size; - u64 num_chunks; - unsigned bytes_per_chunk_entry; - size_t alloc_size; - struct chunk_table *chunk_tab; - int ret; + /* Note: solid resources contain a header that specifies the + * compression type and chunk size; therefore we don't need to + * check if they are compatible with @out_ctype and + * @out_chunk_size. */ - size = lte->size; - num_chunks = DIV_ROUND_UP(size, out_chunk_size); - bytes_per_chunk_entry = (size > (1ULL << 32)) ? 8 : 4; - alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64); - chunk_tab = CALLOC(1, alloc_size); + struct blob_descriptor *res_blob; + u64 write_size = 0; - if (!chunk_tab) { - ERROR("Failed to allocate chunk table for %"PRIu64" byte " - "resource", size); - return WIMLIB_ERR_NOMEM; - } - chunk_tab->num_chunks = num_chunks; - chunk_tab->original_resource_size = size; - chunk_tab->bytes_per_chunk_entry = bytes_per_chunk_entry; - chunk_tab->table_disk_size = chunk_tab->bytes_per_chunk_entry * - (num_chunks - 1); - chunk_tab->cur_offset_p = chunk_tab->offsets; + list_for_each_entry(res_blob, &rdesc->blob_list, rdesc_node) + if (res_blob->will_be_in_output_wim) + write_size += res_blob->size; - /* We don't know the correct offsets yet; so just write zeroes to - * reserve space for the table, so we can go back to it later after - * we've written the compressed chunks following it. - * - * Special case: if writing a pipable WIM, compressed resources are in a - * modified format (see comment above write_pipable_wim()) and do not - * have a chunk table at the beginning, so don't reserve any space for - * one. */ - if (!(resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE)) { - ret = full_write(out_fd, chunk_tab->offsets, - chunk_tab->table_disk_size); - if (ret) { - ERROR_WITH_ERRNO("Failed to write chunk table in compressed " - "file resource"); - FREE(chunk_tab); - return ret; - } + return (write_size > rdesc->uncompressed_size * 2 / 3); } - *chunk_tab_ret = chunk_tab; - return 0; + + return false; +} + +static u32 +reshdr_flags_for_blob(const struct blob_descriptor *blob) +{ + u32 reshdr_flags = 0; + if (blob->is_metadata) + reshdr_flags |= WIM_RESHDR_FLAG_METADATA; + return reshdr_flags; } -/* Add the offset for the next chunk to the chunk table being constructed for a - * compressed stream. 
*/ static void -chunk_tab_record_chunk(struct chunk_table *chunk_tab, unsigned out_chunk_size) +blob_set_out_reshdr_for_reuse(struct blob_descriptor *blob) { - if (chunk_tab->bytes_per_chunk_entry == 4) { - *(le32*)chunk_tab->cur_offset_p = cpu_to_le32(chunk_tab->cur_offset_u32); - chunk_tab->cur_offset_p = (le32*)chunk_tab->cur_offset_p + 1; - chunk_tab->cur_offset_u32 += out_chunk_size; + const struct wim_resource_descriptor *rdesc; + + wimlib_assert(blob->blob_location == BLOB_IN_WIM); + rdesc = blob->rdesc; + + if (rdesc->flags & WIM_RESHDR_FLAG_SOLID) { + blob->out_reshdr.offset_in_wim = blob->offset_in_res; + blob->out_reshdr.uncompressed_size = 0; + blob->out_reshdr.size_in_wim = blob->size; + + blob->out_res_offset_in_wim = rdesc->offset_in_wim; + blob->out_res_size_in_wim = rdesc->size_in_wim; + blob->out_res_uncompressed_size = rdesc->uncompressed_size; } else { - *(le64*)chunk_tab->cur_offset_p = cpu_to_le64(chunk_tab->cur_offset_u64); - chunk_tab->cur_offset_p = (le64*)chunk_tab->cur_offset_p + 1; - chunk_tab->cur_offset_u64 += out_chunk_size; + blob->out_reshdr.offset_in_wim = rdesc->offset_in_wim; + blob->out_reshdr.uncompressed_size = rdesc->uncompressed_size; + blob->out_reshdr.size_in_wim = rdesc->size_in_wim; } + blob->out_reshdr.flags = rdesc->flags; } -/* Finishes a WIM chunk table and writes it to the output file at the correct - * offset. */ + +/* Write the header for a blob in a pipable WIM. */ static int -finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab, - struct filedes *out_fd, - off_t res_start_offset, - int write_resource_flags) +write_pwm_blob_header(const struct blob_descriptor *blob, + struct filedes *out_fd, bool compressed) { + struct pwm_blob_hdr blob_hdr; + u32 reshdr_flags; int ret; - if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) { - ret = full_write(out_fd, - chunk_tab->offsets + - chunk_tab->bytes_per_chunk_entry, - chunk_tab->table_disk_size); - } else { - ret = full_pwrite(out_fd, - chunk_tab->offsets + - chunk_tab->bytes_per_chunk_entry, - chunk_tab->table_disk_size, - res_start_offset); - } + wimlib_assert(!blob->unhashed); + + blob_hdr.magic = cpu_to_le64(PWM_BLOB_MAGIC); + blob_hdr.uncompressed_size = cpu_to_le64(blob->size); + copy_hash(blob_hdr.hash, blob->hash); + reshdr_flags = reshdr_flags_for_blob(blob); + if (compressed) + reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED; + blob_hdr.flags = cpu_to_le32(reshdr_flags); + ret = full_write(out_fd, &blob_hdr, sizeof(blob_hdr)); if (ret) ERROR_WITH_ERRNO("Write error"); return ret; } -/* Write the header for a stream in a pipable WIM. 
- */ +struct write_blobs_progress_data { + wimlib_progress_func_t progfunc; + void *progctx; + union wimlib_progress_info progress; + u64 next_progress; +}; + static int -write_pwm_stream_header(const struct wim_lookup_table_entry *lte, - struct filedes *out_fd, - int additional_reshdr_flags) +do_write_blobs_progress(struct write_blobs_progress_data *progress_data, + u64 complete_size, u32 complete_count, bool discarded) { - struct pwm_stream_hdr stream_hdr; - u32 reshdr_flags; + union wimlib_progress_info *progress = &progress_data->progress; int ret; - stream_hdr.magic = PWM_STREAM_MAGIC; - stream_hdr.uncompressed_size = cpu_to_le64(lte->size); - if (additional_reshdr_flags & PWM_RESHDR_FLAG_UNHASHED) { - zero_out_hash(stream_hdr.hash); + if (discarded) { + progress->write_streams.total_bytes -= complete_size; + progress->write_streams.total_streams -= complete_count; + if (progress_data->next_progress != ~(u64)0 && + progress_data->next_progress > progress->write_streams.total_bytes) + { + progress_data->next_progress = progress->write_streams.total_bytes; + } } else { - wimlib_assert(!lte->unhashed); - copy_hash(stream_hdr.hash, lte->hash); + progress->write_streams.completed_bytes += complete_size; + progress->write_streams.completed_streams += complete_count; } - reshdr_flags = lte->flags & ~(WIM_RESHDR_FLAG_COMPRESSED | WIM_RESHDR_FLAG_PACKED_STREAMS); - reshdr_flags |= additional_reshdr_flags; - stream_hdr.flags = cpu_to_le32(reshdr_flags); - ret = full_write(out_fd, &stream_hdr, sizeof(stream_hdr)); - if (ret) - ERROR_WITH_ERRNO("Error writing stream header"); - return ret; -} + if (progress->write_streams.completed_bytes >= progress_data->next_progress) { -static int -seek_and_truncate(struct filedes *out_fd, off_t offset) -{ - if (filedes_seek(out_fd, offset) == -1 || - ftruncate(out_fd->fd, offset)) - { - ERROR_WITH_ERRNO("Failed to truncate output WIM file"); - return WIMLIB_ERR_WRITE; - } - return 0; -} + ret = call_progress(progress_data->progfunc, + WIMLIB_PROGRESS_MSG_WRITE_STREAMS, + progress, + progress_data->progctx); + if (ret) + return ret; -static int -finalize_and_check_sha1(SHA_CTX *sha_ctx, struct wim_lookup_table_entry *lte) -{ - u8 md[SHA1_HASH_SIZE]; - - sha1_final(md, sha_ctx); - if (lte->unhashed) { - copy_hash(lte->hash, md); - } else if (!hashes_equal(md, lte->hash)) { - ERROR("WIM resource has incorrect hash!"); - if (lte_filename_valid(lte)) { - ERROR("We were reading it from \"%"TS"\"; maybe " - "it changed while we were reading it.", - lte->file_on_disk); - } - return WIMLIB_ERR_INVALID_RESOURCE_HASH; + set_next_progress(progress->write_streams.completed_bytes, + progress->write_streams.total_bytes, + &progress_data->next_progress); } return 0; } -struct write_resource_ctx { +struct write_blobs_ctx { + /* File descriptor to which the blobs are being written. */ + struct filedes *out_fd; + + /* Blob table for the WIMStruct on whose behalf the blobs are being + * written. */ + struct blob_table *blob_table; + + /* Compression format to use. */ int out_ctype; + + /* Maximum uncompressed chunk size in compressed resources to use. 
*/ u32 out_chunk_size; - struct wimlib_lzx_context *comp_ctx; - struct chunk_table *chunk_tab; - struct filedes *out_fd; - SHA_CTX sha_ctx; - bool doing_sha; - int resource_flags; -}; -static int -write_resource_cb(const void *chunk, size_t chunk_size, void *_ctx) -{ - struct write_resource_ctx *ctx = _ctx; - const void *out_chunk; - unsigned out_chunk_size; - int ret; - void *compressed_chunk = NULL; - unsigned compressed_size; - bool compressed_chunk_malloced = false; + /* Flags that affect how the blobs will be written. */ + int write_resource_flags; - if (ctx->doing_sha) - sha1_update(&ctx->sha_ctx, chunk, chunk_size); + /* Data used for issuing WRITE_STREAMS progress. */ + struct write_blobs_progress_data progress_data; - out_chunk = chunk; - out_chunk_size = chunk_size; - if (ctx->out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) { + struct filter_context *filter_ctx; - /* Compress the chunk. */ - if (chunk_size <= STACK_MAX) { - compressed_chunk = alloca(chunk_size); - } else { - compressed_chunk = MALLOC(chunk_size); - if (compressed_chunk == NULL) - return WIMLIB_ERR_NOMEM; - compressed_chunk_malloced = true; - } + /* Upper bound on the total number of bytes that need to be compressed. + * */ + u64 num_bytes_to_compress; - compressed_size = compress_chunk(chunk, chunk_size, - compressed_chunk, - ctx->out_ctype, - ctx->comp_ctx); - /* Use compressed data if compression to less than input size - * was successful. */ - if (compressed_size) { - out_chunk = compressed_chunk; - out_chunk_size = compressed_size; - } - } + /* Pointer to the chunk_compressor implementation being used for + * compressing chunks of data, or NULL if chunks are being written + * uncompressed. */ + struct chunk_compressor *compressor; - if (ctx->chunk_tab) { - /* Update chunk table accounting. */ - chunk_tab_record_chunk(ctx->chunk_tab, out_chunk_size); + /* A buffer of size @out_chunk_size that has been loaned out from the + * chunk compressor and is currently being filled with the uncompressed + * data of the next chunk. */ + u8 *cur_chunk_buf; - /* If writing compressed chunks to a pipable WIM, before the - * chunk data write a chunk header that provides the compressed - * chunk size. */ - if (ctx->resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) { - struct pwm_chunk_hdr chunk_hdr = { - .compressed_size = cpu_to_le32(out_chunk_size), - }; - ret = full_write(ctx->out_fd, &chunk_hdr, - sizeof(chunk_hdr)); - if (ret) - goto error; - } - } + /* Number of bytes in @cur_chunk_buf that are currently filled. */ + size_t cur_chunk_buf_filled; - /* Write the chunk data. */ - ret = full_write(ctx->out_fd, out_chunk, out_chunk_size); - if (ret) - goto error; + /* List of blobs that currently have chunks being compressed. */ + struct list_head blobs_being_compressed; -out_free_memory: - if (compressed_chunk_malloced) - FREE(compressed_chunk); - return ret; + /* List of blobs in the solid resource. Blobs are moved here after + * @blobs_being_compressed only when writing a solid resource. */ + struct list_head blobs_in_solid_resource; -error: - ERROR_WITH_ERRNO("Failed to write WIM resource chunk"); - goto out_free_memory; -} + /* Current uncompressed offset in the blob being read. */ + u64 cur_read_blob_offset; -/* - * write_wim_resource()- - * - * Write a resource to an output WIM. - * - * @lte: - * Lookup table entry for the resource, which could be in another WIM, in - * an external file, or in another location. - * - * @out_fd: - * File descriptor opened to the output WIM. 
- * - * @out_ctype: - * One of the WIMLIB_COMPRESSION_TYPE_* constants to indicate which - * compression algorithm to use. - * - * @out_chunk_size: - * Compressed chunk size to use. - * - * @out_reshdr: - * On success, this is filled in with the offset, flags, compressed size, - * and uncompressed size of the resource in the output WIM. - * - * @resource_flags: - * * WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS to force data to be recompressed even - * if it could otherwise be copied directly from the input; - * * WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE if writing a resource for a pipable WIM - * (and the output file descriptor may be a pipe). - * - * @comp_ctx: - * Location of LZX compression context pointer, which will be allocated or - * updated if needed. (Initialize to NULL.) - * - * Additional notes: The SHA1 message digest of the uncompressed data is - * calculated (except when doing a raw copy --- see below). If the @unhashed - * flag is set on the lookup table entry, this message digest is simply copied - * to it; otherwise, the message digest is compared with the existing one, and - * this function will fail if they do not match. - */ + /* Uncompressed size of the blob currently being read. */ + u64 cur_read_blob_size; + + /* Current uncompressed offset in the blob being written. */ + u64 cur_write_blob_offset; + + /* Uncompressed size of resource currently being written. */ + u64 cur_write_res_size; + + /* Array that is filled in with compressed chunk sizes as a resource is + * being written. */ + u64 *chunk_csizes; + + /* Index of next entry in @chunk_csizes to fill in. */ + size_t chunk_index; + + /* Number of entries in @chunk_csizes currently allocated. */ + size_t num_alloc_chunks; + + /* Offset in the output file of the start of the chunks of the resource + * currently being written. */ + u64 chunks_start_offset; +}; + +/* Reserve space for the chunk table and prepare to accumulate the chunk table + * in memory. */ static int -write_wim_resource(struct wim_lookup_table_entry *lte, - struct filedes *out_fd, int out_ctype, - u32 out_chunk_size, - struct wim_reshdr *out_reshdr, - int resource_flags, - struct wimlib_lzx_context **comp_ctx) -{ - struct write_resource_ctx write_ctx; - off_t res_start_offset; - u32 in_chunk_size; - u64 read_size; +begin_chunk_table(struct write_blobs_ctx *ctx, u64 res_expected_size) +{ + u64 expected_num_chunks; + u64 expected_num_chunk_entries; + size_t reserve_size; int ret; - /* Mask out any irrelevant flags, since this function also uses this - * variable to store WIMLIB_READ_RESOURCE flags. */ - resource_flags &= WIMLIB_WRITE_RESOURCE_MASK; - - /* Get current position in output WIM. */ - res_start_offset = out_fd->offset; - - /* If we are not forcing the data to be recompressed, and the input - * resource is located in a WIM with a compression mode compatible with - * the output, we can simply copy the compressed data without - * recompressing it. This also means we must skip calculating the SHA1, - * as we never will see the uncompressed data. */ - if (can_raw_copy(lte, resource_flags, out_ctype, out_chunk_size)) { - /* Normally, for raw copies we can request a RAW_FULL read, but - * if we're reading from a pipable resource and writing a - * non-pipable resource or vice versa, then a RAW_CHUNKS read - * needs to be requested so that the written resource can be - * appropriately formatted. However, in neither case is any - * actual decompression needed. 
*/ - if (lte->rspec->is_pipable == !!(resource_flags & - WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE)) - { - resource_flags |= WIMLIB_READ_RESOURCE_FLAG_RAW_FULL; - read_size = lte->rspec->size_in_wim; - } else { - resource_flags |= WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS; - read_size = lte->size; + /* Calculate the number of chunks and chunk entries that should be + * needed for the resource. These normally will be the final values, + * but in SOLID mode some of the blobs we're planning to write into the + * resource may be duplicates, and therefore discarded, potentially + * decreasing the number of chunk entries needed. */ + expected_num_chunks = DIV_ROUND_UP(res_expected_size, ctx->out_chunk_size); + expected_num_chunk_entries = expected_num_chunks; + if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)) + expected_num_chunk_entries--; + + /* Make sure the chunk_csizes array is long enough to store the + * compressed size of each chunk. */ + if (expected_num_chunks > ctx->num_alloc_chunks) { + u64 new_length = expected_num_chunks + 50; + + if ((size_t)new_length != new_length) { + ERROR("Resource size too large (%"PRIu64" bytes!", + res_expected_size); + return WIMLIB_ERR_NOMEM; } - write_ctx.doing_sha = false; - } else { - write_ctx.doing_sha = true; - sha1_init(&write_ctx.sha_ctx); - read_size = lte->size; - } - /* Set the output compression mode and initialize chunk table if needed. - */ - write_ctx.out_ctype = WIMLIB_COMPRESSION_TYPE_NONE; - write_ctx.out_chunk_size = out_chunk_size; - write_ctx.chunk_tab = NULL; - if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) { - wimlib_assert(out_chunk_size > 0); - if (!(resource_flags & WIMLIB_READ_RESOURCE_FLAG_RAW)) { - /* Compression needed. */ - write_ctx.out_ctype = out_ctype; - if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX) { - ret = wimlib_lzx_alloc_context(out_chunk_size, - NULL, comp_ctx); - if (ret) - goto out; - } - write_ctx.comp_ctx = *comp_ctx; - } - if (!(resource_flags & WIMLIB_READ_RESOURCE_FLAG_RAW_FULL)) { - /* Chunk table needed. */ - ret = begin_wim_resource_chunk_tab(lte, out_fd, - out_chunk_size, - &write_ctx.chunk_tab, - resource_flags); - if (ret) - goto out; + FREE(ctx->chunk_csizes); + ctx->chunk_csizes = MALLOC(new_length * sizeof(ctx->chunk_csizes[0])); + if (ctx->chunk_csizes == NULL) { + ctx->num_alloc_chunks = 0; + return WIMLIB_ERR_NOMEM; } - } - - /* If writing a pipable resource, write the stream header and update - * @res_start_offset to be the end of the stream header. */ - if (resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) { - int reshdr_flags = 0; - if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) - reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED; - ret = write_pwm_stream_header(lte, out_fd, reshdr_flags); + ctx->num_alloc_chunks = new_length; + } + + ctx->chunk_index = 0; + + if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)) { + /* Reserve space for the chunk table in the output file. In the + * case of solid resources this reserves the upper bound for the + * needed space, not necessarily the exact space which will + * prove to be needed. At this point, we just use @chunk_csizes + * for a buffer of 0's because the actual compressed chunk sizes + * are unknown. 
*/ + reserve_size = expected_num_chunk_entries * + get_chunk_entry_size(res_expected_size, + 0 != (ctx->write_resource_flags & + WRITE_RESOURCE_FLAG_SOLID)); + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) + reserve_size += sizeof(struct alt_chunk_table_header_disk); + memset(ctx->chunk_csizes, 0, reserve_size); + ret = full_write(ctx->out_fd, ctx->chunk_csizes, reserve_size); if (ret) - goto out_free_chunk_tab; - res_start_offset = out_fd->offset; + return ret; } + return 0; +} - /* Write the entire resource by reading the entire resource and feeding - * the data through write_resource_cb(). */ - write_ctx.out_fd = out_fd; - write_ctx.resource_flags = resource_flags; -try_write_again: - if (write_ctx.out_ctype == WIMLIB_COMPRESSION_TYPE_NONE) - in_chunk_size = 0; - else - in_chunk_size = out_chunk_size; - ret = read_stream_prefix(lte, read_size, write_resource_cb, - &write_ctx, resource_flags); - if (ret) - goto out_free_chunk_tab; +static int +begin_write_resource(struct write_blobs_ctx *ctx, u64 res_expected_size) +{ + int ret; - /* Verify SHA1 message digest of the resource, or set the hash for the - * first time. */ - if (write_ctx.doing_sha) { - ret = finalize_and_check_sha1(&write_ctx.sha_ctx, lte); - if (ret) - goto out_free_chunk_tab; - } + wimlib_assert(res_expected_size != 0); - /* Write chunk table if needed. */ - if (write_ctx.chunk_tab) { - ret = finish_wim_resource_chunk_tab(write_ctx.chunk_tab, - out_fd, - res_start_offset, - resource_flags); + if (ctx->compressor != NULL) { + ret = begin_chunk_table(ctx, res_expected_size); if (ret) - goto out_free_chunk_tab; + return ret; } - /* Fill in out_reshdr with information about the newly written - * resource. */ - out_reshdr->size_in_wim = out_fd->offset - res_start_offset; - out_reshdr->flags = lte->flags & ~WIM_RESHDR_FLAG_PACKED_STREAMS; - if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE) - out_reshdr->flags &= ~WIM_RESHDR_FLAG_COMPRESSED; - else - out_reshdr->flags |= WIM_RESHDR_FLAG_COMPRESSED; - out_reshdr->offset_in_wim = res_start_offset; - out_reshdr->uncompressed_size = lte->size; - - /* Check for resources compressed to greater than their original size - * and write them uncompressed instead. (But never do this if writing - * to a pipe, and don't bother if we did a raw copy.) 
*/ - if (out_reshdr->size_in_wim > out_reshdr->uncompressed_size && - !(resource_flags & (WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE | - WIMLIB_READ_RESOURCE_FLAG_RAW))) - { - DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; " - "writing uncompressed instead", - out_reshdr->uncompressed_size, out_reshdr->size_in_wim); - ret = seek_and_truncate(out_fd, res_start_offset); - if (ret) - goto out_free_chunk_tab; - out_ctype = WIMLIB_COMPRESSION_TYPE_NONE; - FREE(write_ctx.chunk_tab); - write_ctx.out_ctype = WIMLIB_COMPRESSION_TYPE_NONE; - write_ctx.chunk_tab = NULL; - write_ctx.doing_sha = false; - goto try_write_again; - } - if (resource_flags & WIMLIB_READ_RESOURCE_FLAG_RAW) { - DEBUG("Copied raw compressed data " - "(%"PRIu64" => %"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)", - out_reshdr->uncompressed_size, out_reshdr->size_in_wim, - out_reshdr->offset_in_wim, out_reshdr->flags); - } else if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) { - DEBUG("Wrote compressed resource " - "(%"PRIu64" => %"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)", - out_reshdr->uncompressed_size, out_reshdr->size_in_wim, - out_reshdr->offset_in_wim, out_reshdr->flags); - } else { - DEBUG("Wrote uncompressed resource " - "(%"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)", - out_reshdr->uncompressed_size, - out_reshdr->offset_in_wim, out_reshdr->flags); - } - ret = 0; -out_free_chunk_tab: - FREE(write_ctx.chunk_tab); -out: - return ret; + /* Output file descriptor is now positioned at the offset at which to + * write the first chunk of the resource. */ + ctx->chunks_start_offset = ctx->out_fd->offset; + ctx->cur_write_blob_offset = 0; + ctx->cur_write_res_size = res_expected_size; + return 0; } -/* Like write_wim_resource(), but the resource is specified by a buffer of - * uncompressed data rather a lookup table entry. Also writes the SHA1 message - * digest of the buffer to @hash_ret if it is non-NULL. */ -int -write_wim_resource_from_buffer(const void *buf, size_t buf_size, - int reshdr_flags, struct filedes *out_fd, - int out_ctype, - u32 out_chunk_size, - struct wim_reshdr *out_reshdr, - u8 *hash_ret, int write_resource_flags, - struct wimlib_lzx_context **comp_ctx) +static int +end_chunk_table(struct write_blobs_ctx *ctx, u64 res_actual_size, + u64 *res_start_offset_ret, u64 *res_store_size_ret) { + size_t actual_num_chunks; + size_t actual_num_chunk_entries; + size_t chunk_entry_size; int ret; - struct wim_lookup_table_entry *lte; - /* Set up a temporary lookup table entry to provide to - * write_wim_resource(). 
*/ + actual_num_chunks = ctx->chunk_index; + actual_num_chunk_entries = actual_num_chunks; + if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)) + actual_num_chunk_entries--; - lte = new_lookup_table_entry(); - if (lte == NULL) - return WIMLIB_ERR_NOMEM; + chunk_entry_size = get_chunk_entry_size(res_actual_size, + 0 != (ctx->write_resource_flags & + WRITE_RESOURCE_FLAG_SOLID)); + + typedef le64 _may_alias_attribute aliased_le64_t; + typedef le32 _may_alias_attribute aliased_le32_t; - lte->resource_location = RESOURCE_IN_ATTACHED_BUFFER; - lte->attached_buffer = (void*)buf; - lte->size = buf_size; - lte->flags = reshdr_flags; + if (chunk_entry_size == 4) { + aliased_le32_t *entries = (aliased_le32_t*)ctx->chunk_csizes; - if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) { - sha1_buffer(buf, buf_size, lte->hash); - lte->unhashed = 0; + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { + for (size_t i = 0; i < actual_num_chunk_entries; i++) + entries[i] = cpu_to_le32(ctx->chunk_csizes[i]); + } else { + u32 offset = ctx->chunk_csizes[0]; + for (size_t i = 0; i < actual_num_chunk_entries; i++) { + u32 next_size = ctx->chunk_csizes[i + 1]; + entries[i] = cpu_to_le32(offset); + offset += next_size; + } + } } else { - lte->unhashed = 1; + aliased_le64_t *entries = (aliased_le64_t*)ctx->chunk_csizes; + + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { + for (size_t i = 0; i < actual_num_chunk_entries; i++) + entries[i] = cpu_to_le64(ctx->chunk_csizes[i]); + } else { + u64 offset = ctx->chunk_csizes[0]; + for (size_t i = 0; i < actual_num_chunk_entries; i++) { + u64 next_size = ctx->chunk_csizes[i + 1]; + entries[i] = cpu_to_le64(offset); + offset += next_size; + } + } } - ret = write_wim_resource(lte, out_fd, out_ctype, out_chunk_size, - out_reshdr, write_resource_flags, comp_ctx); - if (ret) - goto out_free_lte; - if (hash_ret) - copy_hash(hash_ret, lte->hash); - ret = 0; -out_free_lte: - lte->resource_location = RESOURCE_NONEXISTENT; - free_lookup_table_entry(lte); - return ret; -} + size_t chunk_table_size = actual_num_chunk_entries * chunk_entry_size; + u64 res_start_offset; + u64 res_end_offset; + + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) { + ret = full_write(ctx->out_fd, ctx->chunk_csizes, chunk_table_size); + if (ret) + goto write_error; + res_end_offset = ctx->out_fd->offset; + res_start_offset = ctx->chunks_start_offset; + } else { + res_end_offset = ctx->out_fd->offset; + u64 chunk_table_offset; -#ifdef ENABLE_MULTITHREADED_COMPRESSION + chunk_table_offset = ctx->chunks_start_offset - chunk_table_size; -/* Blocking shared queue (solves the producer-consumer problem) */ -struct shared_queue { - unsigned size; - unsigned front; - unsigned back; - unsigned filled_slots; - void **array; - pthread_mutex_t lock; - pthread_cond_t msg_avail_cond; - pthread_cond_t space_avail_cond; -}; + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { + struct alt_chunk_table_header_disk hdr; -static int -shared_queue_init(struct shared_queue *q, unsigned size) -{ - wimlib_assert(size != 0); - q->array = CALLOC(sizeof(q->array[0]), size); - if (!q->array) - goto err; - q->filled_slots = 0; - q->front = 0; - q->back = size - 1; - q->size = size; - if (pthread_mutex_init(&q->lock, NULL)) { - ERROR_WITH_ERRNO("Failed to initialize mutex"); - goto err; - } - if (pthread_cond_init(&q->msg_avail_cond, NULL)) { - ERROR_WITH_ERRNO("Failed to initialize condition variable"); - goto err_destroy_lock; - } - if 
(pthread_cond_init(&q->space_avail_cond, NULL)) { - ERROR_WITH_ERRNO("Failed to initialize condition variable"); - goto err_destroy_msg_avail_cond; + hdr.res_usize = cpu_to_le64(res_actual_size); + hdr.chunk_size = cpu_to_le32(ctx->out_chunk_size); + hdr.compression_format = cpu_to_le32(ctx->out_ctype); + + BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_XPRESS != 1); + BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZX != 2); + BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZMS != 3); + + ret = full_pwrite(ctx->out_fd, &hdr, sizeof(hdr), + chunk_table_offset - sizeof(hdr)); + if (ret) + goto write_error; + res_start_offset = chunk_table_offset - sizeof(hdr); + } else { + res_start_offset = chunk_table_offset; + } + + ret = full_pwrite(ctx->out_fd, ctx->chunk_csizes, + chunk_table_size, chunk_table_offset); + if (ret) + goto write_error; } + + *res_start_offset_ret = res_start_offset; + *res_store_size_ret = res_end_offset - res_start_offset; + return 0; -err_destroy_msg_avail_cond: - pthread_cond_destroy(&q->msg_avail_cond); -err_destroy_lock: - pthread_mutex_destroy(&q->lock); -err: - return WIMLIB_ERR_NOMEM; -} -static void -shared_queue_destroy(struct shared_queue *q) -{ - FREE(q->array); - pthread_mutex_destroy(&q->lock); - pthread_cond_destroy(&q->msg_avail_cond); - pthread_cond_destroy(&q->space_avail_cond); +write_error: + ERROR_WITH_ERRNO("Write error"); + return ret; } -static void -shared_queue_put(struct shared_queue *q, void *obj) +/* Finish writing a WIM resource by writing or updating the chunk table (if not + * writing the data uncompressed) and loading its metadata into @out_reshdr. */ +static int +end_write_resource(struct write_blobs_ctx *ctx, struct wim_reshdr *out_reshdr) { - pthread_mutex_lock(&q->lock); - while (q->filled_slots == q->size) - pthread_cond_wait(&q->space_avail_cond, &q->lock); + int ret; + u64 res_size_in_wim; + u64 res_uncompressed_size; + u64 res_offset_in_wim; - q->back = (q->back + 1) % q->size; - q->array[q->back] = obj; - q->filled_slots++; + wimlib_assert(ctx->cur_write_blob_offset == ctx->cur_write_res_size || + (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)); + res_uncompressed_size = ctx->cur_write_res_size; - pthread_cond_broadcast(&q->msg_avail_cond); - pthread_mutex_unlock(&q->lock); + if (ctx->compressor) { + ret = end_chunk_table(ctx, res_uncompressed_size, + &res_offset_in_wim, &res_size_in_wim); + if (ret) + return ret; + } else { + res_offset_in_wim = ctx->chunks_start_offset; + res_size_in_wim = ctx->out_fd->offset - res_offset_in_wim; + } + out_reshdr->uncompressed_size = res_uncompressed_size; + out_reshdr->size_in_wim = res_size_in_wim; + out_reshdr->offset_in_wim = res_offset_in_wim; + return 0; } -static void * -shared_queue_get(struct shared_queue *q) +/* Call when no more data from the file at @path is needed. 
*/ +static int +done_with_file(const tchar *path, wimlib_progress_func_t progfunc, void *progctx) { - void *obj; + union wimlib_progress_info info; - pthread_mutex_lock(&q->lock); - while (q->filled_slots == 0) - pthread_cond_wait(&q->msg_avail_cond, &q->lock); + info.done_with_file.path_to_file = path; - obj = q->array[q->front]; - q->array[q->front] = NULL; - q->front = (q->front + 1) % q->size; - q->filled_slots--; - - pthread_cond_broadcast(&q->space_avail_cond); - pthread_mutex_unlock(&q->lock); - return obj; + return call_progress(progfunc, WIMLIB_PROGRESS_MSG_DONE_WITH_FILE, + &info, progctx); } -struct compressor_thread_params { - struct shared_queue *res_to_compress_queue; - struct shared_queue *compressed_res_queue; - int out_ctype; - struct wimlib_lzx_context *comp_ctx; -}; - -#define MAX_CHUNKS_PER_MSG 2 +static int +do_done_with_blob(struct blob_descriptor *blob, + wimlib_progress_func_t progfunc, void *progctx) +{ + int ret; + struct wim_inode *inode; + tchar *cookie1; + tchar *cookie2; -struct message { - struct wim_lookup_table_entry *lte; - u32 out_chunk_size; - u8 *uncompressed_chunks[MAX_CHUNKS_PER_MSG]; - u8 *compressed_chunks[MAX_CHUNKS_PER_MSG]; - unsigned uncompressed_chunk_sizes[MAX_CHUNKS_PER_MSG]; - struct iovec out_chunks[MAX_CHUNKS_PER_MSG]; - unsigned num_chunks; - struct list_head list; - bool complete; - u64 begin_chunk; -}; + if (!blob->may_send_done_with_file) + return 0; -static void -compress_chunks(struct message *msg, int out_ctype, - struct wimlib_lzx_context *comp_ctx) -{ - for (unsigned i = 0; i < msg->num_chunks; i++) { - unsigned len; - - len = compress_chunk(msg->uncompressed_chunks[i], - msg->uncompressed_chunk_sizes[i], - msg->compressed_chunks[i], - out_ctype, - comp_ctx); - - void *out_chunk; - unsigned out_len; - if (len) { - /* To be written compressed */ - out_chunk = msg->compressed_chunks[i]; - out_len = len; - } else { - /* To be written uncompressed */ - out_chunk = msg->uncompressed_chunks[i]; - out_len = msg->uncompressed_chunk_sizes[i]; - } - msg->out_chunks[i].iov_base = out_chunk; - msg->out_chunks[i].iov_len = out_len; - } -} + inode = blob->file_inode; -/* Compressor thread routine. This is a lot simpler than the main thread - * routine: just repeatedly get a group of chunks from the - * res_to_compress_queue, compress them, and put them in the - * compressed_res_queue. A NULL pointer indicates that the thread should stop. 
- * */ -static void * -compressor_thread_proc(void *arg) -{ - struct compressor_thread_params *params = arg; - struct shared_queue *res_to_compress_queue = params->res_to_compress_queue; - struct shared_queue *compressed_res_queue = params->compressed_res_queue; - struct message *msg; + wimlib_assert(inode != NULL); + wimlib_assert(inode->i_num_remaining_streams > 0); + if (--inode->i_num_remaining_streams > 0) + return 0; - DEBUG("Compressor thread ready"); - while ((msg = shared_queue_get(res_to_compress_queue)) != NULL) { - compress_chunks(msg, params->out_ctype, params->comp_ctx); - shared_queue_put(compressed_res_queue, msg); - } - DEBUG("Compressor thread terminating"); - return NULL; -} -#endif /* ENABLE_MULTITHREADED_COMPRESSION */ + cookie1 = progress_get_streamless_path(blob->file_on_disk); + cookie2 = progress_get_win32_path(blob->file_on_disk); -struct write_streams_progress_data { - wimlib_progress_func_t progress_func; - union wimlib_progress_info progress; - uint64_t next_progress; - WIMStruct *prev_wim_part; -}; + ret = done_with_file(blob->file_on_disk, progfunc, progctx); -static void -do_write_streams_progress(struct write_streams_progress_data *progress_data, - struct wim_lookup_table_entry *lte, - bool stream_discarded) -{ - union wimlib_progress_info *progress = &progress_data->progress; - bool new_wim_part; + progress_put_win32_path(cookie2); + progress_put_streamless_path(cookie1); - if (stream_discarded) { - progress->write_streams.total_bytes -= lte->size; - if (progress_data->next_progress != ~(uint64_t)0 && - progress_data->next_progress > progress->write_streams.total_bytes) - { - progress_data->next_progress = progress->write_streams.total_bytes; - } - } else { - progress->write_streams.completed_bytes += lte->size; - } - new_wim_part = false; - if (lte->resource_location == RESOURCE_IN_WIM && - lte->rspec->wim != progress_data->prev_wim_part) - { - if (progress_data->prev_wim_part) { - new_wim_part = true; - progress->write_streams.completed_parts++; - } - progress_data->prev_wim_part = lte->rspec->wim; - } - progress->write_streams.completed_streams++; - if (progress_data->progress_func - && (progress->write_streams.completed_bytes >= progress_data->next_progress - || new_wim_part)) - { - progress_data->progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, - progress); - if (progress_data->next_progress == progress->write_streams.total_bytes) { - progress_data->next_progress = ~(uint64_t)0; - } else { - progress_data->next_progress = - min(progress->write_streams.total_bytes, - progress->write_streams.completed_bytes + - progress->write_streams.total_bytes / 100); - } - } + return ret; } -struct serial_write_stream_ctx { - struct filedes *out_fd; - int out_ctype; - u32 out_chunk_size; - struct wimlib_lzx_context **comp_ctx; - int write_resource_flags; -}; - -static int -serial_write_stream(struct wim_lookup_table_entry *lte, void *_ctx) +/* Handle WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES mode. 
*/ +static inline int +done_with_blob(struct blob_descriptor *blob, struct write_blobs_ctx *ctx) { - struct serial_write_stream_ctx *ctx = _ctx; - return write_wim_resource(lte, ctx->out_fd, - ctx->out_ctype, - ctx->out_chunk_size, - <e->out_reshdr, - ctx->write_resource_flags, - ctx->comp_ctx); + if (likely(!(ctx->write_resource_flags & + WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE))) + return 0; + return do_done_with_blob(blob, ctx->progress_data.progfunc, + ctx->progress_data.progctx); } - -/* Write a list of streams, taking into account that some streams may be - * duplicates that are checksummed and discarded on the fly, and also delegating - * the actual writing of a stream to a function @write_stream_cb, which is - * passed the context @write_stream_ctx. */ +/* Begin processing a blob for writing. */ static int -do_write_stream_list(struct list_head *stream_list, - struct wim_lookup_table *lookup_table, - int (*write_stream_cb)(struct wim_lookup_table_entry *, void *), - void *write_stream_ctx, - struct write_streams_progress_data *progress_data) +write_blob_begin_read(struct blob_descriptor *blob, void *_ctx) { - int ret = 0; - struct wim_lookup_table_entry *lte; - bool stream_discarded; - - /* For each stream in @stream_list ... */ - while (!list_empty(stream_list)) { - stream_discarded = false; - lte = container_of(stream_list->next, - struct wim_lookup_table_entry, - write_streams_list); - list_del(<e->write_streams_list); - if (lte->unhashed && !lte->unique_size) { - /* Unhashed stream that shares a size with some other - * stream in the WIM we are writing. The stream must be - * checksummed to know if we need to write it or not. */ - struct wim_lookup_table_entry *tmp; - u32 orig_out_refcnt = lte->out_refcnt; - - ret = hash_unhashed_stream(lte, lookup_table, &tmp); - if (ret) - break; - if (tmp != lte) { - /* We found a duplicate stream. 'lte' was - * freed, so replace it with the duplicate. */ - lte = tmp; - - /* 'out_refcnt' was transferred to the - * duplicate, and we can detect if the duplicate - * stream was already referenced for writing by - * checking if its 'out_refcnt' is higher than - * that of the original stream. In such cases, - * the current stream can be discarded. We can - * also discard the current stream if it was - * previously marked as filtered (e.g. already - * present in the WIM being written). */ - if (lte->out_refcnt > orig_out_refcnt || - lte->filtered) { - DEBUG("Discarding duplicate stream of " - "length %"PRIu64, - lte->size); - lte->no_progress = 0; - stream_discarded = true; - goto skip_to_progress; - } - } - } + struct write_blobs_ctx *ctx = _ctx; + int ret; - /* Here, @lte is either a hashed stream or an unhashed stream - * with a unique size. In either case we know that the stream - * has to be written. In either case the SHA1 message digest - * will be calculated over the stream while writing it; however, - * in the former case this is done merely to check the data, - * while in the latter case this is done because we do not have - * the SHA1 message digest yet. */ - wimlib_assert(lte->out_refcnt != 0); - lte->deferred = 0; - lte->no_progress = 0; - ret = (*write_stream_cb)(lte, write_stream_ctx); - if (ret) - break; - /* In parallel mode, some streams are deferred for later, - * serialized processing; ignore them here. 
*/ - if (lte->deferred) - continue; - if (lte->unhashed) { - list_del(<e->unhashed_list); - lookup_table_insert(lookup_table, lte); - lte->unhashed = 0; - } - skip_to_progress: - if (!lte->no_progress) { - do_write_streams_progress(progress_data, - lte, stream_discarded); - } - } - return ret; -} + wimlib_assert(blob->size > 0); -static int -do_write_stream_list_serial(struct list_head *stream_list, - struct wim_lookup_table *lookup_table, - struct filedes *out_fd, - int out_ctype, - u32 out_chunk_size, - struct wimlib_lzx_context **comp_ctx, - int write_resource_flags, - struct write_streams_progress_data *progress_data) -{ - struct serial_write_stream_ctx ctx = { - .out_fd = out_fd, - .out_ctype = out_ctype, - .out_chunk_size = out_chunk_size, - .write_resource_flags = write_resource_flags, - .comp_ctx = comp_ctx, - }; - return do_write_stream_list(stream_list, - lookup_table, - serial_write_stream, - &ctx, - progress_data); -} + ctx->cur_read_blob_offset = 0; + ctx->cur_read_blob_size = blob->size; -static inline int -write_flags_to_resource_flags(int write_flags) -{ - int resource_flags = 0; + /* As an optimization, we allow some blobs to be "unhashed", meaning + * their SHA-1 message digests are unknown. This is the case with blobs + * that are added by scanning a directory tree with wimlib_add_image(), + * for example. Since WIM uses single-instance blobs, we don't know + * whether such each such blob really need to written until it is + * actually checksummed, unless it has a unique size. In such cases we + * read and checksum the blob in this function, thereby advancing ahead + * of read_blob_list(), which will still provide the data again to + * write_blob_process_chunk(). This is okay because an unhashed blob + * cannot be in a WIM resource, which might be costly to decompress. */ + if (ctx->blob_table != NULL && blob->unhashed && !blob->unique_size) { - if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS) - resource_flags |= WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS; - if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE) - resource_flags |= WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE; - return resource_flags; + struct blob_descriptor *new_blob; + + ret = hash_unhashed_blob(blob, ctx->blob_table, &new_blob); + if (ret) + return ret; + if (new_blob != blob) { + /* Duplicate blob detected. */ + + if (new_blob->will_be_in_output_wim || + blob_filtered(new_blob, ctx->filter_ctx)) + { + /* The duplicate blob is already being included + * in the output WIM, or it would be filtered + * out if it had been. Skip writing this blob + * (and reading it again) entirely, passing its + * output reference count to the duplicate blob + * in the former case. */ + ret = do_write_blobs_progress(&ctx->progress_data, + blob->size, 1, true); + list_del(&blob->write_blobs_list); + list_del(&blob->blob_table_list); + if (new_blob->will_be_in_output_wim) + new_blob->out_refcnt += blob->out_refcnt; + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) + ctx->cur_write_res_size -= blob->size; + if (!ret) + ret = done_with_blob(blob, ctx); + free_blob_descriptor(blob); + if (ret) + return ret; + return BEGIN_BLOB_STATUS_SKIP_BLOB; + } else { + /* The duplicate blob can validly be written, + * but was not marked as such. Discard the + * current blob descriptor and use the + * duplicate, but actually freeing the current + * blob descriptor must wait until + * read_blob_list() has finished reading its + * data. 
*/ + list_replace(&blob->write_blobs_list, + &new_blob->write_blobs_list); + list_replace(&blob->blob_table_list, + &new_blob->blob_table_list); + blob->will_be_in_output_wim = 0; + new_blob->out_refcnt = blob->out_refcnt; + new_blob->will_be_in_output_wim = 1; + new_blob->may_send_done_with_file = 0; + blob = new_blob; + } + } + } + list_move_tail(&blob->write_blobs_list, &ctx->blobs_being_compressed); + return 0; } +/* Rewrite a blob that was just written compressed (as a non-solid WIM resource) + * as uncompressed instead. */ static int -write_stream_list_serial(struct list_head *stream_list, - struct wim_lookup_table *lookup_table, - struct filedes *out_fd, - int out_ctype, - u32 out_chunk_size, - struct wimlib_lzx_context **comp_ctx, - int write_resource_flags, - struct write_streams_progress_data *progress_data) -{ - union wimlib_progress_info *progress = &progress_data->progress; - DEBUG("Writing stream list of size %"PRIu64" (serial version)", - progress->write_streams.total_streams); - progress->write_streams.num_threads = 1; - if (progress_data->progress_func) { - progress_data->progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, - progress); - } - return do_write_stream_list_serial(stream_list, - lookup_table, - out_fd, - out_ctype, - out_chunk_size, - comp_ctx, - write_resource_flags, - progress_data); -} - -#ifdef ENABLE_MULTITHREADED_COMPRESSION -static int -write_wim_chunks(struct message *msg, struct filedes *out_fd, - struct chunk_table *chunk_tab, - int write_resource_flags) +write_blob_uncompressed(struct blob_descriptor *blob, struct filedes *out_fd) { - struct iovec *vecs; - struct pwm_chunk_hdr *chunk_hdrs; - unsigned nvecs; int ret; + u64 begin_offset = blob->out_reshdr.offset_in_wim; + u64 end_offset = out_fd->offset; - for (unsigned i = 0; i < msg->num_chunks; i++) - chunk_tab_record_chunk(chunk_tab, msg->out_chunks[i].iov_len); + if (filedes_seek(out_fd, begin_offset) == -1) + return 0; - if (!(write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE)) { - nvecs = msg->num_chunks; - vecs = msg->out_chunks; - } else { - /* Special case: If writing a compressed resource to a pipable - * WIM, prefix each compressed chunk with a header that gives - * its compressed size. */ - nvecs = msg->num_chunks * 2; - vecs = alloca(nvecs * sizeof(vecs[0])); - chunk_hdrs = alloca(msg->num_chunks * sizeof(chunk_hdrs[0])); - - for (unsigned i = 0; i < msg->num_chunks; i++) { - chunk_hdrs[i].compressed_size = cpu_to_le32(msg->out_chunks[i].iov_len); - vecs[i * 2].iov_base = &chunk_hdrs[i]; - vecs[i * 2].iov_len = sizeof(chunk_hdrs[i]); - vecs[i * 2 + 1].iov_base = msg->out_chunks[i].iov_base; - vecs[i * 2 + 1].iov_len = msg->out_chunks[i].iov_len; + ret = extract_blob_to_fd(blob, out_fd); + if (ret) { + /* Error reading the uncompressed data. */ + if (out_fd->offset == begin_offset && + filedes_seek(out_fd, end_offset) != -1) + { + /* Nothing was actually written yet, and we successfully + * seeked to the end of the compressed resource, so + * don't issue a hard error; just keep the compressed + * resource instead. 
*/ + WARNING("Recovered compressed resource of " + "size %"PRIu64", continuing on.", blob->size); + return 0; } + return ret; } - ret = full_writev(out_fd, vecs, nvecs); - if (ret) - ERROR_WITH_ERRNO("Write error"); - return ret; -} -struct main_writer_thread_ctx { - struct list_head *stream_list; - struct wim_lookup_table *lookup_table; - struct filedes *out_fd; - off_t res_start_offset; - int out_ctype; - u32 out_chunk_size; - struct wimlib_lzx_context **comp_ctx; - int write_resource_flags; - struct shared_queue *res_to_compress_queue; - struct shared_queue *compressed_res_queue; - size_t num_messages; - struct write_streams_progress_data *progress_data; - - struct list_head available_msgs; - struct list_head outstanding_streams; - struct list_head serial_streams; - size_t num_outstanding_messages; - - SHA_CTX next_sha_ctx; - u64 next_chunk; - u64 next_num_chunks; - struct wim_lookup_table_entry *next_lte; - - struct message *msgs; - struct message *next_msg; - struct chunk_table *cur_chunk_tab; -}; + wimlib_assert(out_fd->offset - begin_offset == blob->size); -static int -init_message(struct message *msg, u32 out_chunk_size) -{ - msg->out_chunk_size = out_chunk_size; - for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) { - msg->compressed_chunks[i] = MALLOC(out_chunk_size); - msg->uncompressed_chunks[i] = MALLOC(out_chunk_size); - if (msg->compressed_chunks[i] == NULL || - msg->uncompressed_chunks[i] == NULL) - return WIMLIB_ERR_NOMEM; + if (out_fd->offset < end_offset && + 0 != ftruncate(out_fd->fd, out_fd->offset)) + { + ERROR_WITH_ERRNO("Can't truncate output file to " + "offset %"PRIu64, out_fd->offset); + return WIMLIB_ERR_WRITE; } + + blob->out_reshdr.size_in_wim = blob->size; + blob->out_reshdr.flags &= ~(WIM_RESHDR_FLAG_COMPRESSED | + WIM_RESHDR_FLAG_SOLID); return 0; } -static void -destroy_message(struct message *msg) +/* Returns true if the specified blob, which was written as a non-solid + * resource, should be truncated from the WIM file and re-written uncompressed. + * blob->out_reshdr must be filled in from the initial write of the blob. */ +static bool +should_rewrite_blob_uncompressed(const struct write_blobs_ctx *ctx, + const struct blob_descriptor *blob) { - for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) { - FREE(msg->compressed_chunks[i]); - FREE(msg->uncompressed_chunks[i]); - } + /* If the compressed data is smaller than the uncompressed data, prefer + * the compressed data. */ + if (blob->out_reshdr.size_in_wim < blob->out_reshdr.uncompressed_size) + return false; + + /* If we're not actually writing compressed data, then there's no need + * for re-writing. */ + if (!ctx->compressor) + return false; + + /* If writing a pipable WIM, everything we write to the output is final + * (it might actually be a pipe!). */ + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) + return false; + + /* If the blob that would need to be re-read is located in a solid + * resource in another WIM file, then re-reading it would be costly. So + * don't do it. + * + * Exception: if the compressed size happens to be *exactly* the same as + * the uncompressed size, then the blob *must* be written uncompressed + * in order to remain compatible with the Windows Overlay Filesystem + * Filter Driver (WOF). + * + * TODO: we are currently assuming that the optimization for + * single-chunk resources in maybe_rewrite_blob_uncompressed() prevents + * this case from being triggered too often. 
To fully prevent excessive
+	 * decompressions in degenerate cases, we really should obtain the
+	 * uncompressed data by decompressing the compressed data we wrote to
+	 * the output file.
+	 */
+	if (blob->blob_location == BLOB_IN_WIM &&
+	    blob->size != blob->rdesc->uncompressed_size &&
+	    blob->size != blob->out_reshdr.size_in_wim)
+		return false;
+
+	return true;
 }
 
-static void
-free_messages(struct message *msgs, size_t num_messages)
+static int
+maybe_rewrite_blob_uncompressed(struct write_blobs_ctx *ctx,
+				struct blob_descriptor *blob)
 {
-	if (msgs) {
-		for (size_t i = 0; i < num_messages; i++)
-			destroy_message(&msgs[i]);
-		FREE(msgs);
+	if (!should_rewrite_blob_uncompressed(ctx, blob))
+		return 0;
+
+	/* Regular (non-solid) WIM resources with exactly one chunk and
+	 * compressed size equal to uncompressed size are exactly the same as
+	 * the corresponding compressed data --- since there must be 0 entries
+	 * in the chunk table and the only chunk must be stored uncompressed.
+	 * In this case, there's no need to rewrite anything. */
+	if (ctx->chunk_index == 1 &&
+	    blob->out_reshdr.size_in_wim == blob->out_reshdr.uncompressed_size)
+	{
+		blob->out_reshdr.flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
+		return 0;
 	}
+
+	return write_blob_uncompressed(blob, ctx->out_fd);
 }
 
-static struct message *
-allocate_messages(size_t num_messages, u32 out_chunk_size)
+/* Write the next chunk of (typically compressed) data to the output WIM,
+ * handling the writing of the chunk table. */
+static int
+write_chunk(struct write_blobs_ctx *ctx, const void *cchunk,
+	    size_t csize, size_t usize)
 {
-	struct message *msgs;
+	int ret;
+	struct blob_descriptor *blob;
+	u32 completed_blob_count;
+	u32 completed_size;
+
+	blob = list_entry(ctx->blobs_being_compressed.next,
+			  struct blob_descriptor, write_blobs_list);
 
-	msgs = CALLOC(num_messages, sizeof(struct message));
-	if (msgs == NULL)
-		return NULL;
-	for (size_t i = 0; i < num_messages; i++) {
-		if (init_message(&msgs[i], out_chunk_size)) {
-			free_messages(msgs, num_messages);
-			return NULL;
+	if (ctx->cur_write_blob_offset == 0 &&
+	    !(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
+	{
+		/* Starting to write a new blob in non-solid mode. */
+
+		if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) {
+			ret = write_pwm_blob_header(blob, ctx->out_fd,
+						    ctx->compressor != NULL);
+			if (ret)
+				return ret;
 		}
+
+		ret = begin_write_resource(ctx, blob->size);
+		if (ret)
+			return ret;
 	}
-	return msgs;
-}
 
-static void
-main_writer_thread_destroy_ctx(struct main_writer_thread_ctx *ctx)
-{
-	while (ctx->num_outstanding_messages--)
-		shared_queue_get(ctx->compressed_res_queue);
-	free_messages(ctx->msgs, ctx->num_messages);
-	FREE(ctx->cur_chunk_tab);
-}
+	if (ctx->compressor != NULL) {
+		/* Record the compressed chunk size. */
+		wimlib_assert(ctx->chunk_index < ctx->num_alloc_chunks);
+		ctx->chunk_csizes[ctx->chunk_index++] = csize;
 
-static int
-main_writer_thread_init_ctx(struct main_writer_thread_ctx *ctx)
-{
-	/* Pre-allocate all the buffers that will be needed to do the chunk
-	 * compression. */
-	ctx->msgs = allocate_messages(ctx->num_messages, ctx->out_chunk_size);
-	if (ctx->msgs == NULL)
-		return WIMLIB_ERR_NOMEM;
+		/* If writing a pipable WIM, before the chunk data, write a
+		 * chunk header that provides the compressed chunk size.
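+		 *
+		 * The chunk stream thus looks like
+		 *
+		 *	[le32 csize][csize bytes][le32 csize][csize bytes]...
+		 *
+		 * so a consumer can walk the chunks sequentially without a
+		 * chunk table.  A minimal reader-side sketch (illustrative
+		 * only; assumes an input file descriptor positioned at a
+		 * chunk header, and uses full_read() and le32_to_cpu() from
+		 * this codebase):
+		 *
+		 *	struct pwm_chunk_hdr hdr;
+		 *
+		 *	ret = full_read(in_fd, &hdr, sizeof(hdr));
+		 *	if (!ret)
+		 *		csize = le32_to_cpu(hdr.compressed_size);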
*/ + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) { + struct pwm_chunk_hdr chunk_hdr = { + .compressed_size = cpu_to_le32(csize), + }; + ret = full_write(ctx->out_fd, &chunk_hdr, + sizeof(chunk_hdr)); + if (ret) + goto write_error; + } + } - /* Initially, all the messages are available to use. */ - INIT_LIST_HEAD(&ctx->available_msgs); - for (size_t i = 0; i < ctx->num_messages; i++) - list_add_tail(&ctx->msgs[i].list, &ctx->available_msgs); + /* Write the chunk data. */ + ret = full_write(ctx->out_fd, cchunk, csize); + if (ret) + goto write_error; - /* outstanding_streams is the list of streams that currently have had - * chunks sent off for compression. - * - * The first stream in outstanding_streams is the stream that is - * currently being written. - * - * The last stream in outstanding_streams is the stream that is - * currently being read and having chunks fed to the compressor threads. - * */ - INIT_LIST_HEAD(&ctx->outstanding_streams); - ctx->num_outstanding_messages = 0; + ctx->cur_write_blob_offset += usize; - /* Message currently being prepared. */ - ctx->next_msg = NULL; + completed_size = usize; + completed_blob_count = 0; + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { + /* Wrote chunk in solid mode. It may have finished multiple + * blobs. */ + struct blob_descriptor *next_blob; - /* Resources that don't need any chunks compressed are added to this - * list and written directly by the main thread. */ - INIT_LIST_HEAD(&ctx->serial_streams); + while (blob && ctx->cur_write_blob_offset >= blob->size) { - /* Pointer to chunk table for stream currently being written. */ - ctx->cur_chunk_tab = NULL; + ctx->cur_write_blob_offset -= blob->size; - return 0; -} + if (ctx->cur_write_blob_offset) + next_blob = list_entry(blob->write_blobs_list.next, + struct blob_descriptor, + write_blobs_list); + else + next_blob = NULL; -static int -receive_compressed_chunks(struct main_writer_thread_ctx *ctx) -{ - struct message *msg; - struct wim_lookup_table_entry *cur_lte; - int ret; + ret = done_with_blob(blob, ctx); + if (ret) + return ret; + list_move_tail(&blob->write_blobs_list, &ctx->blobs_in_solid_resource); + completed_blob_count++; - wimlib_assert(!list_empty(&ctx->outstanding_streams)); - wimlib_assert(ctx->num_outstanding_messages != 0); + blob = next_blob; + } + } else { + /* Wrote chunk in non-solid mode. It may have finished a + * blob. */ + if (ctx->cur_write_blob_offset == blob->size) { - cur_lte = container_of(ctx->outstanding_streams.next, - struct wim_lookup_table_entry, - being_compressed_list); + wimlib_assert(ctx->cur_write_blob_offset == + ctx->cur_write_res_size); - /* Get the next message from the queue and process it. - * The message will contain 1 or more data chunks that have been - * compressed. */ - msg = shared_queue_get(ctx->compressed_res_queue); - msg->complete = true; - --ctx->num_outstanding_messages; + ret = end_write_resource(ctx, &blob->out_reshdr); + if (ret) + return ret; - /* Is this the next chunk in the current resource? If it's not (i.e., - * an earlier chunk in a same or different resource hasn't been - * compressed yet), do nothing, and keep this message around until all - * earlier chunks are received. - * - * Otherwise, write all the chunks we can. */ - while (cur_lte != NULL && - !list_empty(&cur_lte->msg_list) - && (msg = container_of(cur_lte->msg_list.next, - struct message, - list))->complete) - { - list_move(&msg->list, &ctx->available_msgs); - if (msg->begin_chunk == 0) { - /* First set of chunks. 
*/ + blob->out_reshdr.flags = reshdr_flags_for_blob(blob); + if (ctx->compressor != NULL) + blob->out_reshdr.flags |= WIM_RESHDR_FLAG_COMPRESSED; - /* Write pipable WIM stream header if needed. */ - if (ctx->write_resource_flags & - WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) - { - ret = write_pwm_stream_header(cur_lte, ctx->out_fd, - WIM_RESHDR_FLAG_COMPRESSED); - if (ret) - return ret; - } + ret = maybe_rewrite_blob_uncompressed(ctx, blob); + if (ret) + return ret; - /* Save current offset. */ - ctx->res_start_offset = ctx->out_fd->offset; + wimlib_assert(blob->out_reshdr.uncompressed_size == blob->size); - /* Begin building the chunk table, and leave space for - * it if needed. */ - ret = begin_wim_resource_chunk_tab(cur_lte, - ctx->out_fd, - ctx->out_chunk_size, - &ctx->cur_chunk_tab, - ctx->write_resource_flags); + ctx->cur_write_blob_offset = 0; + + ret = done_with_blob(blob, ctx); if (ret) return ret; + list_del(&blob->write_blobs_list); + completed_blob_count++; } + } + + return do_write_blobs_progress(&ctx->progress_data, completed_size, + completed_blob_count, false); + +write_error: + ERROR_WITH_ERRNO("Write error"); + return ret; +} - /* Write the compressed chunks from the message. */ - ret = write_wim_chunks(msg, ctx->out_fd, ctx->cur_chunk_tab, - ctx->write_resource_flags); +static int +prepare_chunk_buffer(struct write_blobs_ctx *ctx) +{ + /* While we are unable to get a new chunk buffer due to too many chunks + * already outstanding, retrieve and write the next compressed chunk. */ + while (!(ctx->cur_chunk_buf = + ctx->compressor->get_chunk_buffer(ctx->compressor))) + { + const void *cchunk; + u32 csize; + u32 usize; + bool bret; + int ret; + + bret = ctx->compressor->get_compression_result(ctx->compressor, + &cchunk, + &csize, + &usize); + wimlib_assert(bret); + + ret = write_chunk(ctx, cchunk, csize, usize); if (ret) return ret; + } + return 0; +} - /* Was this the last chunk of the stream? If so, finish the - * stream by writing the chunk table. */ - if (list_empty(&cur_lte->msg_list) && - msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks) - { - u64 res_csize; +/* Process the next chunk of data to be written to a WIM resource. */ +static int +write_blob_process_chunk(const void *chunk, size_t size, void *_ctx) +{ + struct write_blobs_ctx *ctx = _ctx; + int ret; + const u8 *chunkptr, *chunkend; - ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab, - ctx->out_fd, - ctx->res_start_offset, - ctx->write_resource_flags); + wimlib_assert(size != 0); + + if (ctx->compressor == NULL) { + /* Write chunk uncompressed. */ + ret = write_chunk(ctx, chunk, size, size); + if (ret) + return ret; + ctx->cur_read_blob_offset += size; + return 0; + } + + /* Submit the chunk for compression, but take into account that the + * @size the chunk was provided in may not correspond to the + * @out_chunk_size being used for compression. 
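+	 *
+	 * For example (illustrative sizes only): with an out_chunk_size of
+	 * 32768, a 131072-byte piece from the reader is split across four
+	 * compressor chunks, while 4096-byte pieces are accumulated in
+	 * cur_chunk_buf until 32768 bytes are available (or, in non-solid
+	 * mode, until the end of the current blob is reached).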
*/ + chunkptr = chunk; + chunkend = chunkptr + size; + do { + size_t needed_chunk_size; + size_t bytes_consumed; + + if (!ctx->cur_chunk_buf) { + ret = prepare_chunk_buffer(ctx); if (ret) return ret; + } - list_del(&cur_lte->being_compressed_list); - - res_csize = ctx->out_fd->offset - ctx->res_start_offset; - - FREE(ctx->cur_chunk_tab); - ctx->cur_chunk_tab = NULL; + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { + needed_chunk_size = ctx->out_chunk_size; + } else { + needed_chunk_size = min(ctx->out_chunk_size, + ctx->cur_chunk_buf_filled + + (ctx->cur_read_blob_size - + ctx->cur_read_blob_offset)); + } - /* Check for resources compressed to greater than or - * equal to their original size and write them - * uncompressed instead. (But never do this if writing - * to a pipe.) */ - if (res_csize >= cur_lte->size && - !(ctx->write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE)) - { - DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; " - "writing uncompressed instead", - cur_lte->size, res_csize); - ret = seek_and_truncate(ctx->out_fd, ctx->res_start_offset); - if (ret) - return ret; - ret = write_wim_resource(cur_lte, - ctx->out_fd, - WIMLIB_COMPRESSION_TYPE_NONE, - 0, - &cur_lte->out_reshdr, - ctx->write_resource_flags, - ctx->comp_ctx); - if (ret) - return ret; - } else { - cur_lte->out_reshdr.size_in_wim = - res_csize; + bytes_consumed = min(chunkend - chunkptr, + needed_chunk_size - ctx->cur_chunk_buf_filled); - cur_lte->out_reshdr.uncompressed_size = - cur_lte->size; + memcpy(&ctx->cur_chunk_buf[ctx->cur_chunk_buf_filled], + chunkptr, bytes_consumed); - cur_lte->out_reshdr.offset_in_wim = - ctx->res_start_offset; + chunkptr += bytes_consumed; + ctx->cur_read_blob_offset += bytes_consumed; + ctx->cur_chunk_buf_filled += bytes_consumed; - cur_lte->out_reshdr.flags = - cur_lte->flags | - WIM_RESHDR_FLAG_COMPRESSED; + if (ctx->cur_chunk_buf_filled == needed_chunk_size) { + ctx->compressor->signal_chunk_filled(ctx->compressor, + ctx->cur_chunk_buf_filled); + ctx->cur_chunk_buf = NULL; + ctx->cur_chunk_buf_filled = 0; + } + } while (chunkptr != chunkend); + return 0; +} - DEBUG("Wrote compressed resource " - "(%"PRIu64" => %"PRIu64" bytes @ +%"PRIu64", flags=0x%02x)", - cur_lte->out_reshdr.uncompressed_size, - cur_lte->out_reshdr.size_in_wim, - cur_lte->out_reshdr.offset_in_wim, - cur_lte->out_reshdr.flags); - } +/* Finish processing a blob for writing. It may not have been completely + * written yet, as the chunk_compressor implementation may still have chunks + * buffered or being compressed. */ +static int +write_blob_end_read(struct blob_descriptor *blob, int status, void *_ctx) +{ + struct write_blobs_ctx *ctx = _ctx; - do_write_streams_progress(ctx->progress_data, - cur_lte, false); - - /* Since we just finished writing a stream, write any - * streams that have been added to the serial_streams - * list for direct writing by the main thread (e.g. - * resources that don't need to be compressed because - * the desired compression type is the same as the - * previous compression type). */ - if (!list_empty(&ctx->serial_streams)) { - ret = do_write_stream_list_serial(&ctx->serial_streams, - ctx->lookup_table, - ctx->out_fd, - ctx->out_ctype, - ctx->out_chunk_size, - ctx->comp_ctx, - ctx->write_resource_flags, - ctx->progress_data); - if (ret) - return ret; - } + wimlib_assert(ctx->cur_read_blob_offset == ctx->cur_read_blob_size || status); - /* Advance to the next stream to write. 
*/ - if (list_empty(&ctx->outstanding_streams)) { - cur_lte = NULL; - } else { - cur_lte = container_of(ctx->outstanding_streams.next, - struct wim_lookup_table_entry, - being_compressed_list); + if (!blob->will_be_in_output_wim) { + /* The blob was a duplicate. Now that its data has finished + * being read, it is being discarded in favor of the duplicate + * entry. It therefore is no longer needed, and we can fire the + * DONE_WITH_FILE callback because the file will not be read + * again. + * + * Note: we can't yet fire DONE_WITH_FILE for non-duplicate + * blobs, since it needs to be possible to re-read the file if + * it does not compress to less than its original size. */ + if (!status) + status = done_with_blob(blob, ctx); + free_blob_descriptor(blob); + } else if (!status && blob->unhashed && ctx->blob_table != NULL) { + /* The blob was not a duplicate and was previously unhashed. + * Since we passed COMPUTE_MISSING_BLOB_HASHES to + * read_blob_list(), blob->hash is now computed and valid. So + * turn this blob into a "hashed" blob. */ + list_del(&blob->unhashed_list); + blob_table_insert(ctx->blob_table, blob); + blob->unhashed = 0; + } + return status; +} + +/* Compute statistics about a list of blobs that will be written. + * + * Assumes the blobs are sorted such that all blobs located in each distinct WIM + * (specified by WIMStruct) are together. */ +static void +compute_blob_list_stats(struct list_head *blob_list, + struct write_blobs_ctx *ctx) +{ + struct blob_descriptor *blob; + u64 total_bytes = 0; + u64 num_blobs = 0; + u64 total_parts = 0; + WIMStruct *prev_wim_part = NULL; + + list_for_each_entry(blob, blob_list, write_blobs_list) { + num_blobs++; + total_bytes += blob->size; + if (blob->blob_location == BLOB_IN_WIM) { + if (prev_wim_part != blob->rdesc->wim) { + prev_wim_part = blob->rdesc->wim; + total_parts++; } } } - return 0; + ctx->progress_data.progress.write_streams.total_bytes = total_bytes; + ctx->progress_data.progress.write_streams.total_streams = num_blobs; + ctx->progress_data.progress.write_streams.completed_bytes = 0; + ctx->progress_data.progress.write_streams.completed_streams = 0; + ctx->progress_data.progress.write_streams.compression_type = ctx->out_ctype; + ctx->progress_data.progress.write_streams.total_parts = total_parts; + ctx->progress_data.progress.write_streams.completed_parts = 0; + ctx->progress_data.next_progress = 0; +} + +/* Find blobs in @blob_list that can be copied to the output WIM in raw form + * rather than compressed. Delete these blobs from @blob_list and move them to + * @raw_copy_blobs. Return the total uncompressed size of the blobs that need + * to be compressed. */ +static u64 +find_raw_copy_blobs(struct list_head *blob_list, + int write_resource_flags, + int out_ctype, + u32 out_chunk_size, + struct list_head *raw_copy_blobs) +{ + struct blob_descriptor *blob, *tmp; + u64 num_bytes_to_compress = 0; + + INIT_LIST_HEAD(raw_copy_blobs); + + /* Initialize temporary raw_copy_ok flag. 
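+	 *
+	 * The flag lives on the shared resource descriptor rather than on
+	 * each blob: once any blob qualifies via can_raw_copy(), the other
+	 * blobs backed by the same (possibly solid) resource are moved to
+	 * @raw_copy_blobs by the first check below, and the resource itself
+	 * is later copied only once by write_raw_copy_resources().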
*/ + list_for_each_entry(blob, blob_list, write_blobs_list) + if (blob->blob_location == BLOB_IN_WIM) + blob->rdesc->raw_copy_ok = 0; + + list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) { + if (blob->blob_location == BLOB_IN_WIM && + blob->rdesc->raw_copy_ok) + { + list_move_tail(&blob->write_blobs_list, + raw_copy_blobs); + } else if (can_raw_copy(blob, write_resource_flags, + out_ctype, out_chunk_size)) + { + blob->rdesc->raw_copy_ok = 1; + list_move_tail(&blob->write_blobs_list, + raw_copy_blobs); + } else { + num_bytes_to_compress += blob->size; + } + } + + return num_bytes_to_compress; } -/* Called when the main thread has read a new chunk of data. */ +/* Copy a raw compressed resource located in another WIM file to the WIM file + * being written. */ static int -main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx) +write_raw_copy_resource(struct wim_resource_descriptor *in_rdesc, + struct filedes *out_fd) { - struct main_writer_thread_ctx *ctx = _ctx; + u64 cur_read_offset; + u64 end_read_offset; + u8 buf[BUFFER_SIZE]; + size_t bytes_to_read; int ret; - struct message *next_msg; - u64 next_chunk_in_msg; - - /* Update SHA1 message digest for the stream currently being read by the - * main thread. */ - sha1_update(&ctx->next_sha_ctx, chunk, chunk_size); - - /* We send chunks of data to the compressor chunks in batches which we - * refer to as "messages". @next_msg is the message that is currently - * being prepared to send off. If it is NULL, that indicates that we - * need to start a new message. */ - next_msg = ctx->next_msg; - if (!next_msg) { - /* We need to start a new message. First check to see if there - * is a message available in the list of available messages. If - * so, we can just take one. If not, all the messages (there is - * a fixed number of them, proportional to the number of - * threads) have been sent off to the compressor threads, so we - * receive messages from the compressor threads containing - * compressed chunks of data. - * - * We may need to receive multiple messages before one is - * actually available to use because messages received that are - * *not* for the very next set of chunks to compress must be - * buffered until it's time to write those chunks. */ - while (list_empty(&ctx->available_msgs)) { - ret = receive_compressed_chunks(ctx); - if (ret) - return ret; - } + struct filedes *in_fd; + struct blob_descriptor *blob; + u64 out_offset_in_wim; - next_msg = container_of(ctx->available_msgs.next, - struct message, list); - list_del(&next_msg->list); - next_msg->complete = false; - next_msg->begin_chunk = ctx->next_chunk; - next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG, - ctx->next_num_chunks - ctx->next_chunk); - ctx->next_msg = next_msg; - } - - /* Fill in the next chunk to compress */ - next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk; - - next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size; - memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg], - chunk, chunk_size); - ctx->next_chunk++; - if (++next_chunk_in_msg == next_msg->num_chunks) { - /* Send off an array of chunks to compress */ - list_add_tail(&next_msg->list, &ctx->next_lte->msg_list); - shared_queue_put(ctx->res_to_compress_queue, next_msg); - ++ctx->num_outstanding_messages; - ctx->next_msg = NULL; + /* Copy the raw data. 
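+	 *
+	 * If the source resource is pipable, the data is preceded by a
+	 * 'struct pwm_blob_hdr'; the read window is widened below to copy
+	 * that header through verbatim, while out_offset_in_wim is advanced
+	 * past it so that the recorded blob offsets keep pointing at the
+	 * data itself.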
*/ + cur_read_offset = in_rdesc->offset_in_wim; + end_read_offset = cur_read_offset + in_rdesc->size_in_wim; + + out_offset_in_wim = out_fd->offset; + + if (in_rdesc->is_pipable) { + if (cur_read_offset < sizeof(struct pwm_blob_hdr)) + return WIMLIB_ERR_INVALID_PIPABLE_WIM; + cur_read_offset -= sizeof(struct pwm_blob_hdr); + out_offset_in_wim += sizeof(struct pwm_blob_hdr); + } + in_fd = &in_rdesc->wim->in_fd; + wimlib_assert(cur_read_offset != end_read_offset); + do { + + bytes_to_read = min(sizeof(buf), end_read_offset - cur_read_offset); + + ret = full_pread(in_fd, buf, bytes_to_read, cur_read_offset); + if (ret) + return ret; + + ret = full_write(out_fd, buf, bytes_to_read); + if (ret) + return ret; + + cur_read_offset += bytes_to_read; + + } while (cur_read_offset != end_read_offset); + + list_for_each_entry(blob, &in_rdesc->blob_list, rdesc_node) { + if (blob->will_be_in_output_wim) { + blob_set_out_reshdr_for_reuse(blob); + if (in_rdesc->flags & WIM_RESHDR_FLAG_SOLID) + blob->out_res_offset_in_wim = out_offset_in_wim; + else + blob->out_reshdr.offset_in_wim = out_offset_in_wim; + + } } return 0; } +/* Copy a list of raw compressed resources located in other WIM file(s) to the + * WIM file being written. */ static int -main_writer_thread_finish(void *_ctx) +write_raw_copy_resources(struct list_head *raw_copy_blobs, + struct filedes *out_fd, + struct write_blobs_progress_data *progress_data) { - struct main_writer_thread_ctx *ctx = _ctx; + struct blob_descriptor *blob; int ret; - while (ctx->num_outstanding_messages != 0) { - ret = receive_compressed_chunks(ctx); + + list_for_each_entry(blob, raw_copy_blobs, write_blobs_list) + blob->rdesc->raw_copy_ok = 1; + + list_for_each_entry(blob, raw_copy_blobs, write_blobs_list) { + if (blob->rdesc->raw_copy_ok) { + /* Write each solid resource only one time. */ + ret = write_raw_copy_resource(blob->rdesc, out_fd); + if (ret) + return ret; + blob->rdesc->raw_copy_ok = 0; + } + ret = do_write_blobs_progress(progress_data, blob->size, + 1, false); if (ret) return ret; } - wimlib_assert(list_empty(&ctx->outstanding_streams)); - return do_write_stream_list_serial(&ctx->serial_streams, - ctx->lookup_table, - ctx->out_fd, - ctx->out_ctype, - ctx->out_chunk_size, - ctx->comp_ctx, - ctx->write_resource_flags, - ctx->progress_data); + return 0; } +/* Wait for and write all chunks pending in the compressor. */ static int -submit_stream_for_compression(struct wim_lookup_table_entry *lte, - struct main_writer_thread_ctx *ctx) +finish_remaining_chunks(struct write_blobs_ctx *ctx) { + const void *cdata; + u32 csize; + u32 usize; int ret; - /* Read the entire stream @lte, feeding its data chunks to the - * compressor threads. Also SHA1-sum the stream; this is required in - * the case that @lte is unhashed, and a nice additional verification - * when @lte is already hashed. 
*/
-	sha1_init(&ctx->next_sha_ctx);
-	ctx->next_chunk = 0;
-	ctx->next_num_chunks = DIV_ROUND_UP(lte->size, ctx->out_chunk_size);
-	ctx->next_lte = lte;
-	INIT_LIST_HEAD(&lte->msg_list);
-	list_add_tail(&lte->being_compressed_list, &ctx->outstanding_streams);
-	ret = read_stream_prefix(lte, lte->size, main_writer_thread_cb, ctx, 0);
-	if (ret)
-		return ret;
-	wimlib_assert(ctx->next_chunk == ctx->next_num_chunks);
-	return finalize_and_check_sha1(&ctx->next_sha_ctx, lte);
+	if (ctx->compressor == NULL)
+		return 0;
+
+	if (ctx->cur_chunk_buf_filled != 0) {
+		ctx->compressor->signal_chunk_filled(ctx->compressor,
+						     ctx->cur_chunk_buf_filled);
+	}
+
+	while (ctx->compressor->get_compression_result(ctx->compressor, &cdata,
+						       &csize, &usize))
+	{
+		ret = write_chunk(ctx, cdata, csize, usize);
+		if (ret)
+			return ret;
+	}
+	return 0;
 }
 
-static int
-main_thread_process_next_stream(struct wim_lookup_table_entry *lte, void *_ctx)
+static void
+validate_blob_list(struct list_head *blob_list)
 {
-	struct main_writer_thread_ctx *ctx = _ctx;
-	int ret;
+	struct blob_descriptor *blob;
 
-	if (lte->size < 1000 ||
-	    !must_compress_stream(lte, ctx->write_resource_flags,
-				  ctx->out_ctype, ctx->out_chunk_size))
-	{
-		/* Stream is too small or isn't being compressed. Process it by
-		 * the main thread when we have a chance. We can't necessarily
-		 * process it right here, as the main thread could be in the
-		 * middle of writing a different stream. */
-		list_add_tail(&lte->write_streams_list, &ctx->serial_streams);
-		lte->deferred = 1;
-		ret = 0;
-	} else {
-		ret = submit_stream_for_compression(lte, ctx);
+	list_for_each_entry(blob, blob_list, write_blobs_list) {
+		wimlib_assert(blob->will_be_in_output_wim);
+		wimlib_assert(blob->size != 0);
 	}
-	lte->no_progress = 1;
-	return ret;
 }
 
-static long
-get_default_num_threads(void)
+static inline bool
+blob_is_in_file(const struct blob_descriptor *blob)
 {
+	return blob->blob_location == BLOB_IN_FILE_ON_DISK
 #ifdef __WIN32__
-	return win32_get_number_of_processors();
-#else
-	return sysconf(_SC_NPROCESSORS_ONLN);
+	       || blob->blob_location == BLOB_IN_WINNT_FILE_ON_DISK
+	       || blob->blob_location == BLOB_WIN32_ENCRYPTED
 #endif
+	;
 }
 
-/* Equivalent to write_stream_list_serial(), except this takes a @num_threads
- * parameter and will perform compression using that many threads.  Falls
- * back to write_stream_list_serial() on certain errors, such as a failure to
- * create the number of threads requested.
- *
- * High level description of the algorithm for writing compressed streams in
- * parallel:  We perform compression on chunks rather than on full files.  The
- * currently executing thread becomes the main thread and is entirely in charge
- * of reading the data to compress (which may be in any location understood by
- * the resource code--- such as in an external file being captured, or in
- * another WIM file from which an image is being exported) and actually writing
- * the compressed data to the output file.  Additional threads are "compressor
- * threads" and all execute the compressor_thread_proc, where they repeatedly
- * retrieve buffers of data from the main thread, compress them, and hand them
- * back to the main thread.
- *
- * Certain streams, such as streams that do not need to be compressed (e.g.
- * input compression type same as output compression type) or streams of very
- * small size are placed in a list (main_writer_thread_ctx.serial_list) and
- * handled entirely by the main thread at an appropriate time.
- * - * At any given point in time, multiple streams may be having chunks compressed - * concurrently. The stream that the main thread is currently *reading* may be - * later in the list that the stream that the main thread is currently - * *writing*. */ -static int -write_stream_list_parallel(struct list_head *stream_list, - struct wim_lookup_table *lookup_table, - struct filedes *out_fd, - int out_ctype, - u32 out_chunk_size, - struct wimlib_lzx_context **comp_ctx, - int write_resource_flags, - struct write_streams_progress_data *progress_data, - unsigned num_threads) +static void +init_done_with_file_info(struct list_head *blob_list) { - int ret; - struct shared_queue res_to_compress_queue; - struct shared_queue compressed_res_queue; - pthread_t *compressor_threads = NULL; - union wimlib_progress_info *progress = &progress_data->progress; - unsigned num_started_threads; - bool can_retry = true; - - if (num_threads == 0) { - long nthreads = get_default_num_threads(); - if (nthreads < 1 || nthreads > UINT_MAX) { - WARNING("Could not determine number of processors! Assuming 1"); - goto out_serial_quiet; - } else if (nthreads == 1) { - goto out_serial_quiet; + struct blob_descriptor *blob; + + list_for_each_entry(blob, blob_list, write_blobs_list) { + if (blob_is_in_file(blob)) { + blob->file_inode->i_num_remaining_streams = 0; + blob->may_send_done_with_file = 1; } else { - num_threads = nthreads; + blob->may_send_done_with_file = 0; } } - DEBUG("Writing stream list of size %"PRIu64" " - "(parallel version, num_threads=%u)", - progress->write_streams.total_streams, num_threads); + list_for_each_entry(blob, blob_list, write_blobs_list) + if (blob->may_send_done_with_file) + blob->file_inode->i_num_remaining_streams++; +} - progress->write_streams.num_threads = num_threads; +/* + * Write a list of blobs to the output WIM file. + * + * @blob_list + * The list of blobs to write, specified by a list of 'struct blob_descriptor' linked + * by the 'write_blobs_list' member. + * + * @out_fd + * The file descriptor, opened for writing, to which to write the blobs. + * + * @write_resource_flags + * Flags to modify how the blobs are written: + * + * WRITE_RESOURCE_FLAG_RECOMPRESS: + * Force compression of all resources, even if they could otherwise + * be re-used by copying the raw data, due to being located in a WIM + * file with compatible compression parameters. + * + * WRITE_RESOURCE_FLAG_PIPABLE: + * Write the resources in the wimlib-specific pipable format, and + * furthermore do so in such a way that no seeking backwards in + * @out_fd will be performed (so it may be a pipe). + * + * WRITE_RESOURCE_FLAG_SOLID: + * Combine all the blobs into a single resource rather than writing + * them in separate resources. This flag is only valid if the WIM + * version number has been, or will be, set to WIM_VERSION_SOLID. + * This flag may not be combined with WRITE_RESOURCE_FLAG_PIPABLE. + * + * @out_ctype + * Compression format to use in the output resources, specified as one of + * the WIMLIB_COMPRESSION_TYPE_* constants. WIMLIB_COMPRESSION_TYPE_NONE + * is allowed. + * + * @out_chunk_size + * Compression chunk size to use in the output resources. It must be a + * valid chunk size for the specified compression format @out_ctype, unless + * @out_ctype is WIMLIB_COMPRESSION_TYPE_NONE, in which case this parameter + * is ignored. + * + * @num_threads + * Number of threads to use to compress data. If 0, a default number of + * threads will be chosen. 
The number of threads may still be decreased
+ *	from the specified value if insufficient memory is detected.
+ *
+ * @blob_table
+ *	If on-the-fly deduplication of unhashed blobs is desired, this parameter
+ *	must be a pointer to the blob table for the WIMStruct on whose behalf the
+ *	blobs are being written.  Otherwise, this parameter can be NULL.
+ *
+ * @filter_ctx
+ *	If on-the-fly deduplication of unhashed blobs is desired, this parameter
+ *	can be a pointer to a context for blob filtering used to detect whether
+ *	the duplicate blob has been hard-filtered or not.  If no blobs are
+ *	hard-filtered or no blobs are unhashed, this parameter can be NULL.
+ *
+ * This function will write the blobs in @blob_list to resources in
+ * consecutive positions in the output WIM file, or to a single solid resource
+ * if WRITE_RESOURCE_FLAG_SOLID was specified in @write_resource_flags.  In both
+ * cases, the @out_reshdr of the `struct blob_descriptor' for each blob written will be
+ * updated to specify its location, size, and flags in the output WIM.  In the
+ * solid resource case, WIM_RESHDR_FLAG_SOLID will be set in the @flags field of
+ * each @out_reshdr, and furthermore @out_res_offset_in_wim and
+ * @out_res_size_in_wim of each @out_reshdr will be set to the offset and size,
+ * respectively, in the output WIM of the solid resource containing the
+ * corresponding blob.
+ *
+ * Each of the blobs to write may be in any location supported by the
+ * resource-handling code (specifically, read_blob_list()), such as the contents
+ * of an external file that has been logically added to the output WIM, or a blob
+ * in another WIM file that has been imported, or even a blob in the "same" WIM
+ * file of which a modified copy is being written.  In the case that a blob is
+ * already in a WIM file and uses compatible compression parameters, by default
+ * this function will re-use the raw data instead of decompressing it, then
+ * recompressing it; however, with WRITE_RESOURCE_FLAG_RECOMPRESS
+ * specified in @write_resource_flags, this is not done.
+ *
+ * As a further requirement, this function requires that the
+ * @will_be_in_output_wim member be set to 1 on all blobs in @blob_list as well
+ * as any other blobs not in @blob_list that will be in the output WIM file, but
+ * set to 0 on any other blobs in the output WIM's blob table or sharing a solid
+ * resource with a blob in @blob_list.  Still furthermore, if on-the-fly
+ * deduplication of blobs is possible, then all blobs in @blob_list must also be
+ * linked by @blob_table_list along with any other blobs that have
+ * @will_be_in_output_wim set.
+ *
+ * This function handles on-the-fly deduplication of blobs for which SHA-1
+ * message digests have not yet been calculated.  Such blobs may or may not need
+ * to be written.  If @blob_table is non-NULL, then each blob in @blob_list that
+ * has @unhashed set but not @unique_size set is checksummed immediately before
+ * it would otherwise be read for writing in order to determine if it is
+ * identical to another blob already being written or one that would be filtered
+ * out of the output WIM using blob_filtered() with the context @filter_ctx.
+ * Each such duplicate blob will be removed from @blob_list, its reference count
+ * transferred to the pre-existing duplicate blob, and its memory freed; it will not
+ * be written.
Alternatively, if a blob in @blob_list is a duplicate with any + * blob in @blob_table that has not been marked for writing or would not be + * hard-filtered, it is freed and the pre-existing duplicate is written instead, + * taking ownership of the reference count and slot in the @blob_table_list. + * + * Returns 0 if every blob was either written successfully or did not need to be + * written; otherwise returns a non-zero error code. + */ +static int +write_blob_list(struct list_head *blob_list, + struct filedes *out_fd, + int write_resource_flags, + int out_ctype, + u32 out_chunk_size, + unsigned num_threads, + struct blob_table *blob_table, + struct filter_context *filter_ctx, + wimlib_progress_func_t progfunc, + void *progctx) +{ + int ret; + struct write_blobs_ctx ctx; + struct list_head raw_copy_blobs; - static const size_t MESSAGES_PER_THREAD = 2; - size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD); + wimlib_assert((write_resource_flags & + (WRITE_RESOURCE_FLAG_SOLID | + WRITE_RESOURCE_FLAG_PIPABLE)) != + (WRITE_RESOURCE_FLAG_SOLID | + WRITE_RESOURCE_FLAG_PIPABLE)); - DEBUG("Initializing shared queues (queue_size=%zu)", queue_size); + validate_blob_list(blob_list); - ret = shared_queue_init(&res_to_compress_queue, queue_size); - if (ret) - goto out_serial; + if (list_empty(blob_list)) + return 0; + + /* If needed, set auxiliary information so that we can detect when the + * library has finished using each external file. */ + if (unlikely(write_resource_flags & WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE)) + init_done_with_file_info(blob_list); + + memset(&ctx, 0, sizeof(ctx)); + + ctx.out_fd = out_fd; + ctx.blob_table = blob_table; + ctx.out_ctype = out_ctype; + ctx.out_chunk_size = out_chunk_size; + ctx.write_resource_flags = write_resource_flags; + ctx.filter_ctx = filter_ctx; + + /* + * We normally sort the blobs to write by a "sequential" order that is + * optimized for reading. But when using solid compression, we instead + * sort the blobs by file extension and file name (when applicable; and + * we don't do this for blobs from solid resources) so that similar + * files are grouped together, which improves the compression ratio. + * This is somewhat of a hack since a blob does not necessarily + * correspond one-to-one with a filename, nor is there any guarantee + * that two files with similar names or extensions are actually similar + * in content. A potential TODO is to sort the blobs based on some + * measure of similarity of their actual contents. + */ - ret = shared_queue_init(&compressed_res_queue, queue_size); + ret = sort_blob_list_by_sequential_order(blob_list, + offsetof(struct blob_descriptor, + write_blobs_list)); if (ret) - goto out_destroy_res_to_compress_queue; + return ret; - struct compressor_thread_params *params; + compute_blob_list_stats(blob_list, &ctx); - params = CALLOC(num_threads, sizeof(params[0])); - if (params == NULL) { - ret = WIMLIB_ERR_NOMEM; - goto out_destroy_compressed_res_queue; + if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID_SORT) { + ret = sort_blob_list_for_solid_compression(blob_list); + if (unlikely(ret)) + WARNING("Failed to sort blobs for solid compression. 
Continuing anyway.");
+	}
 
-	for (unsigned i = 0; i < num_threads; i++) {
-		params[i].res_to_compress_queue = &res_to_compress_queue;
-		params[i].compressed_res_queue = &compressed_res_queue;
-		params[i].out_ctype = out_ctype;
-		if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX) {
-			ret = wimlib_lzx_alloc_context(out_chunk_size,
-						       NULL, &params[i].comp_ctx);
+	ctx.progress_data.progfunc = progfunc;
+	ctx.progress_data.progctx = progctx;
+
+	ctx.num_bytes_to_compress = find_raw_copy_blobs(blob_list,
+							write_resource_flags,
+							out_ctype,
+							out_chunk_size,
+							&raw_copy_blobs);
+
+	if (ctx.num_bytes_to_compress == 0)
+		goto out_write_raw_copy_resources;
+
+	/* Unless uncompressed output was required, allocate a chunk_compressor
+	 * to do compression.  There are serial and parallel implementations of
+	 * the chunk_compressor interface.  We default to parallel using the
+	 * specified number of threads, unless the upper bound on the number of
+	 * bytes needing to be compressed is less than a heuristic value. */
+	if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
+
+	#ifdef ENABLE_MULTITHREADED_COMPRESSION
+		if (ctx.num_bytes_to_compress > max(2000000, out_chunk_size)) {
+			ret = new_parallel_chunk_compressor(out_ctype,
+							    out_chunk_size,
+							    num_threads, 0,
+							    &ctx.compressor);
+			if (ret > 0) {
+				WARNING("Couldn't create parallel chunk compressor: %"TS".\n"
+					"          Falling back to single-threaded compression.",
+					wimlib_get_error_string(ret));
+			}
+		}
+	#endif
+
+		if (ctx.compressor == NULL) {
+			ret = new_serial_chunk_compressor(out_ctype, out_chunk_size,
+							  &ctx.compressor);
+			if (ret)
+				goto out_destroy_context;
+		}
+	}
 
-	compressor_threads = MALLOC(num_threads * sizeof(pthread_t));
-	if (compressor_threads == NULL) {
-		ret = WIMLIB_ERR_NOMEM;
-		goto out_free_params;
-	}
+	if (ctx.compressor)
+		ctx.progress_data.progress.write_streams.num_threads = ctx.compressor->num_threads;
+	else
+		ctx.progress_data.progress.write_streams.num_threads = 1;
 
-	for (unsigned i = 0; i < num_threads; i++) {
-		DEBUG("pthread_create thread %u of %u", i + 1, num_threads);
-		ret = pthread_create(&compressor_threads[i], NULL,
-				     compressor_thread_proc, &params[i]);
-		if (ret) {
-			errno = ret;
-			ret = -1;
-			ERROR_WITH_ERRNO("Failed to create compressor "
-					 "thread %u of %u",
-					 i + 1, num_threads);
-			num_started_threads = i;
-			goto out_join;
-		}
-	}
-	num_started_threads = num_threads;
+	INIT_LIST_HEAD(&ctx.blobs_being_compressed);
+	INIT_LIST_HEAD(&ctx.blobs_in_solid_resource);
 
-	if (progress_data->progress_func) {
-		progress_data->progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
-					     progress);
+	ret = call_progress(ctx.progress_data.progfunc,
+			    WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
+			    &ctx.progress_data.progress,
+			    ctx.progress_data.progctx);
+	if (ret)
+		goto out_destroy_context;
+
+	if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
+		ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress);
+		if (ret)
+			goto out_destroy_context;
 	}
 
-	struct main_writer_thread_ctx ctx;
+	/* Read the list of blobs needing to be compressed, using the specified
+	 * callbacks to execute processing of the data.
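+	 *
+	 * For each blob, read_blob_list() drives the callbacks below in the
+	 * order
+	 *
+	 *	begin_blob(blob)
+	 *	consume_chunk(chunk, size)	(zero or more times)
+	 *	end_blob(blob, status)
+	 *
+	 * so write_blob_begin_read() can divert a blob that turns out to be
+	 * a duplicate before it is written, and write_blob_end_read() can
+	 * enter previously unhashed blobs into the blob table once their
+	 * SHA-1 digests are known.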
*/ - memset(&ctx, 0, sizeof(ctx)); + struct read_blob_callbacks cbs = { + .begin_blob = write_blob_begin_read, + .consume_chunk = write_blob_process_chunk, + .end_blob = write_blob_end_read, + .ctx = &ctx, + }; + + ret = read_blob_list(blob_list, + offsetof(struct blob_descriptor, write_blobs_list), + &cbs, + BLOB_LIST_ALREADY_SORTED | + VERIFY_BLOB_HASHES | + COMPUTE_MISSING_BLOB_HASHES); - ctx.stream_list = stream_list; - ctx.lookup_table = lookup_table; - ctx.out_fd = out_fd; - ctx.out_ctype = out_ctype; - ctx.out_chunk_size = out_chunk_size; - ctx.comp_ctx = comp_ctx; - ctx.res_to_compress_queue = &res_to_compress_queue; - ctx.compressed_res_queue = &compressed_res_queue; - ctx.num_messages = queue_size; - ctx.write_resource_flags = write_resource_flags; - ctx.progress_data = progress_data; - ret = main_writer_thread_init_ctx(&ctx); if (ret) - goto out_join; + goto out_destroy_context; - can_retry = false; - ret = do_write_stream_list(stream_list, lookup_table, - main_thread_process_next_stream, - &ctx, progress_data); + ret = finish_remaining_chunks(&ctx); if (ret) - goto out_destroy_ctx; - - /* The main thread has finished reading all streams that are going to be - * compressed in parallel, and it now needs to wait for all remaining - * chunks to be compressed so that the remaining streams can actually be - * written to the output file. Furthermore, any remaining streams that - * had processing deferred to the main thread need to be handled. These - * tasks are done by the main_writer_thread_finish() function. */ - ret = main_writer_thread_finish(&ctx); -out_destroy_ctx: - main_writer_thread_destroy_ctx(&ctx); -out_join: - for (unsigned i = 0; i < num_started_threads; i++) - shared_queue_put(&res_to_compress_queue, NULL); - - for (unsigned i = 0; i < num_started_threads; i++) { - if (pthread_join(compressor_threads[i], NULL)) { - WARNING_WITH_ERRNO("Failed to join compressor " - "thread %u of %u", - i + 1, num_threads); + goto out_destroy_context; + + if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { + struct wim_reshdr reshdr; + struct blob_descriptor *blob; + u64 offset_in_res; + + ret = end_write_resource(&ctx, &reshdr); + if (ret) + goto out_destroy_context; + + offset_in_res = 0; + list_for_each_entry(blob, &ctx.blobs_in_solid_resource, write_blobs_list) { + blob->out_reshdr.size_in_wim = blob->size; + blob->out_reshdr.flags = reshdr_flags_for_blob(blob) | + WIM_RESHDR_FLAG_SOLID; + blob->out_reshdr.uncompressed_size = 0; + blob->out_reshdr.offset_in_wim = offset_in_res; + blob->out_res_offset_in_wim = reshdr.offset_in_wim; + blob->out_res_size_in_wim = reshdr.size_in_wim; + blob->out_res_uncompressed_size = reshdr.uncompressed_size; + offset_in_res += blob->size; } + wimlib_assert(offset_in_res == reshdr.uncompressed_size); } - FREE(compressor_threads); -out_free_params: - for (unsigned i = 0; i < num_threads; i++) - wimlib_lzx_free_context(params[i].comp_ctx); - FREE(params); -out_destroy_compressed_res_queue: - shared_queue_destroy(&compressed_res_queue); -out_destroy_res_to_compress_queue: - shared_queue_destroy(&res_to_compress_queue); - if (!can_retry || (ret >= 0 && ret != WIMLIB_ERR_NOMEM)) - return ret; -out_serial: - WARNING("Falling back to single-threaded compression"); -out_serial_quiet: - return write_stream_list_serial(stream_list, - lookup_table, - out_fd, - out_ctype, - out_chunk_size, - comp_ctx, - write_resource_flags, - progress_data); +out_write_raw_copy_resources: + /* Copy any compressed resources for which the raw data can be reused + * without 
decompression. */ + ret = write_raw_copy_resources(&raw_copy_blobs, ctx.out_fd, + &ctx.progress_data); + +out_destroy_context: + FREE(ctx.chunk_csizes); + if (ctx.compressor) + ctx.compressor->destroy(ctx.compressor); + return ret; } -#endif -/* Write a list of streams to a WIM (@out_fd) using the compression type - * @out_ctype, chunk size @out_chunk_size, and up to @num_threads compressor - * threads. */ + static int -write_stream_list(struct list_head *stream_list, - struct wim_lookup_table *lookup_table, - struct filedes *out_fd, int out_ctype, - u32 out_chunk_size, - struct wimlib_lzx_context **comp_ctx, - int write_flags, - unsigned num_threads, wimlib_progress_func_t progress_func) +write_file_data_blobs(WIMStruct *wim, + struct list_head *blob_list, + int write_flags, + unsigned num_threads, + struct filter_context *filter_ctx) +{ + int out_ctype; + u32 out_chunk_size; + int write_resource_flags; + + write_resource_flags = write_flags_to_resource_flags(write_flags); + + if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { + out_chunk_size = wim->out_solid_chunk_size; + out_ctype = wim->out_solid_compression_type; + } else { + out_chunk_size = wim->out_chunk_size; + out_ctype = wim->out_compression_type; + } + + return write_blob_list(blob_list, + &wim->out_fd, + write_resource_flags, + out_ctype, + out_chunk_size, + num_threads, + wim->blob_table, + filter_ctx, + wim->progfunc, + wim->progctx); +} + +/* Write the contents of the specified blob as a WIM resource. */ +static int +write_wim_resource(struct blob_descriptor *blob, + struct filedes *out_fd, + int out_ctype, + u32 out_chunk_size, + int write_resource_flags) +{ + LIST_HEAD(blob_list); + list_add(&blob->write_blobs_list, &blob_list); + blob->will_be_in_output_wim = 1; + return write_blob_list(&blob_list, + out_fd, + write_resource_flags & ~WRITE_RESOURCE_FLAG_SOLID, + out_ctype, + out_chunk_size, + 1, + NULL, + NULL, + NULL, + NULL); +} + +/* Write the contents of the specified buffer as a WIM resource. */ +int +write_wim_resource_from_buffer(const void *buf, + size_t buf_size, + bool is_metadata, + struct filedes *out_fd, + int out_ctype, + u32 out_chunk_size, + struct wim_reshdr *out_reshdr, + u8 *hash_ret, + int write_resource_flags) { int ret; - int write_resource_flags; - u64 total_bytes; - u64 total_compression_bytes; - unsigned total_parts; - WIMStruct *prev_wim_part; - size_t num_streams; - struct wim_lookup_table_entry *lte; - struct write_streams_progress_data progress_data; - - if (list_empty(stream_list)) { - DEBUG("No streams to write."); + struct blob_descriptor blob; + + if (unlikely(buf_size == 0)) { + zero_reshdr(out_reshdr); + if (hash_ret) + copy_hash(hash_ret, zero_hash); return 0; } - write_resource_flags = write_flags_to_resource_flags(write_flags); - - DEBUG("Writing stream list (offset = %"PRIu64", write_resource_flags=0x%08x)", - out_fd->offset, write_resource_flags); + blob_set_is_located_in_attached_buffer(&blob, (void *)buf, buf_size); + sha1_buffer(buf, buf_size, blob.hash); + blob.unhashed = 0; + blob.is_metadata = is_metadata; - /* Sort the stream list into a good order for reading. */ - ret = sort_stream_list_by_sequential_order(stream_list, - offsetof(struct wim_lookup_table_entry, - write_streams_list)); + ret = write_wim_resource(&blob, out_fd, out_ctype, out_chunk_size, + write_resource_flags); if (ret) return ret; - /* Calculate the total size of the streams to be written. 
Note: this - * will be the uncompressed size, as we may not know the compressed size - * yet, and also this will assume that every unhashed stream will be - * written (which will not necessarily be the case). */ - total_bytes = 0; - total_compression_bytes = 0; - num_streams = 0; - total_parts = 0; - prev_wim_part = NULL; - list_for_each_entry(lte, stream_list, write_streams_list) { - num_streams++; - total_bytes += lte->size; - if (must_compress_stream(lte, write_resource_flags, - out_ctype, out_chunk_size)) - total_compression_bytes += lte->size; - if (lte->resource_location == RESOURCE_IN_WIM) { - if (prev_wim_part != lte->rspec->wim) { - prev_wim_part = lte->rspec->wim; - total_parts++; - } - } - } + copy_reshdr(out_reshdr, &blob.out_reshdr); - memset(&progress_data, 0, sizeof(progress_data)); - progress_data.progress_func = progress_func; - - progress_data.progress.write_streams.total_bytes = total_bytes; - progress_data.progress.write_streams.total_streams = num_streams; - progress_data.progress.write_streams.completed_bytes = 0; - progress_data.progress.write_streams.completed_streams = 0; - progress_data.progress.write_streams.num_threads = num_threads; - progress_data.progress.write_streams.compression_type = out_ctype; - progress_data.progress.write_streams.total_parts = total_parts; - progress_data.progress.write_streams.completed_parts = 0; - - progress_data.next_progress = 0; - progress_data.prev_wim_part = NULL; - -#ifdef ENABLE_MULTITHREADED_COMPRESSION - if (total_compression_bytes >= 2000000 && num_threads != 1) - ret = write_stream_list_parallel(stream_list, - lookup_table, - out_fd, - out_ctype, - out_chunk_size, - comp_ctx, - write_resource_flags, - &progress_data, - num_threads); - else -#endif - ret = write_stream_list_serial(stream_list, - lookup_table, - out_fd, - out_ctype, - out_chunk_size, - comp_ctx, - write_resource_flags, - &progress_data); - if (ret == 0) - DEBUG("Successfully wrote stream list."); - else - DEBUG("Failed to write stream list (ret=%d).", ret); - return ret; + if (hash_ret) + copy_hash(hash_ret, blob.hash); + return 0; } -struct stream_size_table { +struct blob_size_table { struct hlist_head *array; size_t num_entries; size_t capacity; }; static int -init_stream_size_table(struct stream_size_table *tab, size_t capacity) +init_blob_size_table(struct blob_size_table *tab, size_t capacity) { tab->array = CALLOC(capacity, sizeof(tab->array[0])); - if (!tab->array) + if (tab->array == NULL) return WIMLIB_ERR_NOMEM; tab->num_entries = 0; tab->capacity = capacity; @@ -1836,151 +1736,151 @@ init_stream_size_table(struct stream_size_table *tab, size_t capacity) } static void -destroy_stream_size_table(struct stream_size_table *tab) +destroy_blob_size_table(struct blob_size_table *tab) { FREE(tab->array); } static int -stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab) +blob_size_table_insert(struct blob_descriptor *blob, void *_tab) { - struct stream_size_table *tab = _tab; + struct blob_size_table *tab = _tab; size_t pos; - struct wim_lookup_table_entry *same_size_lte; - struct hlist_node *tmp; - - pos = hash_u64(lte->size) % tab->capacity; - lte->unique_size = 1; - hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) { - if (same_size_lte->size == lte->size) { - lte->unique_size = 0; - same_size_lte->unique_size = 0; + struct blob_descriptor *same_size_blob; + + pos = hash_u64(blob->size) % tab->capacity; + blob->unique_size = 1; + hlist_for_each_entry(same_size_blob, &tab->array[pos], hash_list_2) { + if 
(same_size_blob->size == blob->size) {
+			blob->unique_size = 0;
+			same_size_blob->unique_size = 0;
 			break;
 		}
 	}
 
-	hlist_add_head(&lte->hash_list_2, &tab->array[pos]);
+	hlist_add_head(&blob->hash_list_2, &tab->array[pos]);
 	tab->num_entries++;
 	return 0;
 }
 
-struct find_streams_ctx {
+struct find_blobs_ctx {
 	WIMStruct *wim;
 	int write_flags;
-	struct list_head stream_list;
-	struct stream_size_table stream_size_tab;
+	struct list_head blob_list;
+	struct blob_size_table blob_size_tab;
 };
 
 static void
-lte_reference_for_logical_write(struct wim_lookup_table_entry *lte,
-				struct find_streams_ctx *ctx,
-				unsigned nref)
+reference_blob_for_write(struct blob_descriptor *blob,
+			 struct list_head *blob_list, u32 nref)
 {
-	if (lte->out_refcnt == 0) {
-		stream_size_table_insert(lte, &ctx->stream_size_tab);
-		list_add_tail(&lte->write_streams_list, &ctx->stream_list);
+	if (!blob->will_be_in_output_wim) {
+		blob->out_refcnt = 0;
+		list_add_tail(&blob->write_blobs_list, blob_list);
+		blob->will_be_in_output_wim = 1;
 	}
-	lte->out_refcnt += nref;
+	blob->out_refcnt += nref;
 }
 
 static int
-do_lte_full_reference_for_logical_write(struct wim_lookup_table_entry *lte,
-					void *_ctx)
+fully_reference_blob_for_write(struct blob_descriptor *blob, void *_blob_list)
 {
-	struct find_streams_ctx *ctx = _ctx;
-	lte->out_refcnt = 0;
-	lte_reference_for_logical_write(lte, ctx,
-					(lte->refcnt ? lte->refcnt : 1));
+	struct list_head *blob_list = _blob_list;
+	blob->will_be_in_output_wim = 0;
+	reference_blob_for_write(blob, blob_list, blob->refcnt);
 	return 0;
 }
 
 static int
-inode_find_streams_to_write(struct wim_inode *inode,
-			    struct wim_lookup_table *table,
-			    struct find_streams_ctx *ctx)
+inode_find_blobs_to_reference(const struct wim_inode *inode,
+			      const struct blob_table *table,
+			      struct list_head *blob_list)
 {
-	struct wim_lookup_table_entry *lte;
-	unsigned i;
+	wimlib_assert(inode->i_nlink > 0);
 
-	for (i = 0; i <= inode->i_num_ads; i++) {
-		lte = inode_stream_lte(inode, i, table);
-		if (lte)
-			lte_reference_for_logical_write(lte, ctx, inode->i_nlink);
-		else if (!is_zero_hash(inode_stream_hash(inode, i)))
-			return WIMLIB_ERR_RESOURCE_NOT_FOUND;
+	for (unsigned i = 0; i < inode->i_num_streams; i++) {
+		struct blob_descriptor *blob;
+		const u8 *hash;
+
+		blob = stream_blob(&inode->i_streams[i], table);
+		if (blob) {
+			reference_blob_for_write(blob, blob_list, inode->i_nlink);
+		} else {
+			hash = stream_hash(&inode->i_streams[i]);
+			if (!is_zero_hash(hash))
+				return blob_not_found_error(inode, hash);
+		}
 	}
 	return 0;
 }
 
 static int
-image_find_streams_to_write(WIMStruct *wim)
+do_blob_set_not_in_output_wim(struct blob_descriptor *blob, void *_ignore)
+{
+	blob->will_be_in_output_wim = 0;
+	return 0;
+}
+
+static int
+image_find_blobs_to_reference(WIMStruct *wim)
 {
-	struct find_streams_ctx *ctx;
 	struct wim_image_metadata *imd;
 	struct wim_inode *inode;
-	struct wim_lookup_table_entry *lte;
+	struct blob_descriptor *blob;
+	struct list_head *blob_list;
 	int ret;
 
-	ctx = wim->private;
 	imd = wim_get_current_image_metadata(wim);
 
-	image_for_each_unhashed_stream(lte, imd)
-		lte->out_refcnt = 0;
+	image_for_each_unhashed_blob(blob, imd)
+		blob->will_be_in_output_wim = 0;
 
-	/* Go through this image's inodes to find any streams that have not been
-	 * found yet.
*/ + blob_list = wim->private; image_for_each_inode(inode, imd) { - ret = inode_find_streams_to_write(inode, wim->lookup_table, ctx); + ret = inode_find_blobs_to_reference(inode, + wim->blob_table, + blob_list); if (ret) return ret; } return 0; } -/* - * Build a list of streams (via `struct wim_lookup_table_entry's) included in - * the "logical write" of the WIM, meaning all streams that are referenced at - * least once by dentries in the the image(s) being written. 'out_refcnt' on - * each stream being included in the logical write is set to the number of - * references from dentries in the image(s). Furthermore, 'unique_size' on each - * stream being included in the logical write is set to indicate whether that - * stream has a unique size relative to the streams being included in the - * logical write. Still furthermore, 'part_number' on each stream being - * included in the logical write is set to the part number given in the - * in-memory header of @p wim. - * - * This is considered a "logical write" because it does not take into account - * filtering out streams already present in the WIM (in the case of an in place - * overwrite) or present in other WIMs (in case of creating delta WIM). - */ static int -prepare_logical_stream_list(WIMStruct *wim, int image, bool streams_ok, - struct find_streams_ctx *ctx) +prepare_unfiltered_list_of_blobs_in_output_wim(WIMStruct *wim, + int image, + int blobs_ok, + struct list_head *blob_list_ret) { int ret; - if (streams_ok && (image == WIMLIB_ALL_IMAGES || - (image == 1 && wim->hdr.image_count == 1))) + INIT_LIST_HEAD(blob_list_ret); + + if (blobs_ok && (image == WIMLIB_ALL_IMAGES || + (image == 1 && wim->hdr.image_count == 1))) { - /* Fast case: Assume that all streams are being written and - * that the reference counts are correct. */ - struct wim_lookup_table_entry *lte; + /* Fast case: Assume that all blobs are being written and that + * the reference counts are correct. */ + struct blob_descriptor *blob; struct wim_image_metadata *imd; unsigned i; - for_lookup_table_entry(wim->lookup_table, - do_lte_full_reference_for_logical_write, ctx); + for_blob_in_table(wim->blob_table, + fully_reference_blob_for_write, + blob_list_ret); + for (i = 0; i < wim->hdr.image_count; i++) { imd = wim->image_metadata[i]; - image_for_each_unhashed_stream(lte, imd) - do_lte_full_reference_for_logical_write(lte, ctx); + image_for_each_unhashed_blob(blob, imd) + fully_reference_blob_for_write(blob, blob_list_ret); } } else { /* Slow case: Walk through the images being written and - * determine the streams referenced. */ - for_lookup_table_entry(wim->lookup_table, lte_zero_out_refcnt, NULL); - wim->private = ctx; - ret = for_image(wim, image, image_find_streams_to_write); + * determine the blobs referenced. */ + for_blob_in_table(wim->blob_table, + do_blob_set_not_in_output_wim, NULL); + wim->private = blob_list_ret; + ret = for_image(wim, image, image_find_blobs_to_reference); if (ret) return ret; } @@ -1988,172 +1888,242 @@ prepare_logical_stream_list(WIMStruct *wim, int image, bool streams_ok, return 0; } +struct insert_other_if_hard_filtered_ctx { + struct blob_size_table *tab; + struct filter_context *filter_ctx; +}; + static int -process_filtered_stream(struct wim_lookup_table_entry *lte, void *_ctx) -{ - struct find_streams_ctx *ctx = _ctx; - u16 filtered = 0; - - /* Calculate and set lte->filtered. 
*/
-	if (lte->resource_location == RESOURCE_IN_WIM) {
-		if (lte->rspec->wim == ctx->wim &&
-		    (ctx->write_flags & WIMLIB_WRITE_FLAG_OVERWRITE))
-			filtered |= FILTERED_SAME_WIM;
-		if (lte->rspec->wim != ctx->wim &&
-		    (ctx->write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS))
-			filtered |= FILTERED_EXTERNAL_WIM;
-	}
-	lte->filtered = filtered;
-
-	/* Filtered streams get inserted into the stream size table too, unless
-	 * they already were.  This is because streams that are checksummed
-	 * on-the-fly during the write should not be written if they are
-	 * duplicates of filtered stream.  */
-	if (lte->filtered && lte->out_refcnt == 0)
-		stream_size_table_insert(lte, &ctx->stream_size_tab);
+insert_other_if_hard_filtered(struct blob_descriptor *blob, void *_ctx)
+{
+	struct insert_other_if_hard_filtered_ctx *ctx = _ctx;
+
+	if (!blob->will_be_in_output_wim &&
+	    blob_hard_filtered(blob, ctx->filter_ctx))
+		blob_size_table_insert(blob, ctx->tab);
 	return 0;
 }
 
 static int
-mark_stream_not_filtered(struct wim_lookup_table_entry *lte, void *_ignore)
+determine_blob_size_uniquity(struct list_head *blob_list,
+			     struct blob_table *lt,
+			     struct filter_context *filter_ctx)
 {
-	lte->filtered = 0;
+	int ret;
+	struct blob_size_table tab;
+	struct blob_descriptor *blob;
+
+	ret = init_blob_size_table(&tab, 9001);
+	if (ret)
+		return ret;
+
+	if (may_hard_filter_blobs(filter_ctx)) {
+		struct insert_other_if_hard_filtered_ctx ctx = {
+			.tab = &tab,
+			.filter_ctx = filter_ctx,
+		};
+		for_blob_in_table(lt, insert_other_if_hard_filtered, &ctx);
+	}
+
+	list_for_each_entry(blob, blob_list, write_blobs_list)
+		blob_size_table_insert(blob, &tab);
+
+	destroy_blob_size_table(&tab);
 	return 0;
 }
 
-/* Given the list of streams to include in a logical write of a WIM, handle
- * filtering out streams already present in the WIM or already present in
- * external WIMs, depending on the write flags provided.  */
 static void
-handle_stream_filtering(struct find_streams_ctx *ctx)
+filter_blob_list_for_write(struct list_head *blob_list,
+			   struct filter_context *filter_ctx)
 {
-	struct wim_lookup_table_entry *lte, *tmp;
+	struct blob_descriptor *blob, *tmp;
 
-	if (!(ctx->write_flags & (WIMLIB_WRITE_FLAG_OVERWRITE |
-				  WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS)))
-	{
-		for_lookup_table_entry(ctx->wim->lookup_table,
-				       mark_stream_not_filtered, ctx);
-		return;
-	}
-
-	for_lookup_table_entry(ctx->wim->lookup_table,
-			       process_filtered_stream, ctx);
-
-	/* Streams in logical write list that were filtered can be removed. */
-	list_for_each_entry_safe(lte, tmp, &ctx->stream_list,
-				 write_streams_list)
-		if (lte->filtered)
-			list_del(&lte->write_streams_list);
-}
-
-/* Prepares list of streams to write for the specified WIM image(s).  This wraps
- * around prepare_logical_stream_list() to handle filtering out streams already
- * present in the WIM or already present in external WIMs, depending on the
- * write flags provided.
- *
- * Note: some additional data is stored in each `struct wim_lookup_table_entry':
- *
- * - 'out_refcnt' is set to the number of references found for the logical write.
- *   This will be nonzero on all streams in the list returned by this function,
- *   but will also be nonzero on streams not in the list that were included in
- *   the logical write list, but filtered out from the returned list.
- * - 'filtered' is set to nonzero if the stream was filtered.  Filtered streams
- *   are not included in the list of streams returned by this function.
- * - 'unique_size' is set if the stream has a unique size among all streams in - * the logical write plus any filtered streams in the entire WIM that could - * potentially turn out to have the same checksum as a yet-to-be-checksummed - * stream being written. + list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) { + int status = blob_filtered(blob, filter_ctx); + + if (status == 0) { + /* Not filtered. */ + continue; + } else { + if (status > 0) { + /* Soft filtered. */ + } else { + /* Hard filtered. */ + blob->will_be_in_output_wim = 0; + list_del(&blob->blob_table_list); + } + list_del(&blob->write_blobs_list); + } + } +} + +/* + * prepare_blob_list_for_write() - + * + * Prepare the list of blobs to write for writing a WIM containing the specified + * image(s) with the specified write flags. + * + * @wim + * The WIMStruct on whose behalf the write is occurring. + * + * @image + * Image(s) from the WIM to write; may be WIMLIB_ALL_IMAGES. + * + * @write_flags + * WIMLIB_WRITE_FLAG_* flags for the write operation: + * + * STREAMS_OK: For writes of all images, assume that all blobs in the blob + * table of @wim and the per-image lists of unhashed blobs should be taken + * as-is, and image metadata should not be searched for references. This + * does not exclude filtering with OVERWRITE and SKIP_EXTERNAL_WIMS, below. + * + * OVERWRITE: Blobs already present in @wim shall not be returned in + * @blob_list_ret. + * + * SKIP_EXTERNAL_WIMS: Blobs already present in a WIM file, but not @wim, + * shall be returned in neither @blob_list_ret nor @blob_table_list_ret. + * + * @blob_list_ret + * List of blobs, linked by write_blobs_list, that need to be written will + * be returned here. + * + * Note that this function assumes that unhashed blobs will be written; it + * does not take into account that they may become duplicates when actually + * hashed. + * + * @blob_table_list_ret + * List of blobs, linked by blob_table_list, that need to be included in + * the WIM's blob table will be returned here. This will be a superset of + * the blobs in @blob_list_ret. + * + * This list will be a proper superset of @blob_list_ret if and only if + * WIMLIB_WRITE_FLAG_OVERWRITE was specified in @write_flags and some of + * the blobs that would otherwise need to be written were already located + * in the WIM file. + * + * All blobs in this list will have @out_refcnt set to the number of + * references to the blob in the output WIM. If + * WIMLIB_WRITE_FLAG_STREAMS_OK was specified in @write_flags, @out_refcnt + * may be as low as 0. + * + * @filter_ctx_ret + * A context for queries of blob filter status with blob_filtered() is + * returned in this location. + * + * In addition, @will_be_in_output_wim will be set to 1 in all blobs inserted + * into @blob_table_list_ret and to 0 in all blobs in the blob table of @wim not + * inserted into @blob_table_list_ret. + * + * Still furthermore, @unique_size will be set to 1 on all blobs in + * @blob_list_ret that have unique size among all blobs in @blob_list_ret and + * among all blobs in the blob table of @wim that are ineligible for being + * written due to filtering. + * + * Returns 0 on success; nonzero on read error, memory allocation error, or + * otherwise. 
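+ *
+ * (Illustrative sketch, not text from the original source: a caller such as
+ * write_file_data() below is expected to drive this function like so:
+ *
+ *	struct list_head blob_list, blob_table_list;
+ *	struct filter_context filter_ctx;
+ *	int ret;
+ *
+ *	ret = prepare_blob_list_for_write(wim, image, write_flags,
+ *					  &blob_list, &blob_table_list,
+ *					  &filter_ctx);
+ *	if (ret)
+ *		return ret;
+ *
+ * after which the blobs in @blob_list are written out and the blob table is
+ * built from @blob_table_list.)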
*/ static int -prepare_stream_list(WIMStruct *wim, int image, int write_flags, - struct list_head *stream_list) +prepare_blob_list_for_write(WIMStruct *wim, int image, + int write_flags, + struct list_head *blob_list_ret, + struct list_head *blob_table_list_ret, + struct filter_context *filter_ctx_ret) { int ret; - bool streams_ok; - struct find_streams_ctx ctx; + struct blob_descriptor *blob; + + filter_ctx_ret->write_flags = write_flags; + filter_ctx_ret->wim = wim; - INIT_LIST_HEAD(&ctx.stream_list); - ret = init_stream_size_table(&ctx.stream_size_tab, - wim->lookup_table->capacity); + ret = prepare_unfiltered_list_of_blobs_in_output_wim( + wim, + image, + write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK, + blob_list_ret); if (ret) return ret; - ctx.write_flags = write_flags; - ctx.wim = wim; - streams_ok = ((write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK) != 0); + INIT_LIST_HEAD(blob_table_list_ret); + list_for_each_entry(blob, blob_list_ret, write_blobs_list) + list_add_tail(&blob->blob_table_list, blob_table_list_ret); - ret = prepare_logical_stream_list(wim, image, streams_ok, &ctx); + ret = determine_blob_size_uniquity(blob_list_ret, wim->blob_table, + filter_ctx_ret); if (ret) - goto out_destroy_table; + return ret; - handle_stream_filtering(&ctx); - list_transfer(&ctx.stream_list, stream_list); - ret = 0; -out_destroy_table: - destroy_stream_size_table(&ctx.stream_size_tab); - return ret; + if (may_filter_blobs(filter_ctx_ret)) + filter_blob_list_for_write(blob_list_ret, filter_ctx_ret); + + return 0; } static int -write_wim_streams(WIMStruct *wim, int image, int write_flags, - unsigned num_threads, - wimlib_progress_func_t progress_func, - struct list_head *stream_list_override) +write_file_data(WIMStruct *wim, int image, int write_flags, + unsigned num_threads, + struct list_head *blob_list_override, + struct list_head *blob_table_list_ret) { int ret; - struct list_head _stream_list; - struct list_head *stream_list; - struct wim_lookup_table_entry *lte; - - if (stream_list_override == NULL) { - /* Normal case: prepare stream list from image(s) being written. + struct list_head _blob_list; + struct list_head *blob_list; + struct blob_descriptor *blob; + struct filter_context _filter_ctx; + struct filter_context *filter_ctx; + + if (blob_list_override == NULL) { + /* Normal case: prepare blob list from image(s) being written. */ - stream_list = &_stream_list; - ret = prepare_stream_list(wim, image, write_flags, stream_list); + blob_list = &_blob_list; + filter_ctx = &_filter_ctx; + ret = prepare_blob_list_for_write(wim, image, write_flags, + blob_list, + blob_table_list_ret, + filter_ctx); if (ret) return ret; } else { /* Currently only as a result of wimlib_split() being called: - * use stream list already explicitly provided. Use existing + * use blob list already explicitly provided. Use existing * reference counts. */ - stream_list = stream_list_override; - list_for_each_entry(lte, stream_list, write_streams_list) - lte->out_refcnt = (lte->refcnt ? 
lte->refcnt : 1); + blob_list = blob_list_override; + filter_ctx = NULL; + INIT_LIST_HEAD(blob_table_list_ret); + list_for_each_entry(blob, blob_list, write_blobs_list) { + blob->out_refcnt = blob->refcnt; + blob->will_be_in_output_wim = 1; + blob->unique_size = 0; + list_add_tail(&blob->blob_table_list, blob_table_list_ret); + } } - return write_stream_list(stream_list, - wim->lookup_table, - &wim->out_fd, - wim->out_compression_type, - wim->out_chunk_size, - &wim->lzx_context, - write_flags, - num_threads, - progress_func); + return write_file_data_blobs(wim, + blob_list, + write_flags, + num_threads, + filter_ctx); } static int -write_wim_metadata_resources(WIMStruct *wim, int image, int write_flags, - wimlib_progress_func_t progress_func) +write_metadata_resources(WIMStruct *wim, int image, int write_flags) { int ret; int start_image; int end_image; int write_resource_flags; - if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA) { - DEBUG("Not writing any metadata resources."); + if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA) return 0; - } write_resource_flags = write_flags_to_resource_flags(write_flags); - DEBUG("Writing metadata resources (offset=%"PRIu64")", - wim->out_fd.offset); + write_resource_flags &= ~WRITE_RESOURCE_FLAG_SOLID; - if (progress_func) - progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN, NULL); + ret = call_progress(wim->progfunc, + WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN, + NULL, wim->progctx); + if (ret) + return ret; if (image == WIMLIB_ALL_IMAGES) { start_image = 1; @@ -2171,42 +2141,31 @@ write_wim_metadata_resources(WIMStruct *wim, int image, int write_flags, * the original (or was newly added). Otherwise just copy the * existing one. */ if (imd->modified) { - DEBUG("Image %u was modified; building and writing new " - "metadata resource", i); ret = write_metadata_resource(wim, i, write_resource_flags); } else if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) { - DEBUG("Image %u was not modified; re-using existing " - "metadata resource.", i); - wim_res_spec_to_hdr(imd->metadata_lte->rspec, - &imd->metadata_lte->out_reshdr); + blob_set_out_reshdr_for_reuse(imd->metadata_blob); ret = 0; } else { - DEBUG("Image %u was not modified; copying existing " - "metadata resource.", i); - ret = write_wim_resource(imd->metadata_lte, + ret = write_wim_resource(imd->metadata_blob, &wim->out_fd, wim->out_compression_type, wim->out_chunk_size, - &imd->metadata_lte->out_reshdr, - write_resource_flags, - &wim->lzx_context); + write_resource_flags); } if (ret) return ret; } - if (progress_func) - progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_END, NULL); - return 0; + + return call_progress(wim->progfunc, + WIMLIB_PROGRESS_MSG_WRITE_METADATA_END, + NULL, wim->progctx); } static int open_wim_writable(WIMStruct *wim, const tchar *path, int open_flags) { - int raw_fd; - DEBUG("Opening \"%"TS"\" for writing.", path); - - raw_fd = topen(path, open_flags | O_BINARY, 0644); + int raw_fd = topen(path, open_flags | O_BINARY, 0644); if (raw_fd < 0) { ERROR_WITH_ERRNO("Failed to open \"%"TS"\" for writing", path); return WIMLIB_ERR_OPEN; @@ -2220,75 +2179,112 @@ close_wim_writable(WIMStruct *wim, int write_flags) { int ret = 0; - if (!(write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)) { - DEBUG("Closing WIM file."); + if (!(write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)) if (filedes_valid(&wim->out_fd)) if (filedes_close(&wim->out_fd)) ret = WIMLIB_ERR_WRITE; - } filedes_invalidate(&wim->out_fd); return ret; } +static int +cmp_blobs_by_out_rdesc(const void *p1, const void *p2) +{ + 
const struct blob_descriptor *blob1, *blob2; + + blob1 = *(const struct blob_descriptor**)p1; + blob2 = *(const struct blob_descriptor**)p2; + + if (blob1->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) { + if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) { + if (blob1->out_res_offset_in_wim != blob2->out_res_offset_in_wim) + return cmp_u64(blob1->out_res_offset_in_wim, + blob2->out_res_offset_in_wim); + } else { + return 1; + } + } else { + if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) + return -1; + } + return cmp_u64(blob1->out_reshdr.offset_in_wim, + blob2->out_reshdr.offset_in_wim); +} + +static int +write_blob_table(WIMStruct *wim, int image, int write_flags, + struct list_head *blob_table_list) +{ + int ret; + + /* Set output resource metadata for blobs already present in WIM. */ + if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) { + struct blob_descriptor *blob; + list_for_each_entry(blob, blob_table_list, blob_table_list) { + if (blob->blob_location == BLOB_IN_WIM && + blob->rdesc->wim == wim) + { + blob_set_out_reshdr_for_reuse(blob); + } + } + } + + ret = sort_blob_list(blob_table_list, + offsetof(struct blob_descriptor, blob_table_list), + cmp_blobs_by_out_rdesc); + if (ret) + return ret; + + /* Add entries for metadata resources. */ + if (!(write_flags & WIMLIB_WRITE_FLAG_NO_METADATA)) { + int start_image; + int end_image; + + if (image == WIMLIB_ALL_IMAGES) { + start_image = 1; + end_image = wim->hdr.image_count; + } else { + start_image = image; + end_image = image; + } + + /* Push metadata blob table entries onto the front of the list + * in reverse order, so that they're written in order. + */ + for (int i = end_image; i >= start_image; i--) { + struct blob_descriptor *metadata_blob; + + metadata_blob = wim->image_metadata[i - 1]->metadata_blob; + wimlib_assert(metadata_blob->out_reshdr.flags & WIM_RESHDR_FLAG_METADATA); + metadata_blob->out_refcnt = 1; + list_add(&metadata_blob->blob_table_list, blob_table_list); + } + } + + return write_blob_table_from_blob_list(blob_table_list, + &wim->out_fd, + wim->out_hdr.part_number, + &wim->out_hdr.blob_table_reshdr, + write_flags_to_resource_flags(write_flags)); +} + /* - * finish_write(): - * - * Finish writing a WIM file: write the lookup table, xml data, and integrity - * table, then overwrite the WIM header. By default, closes the WIM file - * descriptor (@wim->out_fd) if successful. - * - * write_flags is a bitwise OR of the following: - * - * (public) WIMLIB_WRITE_FLAG_CHECK_INTEGRITY: - * Include an integrity table. - * - * (public) WIMLIB_WRITE_FLAG_FSYNC: - * fsync() the output file before closing it. - * - * (public) WIMLIB_WRITE_FLAG_PIPABLE: - * Writing a pipable WIM, possibly to a pipe; include pipable WIM - * stream headers before the lookup table and XML data, and also - * write the WIM header at the end instead of seeking to the - * beginning. Can't be combined with - * WIMLIB_WRITE_FLAG_CHECK_INTEGRITY. - * - * (private) WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE: - * Don't write the lookup table. - * - * (private) WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE: - * When (if) writing the integrity table, re-use entries from the - * existing integrity table, if possible. - * - * (private) WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML: - * After writing the XML data but before writing the integrity - * table, write a temporary WIM header and flush the stream so that - * the WIM is less likely to become corrupted upon abrupt program - * termination. 
- * (private) WIMLIB_WRITE_FLAG_HEADER_AT_END:
- *	Instead of overwriting the WIM header at the beginning of the
- *	file, simply append it to the end of the file.  (Used when
- *	writing to pipe.)
- * (private) WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR:
- *	Do not close the file descriptor @wim->out_fd on either success
- *	on failure.
- * (private) WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES:
- *	Use the existing <TOTALBYTES> stored in the in-memory XML
- *	information, rather than setting it to the offset of the XML
- *	data being written.
+ * Finish writing a WIM file: write the blob table, xml data, and integrity
+ * table, then overwrite the WIM header.
+ *
+ * The output file descriptor is closed on success, except when writing to a
+ * user-specified file descriptor (WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR set).
  */
 static int
 finish_write(WIMStruct *wim, int image, int write_flags,
-	     wimlib_progress_func_t progress_func,
-	     struct list_head *stream_list_override)
+	     struct list_head *blob_table_list)
 {
-	int ret;
-	off_t hdr_offset;
 	int write_resource_flags;
-	off_t old_lookup_table_end;
-	off_t new_lookup_table_end;
+	off_t old_blob_table_end = 0;
+	struct integrity_table *old_integrity_table = NULL;
+	off_t new_blob_table_end;
 	u64 xml_totalbytes;
-
-	DEBUG("image=%d, write_flags=%08x", image, write_flags);
+	int ret;
 
 	write_resource_flags = write_flags_to_resource_flags(write_flags);
 
@@ -2296,24 +2292,44 @@ finish_write(WIMStruct *wim, int image, int write_flags,
 	 * metadata resource labeled as the "boot metadata".  This entry should
 	 * be zeroed out if there is no bootable image (boot_idx 0).  Otherwise,
 	 * it should be a copy of the resource entry for the image that is
-	 * marked as bootable.  This is not well documented...  */
-	if (wim->hdr.boot_idx == 0) {
-		zero_reshdr(&wim->hdr.boot_metadata_reshdr);
+	 * marked as bootable.  */
+	if (wim->out_hdr.boot_idx == 0) {
+		zero_reshdr(&wim->out_hdr.boot_metadata_reshdr);
 	} else {
-		copy_reshdr(&wim->hdr.boot_metadata_reshdr,
-			    &wim->image_metadata[wim->hdr.boot_idx- 1
-					]->metadata_lte->out_reshdr);
-	}
-
-	/* Write lookup table.  (Save old position first.) */
-	old_lookup_table_end = wim->hdr.lookup_table_reshdr.offset_in_wim +
-			       wim->hdr.lookup_table_reshdr.size_in_wim;
-	if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
-		ret = write_wim_lookup_table(wim, image, write_flags,
-					     &wim->hdr.lookup_table_reshdr,
-					     stream_list_override);
-		if (ret)
+		copy_reshdr(&wim->out_hdr.boot_metadata_reshdr,
+			    &wim->image_metadata[
+				wim->out_hdr.boot_idx - 1]->metadata_blob->out_reshdr);
+	}
+
+	/* If overwriting the WIM file containing an integrity table in-place,
+	 * we'd like to re-use the information in the old integrity table
+	 * instead of recalculating it.  But we might overwrite the old
+	 * integrity table when we expand the XML data.  Read it into memory
+	 * just in case.  */
+	if ((write_flags & (WIMLIB_WRITE_FLAG_OVERWRITE |
+			    WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)) ==
+	    (WIMLIB_WRITE_FLAG_OVERWRITE |
+	     WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
+	    && wim_has_integrity_table(wim))
+	{
+		old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
+				     wim->hdr.blob_table_reshdr.size_in_wim;
+		(void)read_integrity_table(wim,
+					   old_blob_table_end - WIM_HEADER_DISK_SIZE,
+					   &old_integrity_table);
+		/* If we couldn't read the old integrity table, we can still
+		 * re-calculate the full integrity table ourselves.  Hence the
+		 * ignoring of the return value.  */
+	}
+
+	/* Write blob table if needed.
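+	 * (This step is skipped when WIMLIB_WRITE_FLAG_NO_NEW_BLOBS is set,
+	 * as in the in-place overwrite path below, where no blobs were added
+	 * and the existing blob table remains valid.)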
*/ + if (!(write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)) { + ret = write_blob_table(wim, image, write_flags, + blob_table_list); + if (ret) { + free_integrity_table(old_integrity_table); return ret; + } } /* Write XML data. */ @@ -2321,51 +2337,55 @@ finish_write(WIMStruct *wim, int image, int write_flags, if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES) xml_totalbytes = WIM_TOTALBYTES_USE_EXISTING; ret = write_wim_xml_data(wim, image, xml_totalbytes, - &wim->hdr.xml_data_reshdr, + &wim->out_hdr.xml_data_reshdr, write_resource_flags); - if (ret) + if (ret) { + free_integrity_table(old_integrity_table); return ret; + } - /* Write integrity table (optional). */ + /* Write integrity table if needed. */ if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) { - if (write_flags & WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) { + if (write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS) { + /* The XML data we wrote may have overwritten part of + * the old integrity table, so while calculating the new + * integrity table we should temporarily update the WIM + * header to remove the integrity table reference. */ struct wim_header checkpoint_hdr; - memcpy(&checkpoint_hdr, &wim->hdr, sizeof(struct wim_header)); + memcpy(&checkpoint_hdr, &wim->out_hdr, sizeof(struct wim_header)); zero_reshdr(&checkpoint_hdr.integrity_table_reshdr); checkpoint_hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS; - ret = write_wim_header_at_offset(&checkpoint_hdr, - &wim->out_fd, 0); - if (ret) + ret = write_wim_header(&checkpoint_hdr, &wim->out_fd, 0); + if (ret) { + free_integrity_table(old_integrity_table); return ret; + } } - if (!(write_flags & WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE)) - old_lookup_table_end = 0; - - new_lookup_table_end = wim->hdr.lookup_table_reshdr.offset_in_wim + - wim->hdr.lookup_table_reshdr.size_in_wim; + new_blob_table_end = wim->out_hdr.blob_table_reshdr.offset_in_wim + + wim->out_hdr.blob_table_reshdr.size_in_wim; ret = write_integrity_table(wim, - new_lookup_table_end, - old_lookup_table_end, - progress_func); + new_blob_table_end, + old_blob_table_end, + old_integrity_table); + free_integrity_table(old_integrity_table); if (ret) return ret; } else { /* No integrity table. */ - zero_reshdr(&wim->hdr.integrity_table_reshdr); + zero_reshdr(&wim->out_hdr.integrity_table_reshdr); } /* Now that all information in the WIM header has been determined, the * preliminary header written earlier can be overwritten, the header of * the existing WIM file can be overwritten, or the final header can be * written to the end of the pipable WIM. */ - wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS; - hdr_offset = 0; - if (write_flags & WIMLIB_WRITE_FLAG_HEADER_AT_END) - hdr_offset = wim->out_fd.offset; - DEBUG("Writing new header @ %"PRIu64".", hdr_offset); - ret = write_wim_header_at_offset(&wim->hdr, &wim->out_fd, hdr_offset); + wim->out_hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS; + if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE) + ret = write_wim_header(&wim->out_hdr, &wim->out_fd, wim->out_fd.offset); + else + ret = write_wim_header(&wim->out_hdr, &wim->out_fd, 0); if (ret) return ret; @@ -2376,7 +2396,6 @@ finish_write(WIMStruct *wim, int image, int write_flags, * operation has been written to disk, but the new file data has not. 
*/ if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) { - DEBUG("Syncing WIM file."); if (fsync(wim->out_fd.fd)) { ERROR_WITH_ERRNO("Error syncing data to WIM file"); return WIMLIB_ERR_WRITE; @@ -2392,28 +2411,30 @@ finish_write(WIMStruct *wim, int image, int write_flags, } #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK) + +/* Set advisory lock on WIM file (if not already done so) */ int -lock_wim(WIMStruct *wim, int fd) +lock_wim_for_append(WIMStruct *wim) { - int ret = 0; - if (fd != -1 && !wim->wim_locked) { - ret = flock(fd, LOCK_EX | LOCK_NB); - if (ret != 0) { - if (errno == EWOULDBLOCK) { - ERROR("`%"TS"' is already being modified or has been " - "mounted read-write\n" - " by another process!", wim->filename); - ret = WIMLIB_ERR_ALREADY_LOCKED; - } else { - WARNING_WITH_ERRNO("Failed to lock `%"TS"'", - wim->filename); - ret = 0; - } - } else { - wim->wim_locked = 1; - } + if (wim->locked_for_append) + return 0; + if (!flock(wim->in_fd.fd, LOCK_EX | LOCK_NB)) { + wim->locked_for_append = 1; + return 0; + } + if (errno != EWOULDBLOCK) + return 0; + return WIMLIB_ERR_ALREADY_LOCKED; +} + +/* Remove advisory lock on WIM file (if present) */ +void +unlock_wim_for_append(WIMStruct *wim) +{ + if (wim->locked_for_append) { + flock(wim->in_fd.fd, LOCK_UN); + wim->locked_for_append = 0; } - return ret; } #endif @@ -2433,9 +2454,9 @@ lock_wim(WIMStruct *wim, int fd) * stops other software from trying to read the file as a normal WIM. * * - The header at the beginning of the file does not contain all the normal - * information; in particular it will have all 0's for the lookup table and - * XML data resource entries. This is because this information cannot be - * determined until the lookup table and XML data have been written. + * information; in particular it will have all 0's for the blob table and XML + * data resource entries. This is because this information cannot be + * determined until the blob table and XML data have been written. * Consequently, wimlib will write the full header at the very end of the * file. The header at the end, however, is only used when reading the WIM * from a seekable file (not a pipe). @@ -2445,14 +2466,17 @@ lock_wim(WIMStruct *wim, int fd) * reading the WIM from a pipe. This copy of the XML data is ignored if the * WIM is read from a seekable file (not a pipe). * - * - The format of resources, or streams, has been modified to allow them to be - * used before the "lookup table" has been read. Each stream is prefixed with - * a `struct pwm_stream_hdr' that is basically an abbreviated form of `struct - * wim_lookup_table_entry_disk' that only contains the SHA1 message digest, - * uncompressed stream size, and flags that indicate whether the stream is - * compressed. The data of uncompressed streams then follows literally, while - * the data of compressed streams follows in a modified format. Compressed - * streams do not begin with a chunk table, since the chunk table cannot be + * - Solid resources are not allowed. Each blob is always stored in its own + * resource. + * + * - The format of resources, or blobs, has been modified to allow them to be + * used before the "blob table" has been read. Each blob is prefixed with a + * `struct pwm_blob_hdr' that is basically an abbreviated form of `struct + * blob_descriptor_disk' that only contains the SHA-1 message digest, + * uncompressed blob size, and flags that indicate whether the blob is + * compressed. 
The data of uncompressed blobs then follows literally, while + * the data of compressed blobs follows in a modified format. Compressed + * blobs do not begin with a chunk table, since the chunk table cannot be * written until all chunks have been compressed. Instead, each compressed * chunk is prefixed by a `struct pwm_chunk_hdr' that gives its size. * Furthermore, the chunk table is written at the end of the resource instead @@ -2460,29 +2484,29 @@ lock_wim(WIMStruct *wim, int fd) * `struct pwm_chunk_hdr's were not present; also, the chunk table is only * used if the WIM is being read from a seekable file (not a pipe). * - * - Metadata resources always come before other file resources (streams). - * (This does not by itself constitute an incompatibility with normal WIMs, - * since this is valid in normal WIMs.) + * - Metadata blobs always come before non-metadata blobs. (This does not by + * itself constitute an incompatibility with normal WIMs, since this is valid + * in normal WIMs.) * - * - At least up to the end of the file resources, all components must be packed - * as tightly as possible; there cannot be any "holes" in the WIM. (This does + * - At least up to the end of the blobs, all components must be packed as + * tightly as possible; there cannot be any "holes" in the WIM. (This does * not by itself consititute an incompatibility with normal WIMs, since this * is valid in normal WIMs.) * - * Note: the lookup table, XML data, and header at the end are not used when + * Note: the blob table, XML data, and header at the end are not used when * applying from a pipe. They exist to support functionality such as image * application and export when the WIM is *not* read from a pipe. * * Layout of pipable WIM: * * ---------+----------+--------------------+----------------+--------------+-----------+--------+ - * | Header | XML data | Metadata resources | File resources | Lookup table | XML data | Header | + * | Header | XML data | Metadata resources | File resources | Blob table | XML data | Header | * ---------+----------+--------------------+----------------+--------------+-----------+--------+ * * Layout of normal WIM: * * +--------+-----------------------------+-------------------------+ - * | Header | File and metadata resources | Lookup table | XML data | + * | Header | File and metadata resources | Blob table | XML data | * +--------+-----------------------------+-------------------------+ * * An optional integrity table can follow the final XML data in both normal and @@ -2497,54 +2521,63 @@ lock_wim(WIMStruct *wim, int fd) */ static int write_pipable_wim(WIMStruct *wim, int image, int write_flags, - unsigned num_threads, wimlib_progress_func_t progress_func, - struct list_head *stream_list_override) + unsigned num_threads, + struct list_head *blob_list_override, + struct list_head *blob_table_list_ret) { int ret; struct wim_reshdr xml_reshdr; WARNING("Creating a pipable WIM, which will " "be incompatible\n" - " with Microsoft's software (wimgapi/imagex/Dism)."); + " with Microsoft's software (WIMGAPI/ImageX/DISM)."); /* At this point, the header at the beginning of the file has already * been written. */ /* For efficiency, when wimlib adds an image to the WIM with - * wimlib_add_image(), the SHA1 message digests of files is not + * wimlib_add_image(), the SHA-1 message digests of files are not * calculated; instead, they are calculated while the files are being * written. 
However, this does not work when writing a pipable WIM, - * since when writing a stream to a pipable WIM, its SHA1 message digest - * needs to be known before the stream data is written. Therefore, - * before getting much farther, we need to pre-calculate the SHA1 - * message digests of all streams that will be written. */ - ret = wim_checksum_unhashed_streams(wim); + * since when writing a blob to a pipable WIM, its SHA-1 message digest + * needs to be known before the blob data is written. Therefore, before + * getting much farther, we need to pre-calculate the SHA-1 message + * digests of all blobs that will be written. */ + ret = wim_checksum_unhashed_blobs(wim); if (ret) return ret; /* Write extra copy of the XML data. */ ret = write_wim_xml_data(wim, image, WIM_TOTALBYTES_OMIT, - &xml_reshdr, - WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE); + &xml_reshdr, WRITE_RESOURCE_FLAG_PIPABLE); if (ret) return ret; /* Write metadata resources for the image(s) being included in the * output WIM. */ - ret = write_wim_metadata_resources(wim, image, write_flags, - progress_func); + ret = write_metadata_resources(wim, image, write_flags); if (ret) return ret; - /* Write streams needed for the image(s) being included in the output - * WIM, or streams needed for the split WIM part. */ - return write_wim_streams(wim, image, write_flags, num_threads, - progress_func, stream_list_override); + /* Write file data needed for the image(s) being included in the output + * WIM, or file data needed for the split WIM part. */ + return write_file_data(wim, image, write_flags, + num_threads, blob_list_override, + blob_table_list_ret); - /* The lookup table, XML data, and header at end are handled by + /* The blob table, XML data, and header at end are handled by * finish_write(). */ } +static bool +should_default_to_solid_compression(WIMStruct *wim, int write_flags) +{ + return wim->out_hdr.wim_version == WIM_VERSION_SOLID && + !(write_flags & (WIMLIB_WRITE_FLAG_SOLID | + WIMLIB_WRITE_FLAG_PIPABLE)) && + wim_has_solid_resources(wim); +} + /* Write a standalone WIM or split WIM (SWM) part to a new file or to a file * descriptor. 
*/ int @@ -2553,57 +2586,13 @@ write_wim_part(WIMStruct *wim, int image, int write_flags, unsigned num_threads, - wimlib_progress_func_t progress_func, unsigned part_number, unsigned total_parts, - struct list_head *stream_list_override, + struct list_head *blob_list_override, const u8 *guid) { int ret; - struct wim_header hdr_save; - struct list_head lt_stream_list_override; - - if (total_parts == 1) - DEBUG("Writing standalone WIM."); - else - DEBUG("Writing split WIM part %u/%u", part_number, total_parts); - if (image == WIMLIB_ALL_IMAGES) - DEBUG("Including all images."); - else - DEBUG("Including image %d only.", image); - if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) - DEBUG("File descriptor: %d", *(const int*)path_or_fd); - else - DEBUG("Path: \"%"TS"\"", (const tchar*)path_or_fd); - DEBUG("Write flags: 0x%08x", write_flags); - if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) - DEBUG("\tCHECK_INTEGRITY"); - if (write_flags & WIMLIB_WRITE_FLAG_REBUILD) - DEBUG("\tREBUILD"); - if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS) - DEBUG("\tRECOMPRESS"); - if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) - DEBUG("\tFSYNC"); - if (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE) - DEBUG("\tFSYNC"); - if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG) - DEBUG("\tIGNORE_READONLY_FLAG"); - if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE) - DEBUG("\tPIPABLE"); - if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) - DEBUG("\tFILE_DESCRIPTOR"); - if (write_flags & WIMLIB_WRITE_FLAG_NO_METADATA) - DEBUG("\tNO_METADATA"); - if (write_flags & WIMLIB_WRITE_FLAG_USE_EXISTING_TOTALBYTES) - DEBUG("\tUSE_EXISTING_TOTALBYTES"); - if (num_threads == 0) - DEBUG("Number of threads: autodetect"); - else - DEBUG("Number of threads: %u", num_threads); - DEBUG("Progress function: %s", (progress_func ? "yes" : "no")); - DEBUG("Stream list: %s", (stream_list_override ? "specified" : "autodetect")); - DEBUG("GUID: %s", ((guid || wim->guid_set_explicitly) ? - "specified" : "generate new")); + struct list_head blob_table_list; /* Internally, this is always called with a valid part number and total * parts. */ @@ -2635,196 +2624,203 @@ write_wim_part(WIMStruct *wim, WIMLIB_WRITE_FLAG_NOT_PIPABLE)) return WIMLIB_ERR_INVALID_PARAM; - /* Save previous header, then start initializing the new one. */ - memcpy(&hdr_save, &wim->hdr, sizeof(struct wim_header)); + /* Include an integrity table by default if no preference was given and + * the WIM already had an integrity table. */ + if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY | + WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))) { + if (wim_has_integrity_table(wim)) + write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY; + } - /* Set default integrity and pipable flags. */ + /* Write a pipable WIM by default if no preference was given and the WIM + * was already pipable. */ if (!(write_flags & (WIMLIB_WRITE_FLAG_PIPABLE | - WIMLIB_WRITE_FLAG_NOT_PIPABLE))) + WIMLIB_WRITE_FLAG_NOT_PIPABLE))) { if (wim_is_pipable(wim)) write_flags |= WIMLIB_WRITE_FLAG_PIPABLE; + } - if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY | - WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))) - if (wim_has_integrity_table(wim)) - write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY; + if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE | + WIMLIB_WRITE_FLAG_SOLID)) + == (WIMLIB_WRITE_FLAG_PIPABLE | + WIMLIB_WRITE_FLAG_SOLID)) + { + ERROR("Solid compression is unsupported in pipable WIMs"); + return WIMLIB_ERR_INVALID_PARAM; + } - /* Set appropriate magic number. */ + /* Start initializing the new file header. 
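+	 * (Note: wim->hdr continues to describe the WIM file being read,
+	 * while the new wim->out_hdr accumulates the header of the file
+	 * being written.)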
*/ + memset(&wim->out_hdr, 0, sizeof(wim->out_hdr)); + + /* Set the magic number. */ if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE) - wim->hdr.magic = PWM_MAGIC; + wim->out_hdr.magic = PWM_MAGIC; else - wim->hdr.magic = WIM_MAGIC; - - /* Clear header flags that will be set automatically. */ - wim->hdr.flags &= ~(WIM_HDR_FLAG_METADATA_ONLY | - WIM_HDR_FLAG_RESOURCE_ONLY | - WIM_HDR_FLAG_SPANNED | - WIM_HDR_FLAG_WRITE_IN_PROGRESS); + wim->out_hdr.magic = WIM_MAGIC; - /* Set SPANNED header flag if writing part of a split WIM. */ - if (total_parts != 1) - wim->hdr.flags |= WIM_HDR_FLAG_SPANNED; + /* Set the version number. */ + if ((write_flags & WIMLIB_WRITE_FLAG_SOLID) || + wim->out_compression_type == WIMLIB_COMPRESSION_TYPE_LZMS) + wim->out_hdr.wim_version = WIM_VERSION_SOLID; + else + wim->out_hdr.wim_version = WIM_VERSION_DEFAULT; - /* Set part number and total parts of split WIM. This will be 1 and 1 - * if the WIM is standalone. */ - wim->hdr.part_number = part_number; - wim->hdr.total_parts = total_parts; + /* Default to solid compression if it is valid in the chosen WIM file + * format and the WIMStruct references any solid resources. This is + * useful when exporting an image from a solid WIM. */ + if (should_default_to_solid_compression(wim, write_flags)) + write_flags |= WIMLIB_WRITE_FLAG_SOLID; - /* Set compression type if different. */ - if (wim->compression_type != wim->out_compression_type) { - ret = set_wim_hdr_cflags(wim->out_compression_type, &wim->hdr); - wimlib_assert(ret == 0); + /* Set the header flags. */ + wim->out_hdr.flags = (wim->hdr.flags & (WIM_HDR_FLAG_RP_FIX | + WIM_HDR_FLAG_READONLY)); + if (total_parts != 1) + wim->out_hdr.flags |= WIM_HDR_FLAG_SPANNED; + if (wim->out_compression_type != WIMLIB_COMPRESSION_TYPE_NONE) { + wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESSION; + switch (wim->out_compression_type) { + case WIMLIB_COMPRESSION_TYPE_XPRESS: + wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESS_XPRESS; + break; + case WIMLIB_COMPRESSION_TYPE_LZX: + wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESS_LZX; + break; + case WIMLIB_COMPRESSION_TYPE_LZMS: + wim->out_hdr.flags |= WIM_HDR_FLAG_COMPRESS_LZMS; + break; + } } - /* Set chunk size if different. */ - wim->hdr.chunk_size = wim->out_chunk_size; + /* Set the chunk size. */ + wim->out_hdr.chunk_size = wim->out_chunk_size; - /* Use GUID if specified; otherwise generate a new one. */ + /* Set the GUID. */ + if (write_flags & WIMLIB_WRITE_FLAG_RETAIN_GUID) + guid = wim->hdr.guid; if (guid) - memcpy(wim->hdr.guid, guid, WIMLIB_GUID_LEN); - else if (!wim->guid_set_explicitly) - randomize_byte_array(wim->hdr.guid, WIMLIB_GUID_LEN); - - /* Clear references to resources that have not been written yet. */ - zero_reshdr(&wim->hdr.lookup_table_reshdr); - zero_reshdr(&wim->hdr.xml_data_reshdr); - zero_reshdr(&wim->hdr.boot_metadata_reshdr); - zero_reshdr(&wim->hdr.integrity_table_reshdr); - - /* Set image count and boot index correctly for single image writes. */ - if (image != WIMLIB_ALL_IMAGES) { - wim->hdr.image_count = 1; - if (wim->hdr.boot_idx == image) - wim->hdr.boot_idx = 1; - else - wim->hdr.boot_idx = 0; - } - - /* Split WIMs can't be bootable. */ - if (total_parts != 1) - wim->hdr.boot_idx = 0; + copy_guid(wim->out_hdr.guid, guid); + else + generate_guid(wim->out_hdr.guid); + + /* Set the part number and total parts. */ + wim->out_hdr.part_number = part_number; + wim->out_hdr.total_parts = total_parts; + + /* Set the image count. 
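+	 * (When a single image is written, the output WIM always contains
+	 * exactly one image, regardless of the index that image had in the
+	 * source WIM.)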
 */
+	if (image == WIMLIB_ALL_IMAGES)
+		wim->out_hdr.image_count = wim->hdr.image_count;
+	else
+		wim->out_hdr.image_count = 1;
+
+	/* Set the boot index.  */
+	wim->out_hdr.boot_idx = 0;
+	if (total_parts == 1) {
+		if (image == WIMLIB_ALL_IMAGES)
+			wim->out_hdr.boot_idx = wim->hdr.boot_idx;
+		else if (image == wim->hdr.boot_idx)
+			wim->out_hdr.boot_idx = 1;
+	}
 
-	/* Initialize output file descriptor. */
+	/* Set up the output file descriptor.  */
 	if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) {
-		/* File descriptor was explicitly provided.  Return error if
-		 * file descriptor is not seekable, unless writing a pipable WIM
-		 * was requested. */
-		wim->out_fd.fd = *(const int*)path_or_fd;
-		wim->out_fd.offset = 0;
+		/* File descriptor was explicitly provided.  */
+		filedes_init(&wim->out_fd, *(const int *)path_or_fd);
 		if (!filedes_is_seekable(&wim->out_fd)) {
+			/* The file descriptor is a pipe.  */
 			ret = WIMLIB_ERR_INVALID_PARAM;
 			if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
-				goto out_restore_hdr;
+				goto out_cleanup;
 			if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
 				ERROR("Can't include integrity check when "
 				      "writing pipable WIM to pipe!");
-				goto out_restore_hdr;
+				goto out_cleanup;
 			}
 		}
-
 	} else {
 		/* Filename of WIM to write was provided; open file descriptor
 		 * to it.  */
 		ret = open_wim_writable(wim, (const tchar*)path_or_fd,
 					O_TRUNC | O_CREAT | O_RDWR);
 		if (ret)
-			goto out_restore_hdr;
+			goto out_cleanup;
 	}
 
 	/* Write initial header.  This is merely a "dummy" header since it
-	 * doesn't have all the information yet, so it will be overwritten later
-	 * (unless writing a pipable WIM). */
+	 * doesn't have resource entries filled in yet, so it will be
+	 * overwritten later (unless writing a pipable WIM).  */
 	if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE))
-		wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
-	ret = write_wim_header(&wim->hdr, &wim->out_fd);
-	wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
+		wim->out_hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS;
+	ret = write_wim_header(&wim->out_hdr, &wim->out_fd, wim->out_fd.offset);
+	wim->out_hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS;
 	if (ret)
-		goto out_restore_hdr;
-
-	if (stream_list_override) {
-		struct wim_lookup_table_entry *lte;
-		INIT_LIST_HEAD(&lt_stream_list_override);
-		list_for_each_entry(lte, stream_list_override,
-				    write_streams_list)
-		{
-			list_add_tail(&lte->lookup_table_list,
-				      &lt_stream_list_override);
-		}
-	}
+		goto out_cleanup;
 
-	/* Write metadata resources and streams. */
+	/* Write file data and metadata resources.  */
 	if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE)) {
 		/* Default case: create a normal (non-pipable) WIM. */
-		ret = write_wim_streams(wim, image, write_flags, num_threads,
-					progress_func, stream_list_override);
+		ret = write_file_data(wim, image, write_flags,
+				      num_threads,
+				      blob_list_override,
+				      &blob_table_list);
 		if (ret)
-			goto out_restore_hdr;
+			goto out_cleanup;
 
-		ret = write_wim_metadata_resources(wim, image, write_flags,
-						   progress_func);
+		ret = write_metadata_resources(wim, image, write_flags);
 		if (ret)
-			goto out_restore_hdr;
+			goto out_cleanup;
 	} else {
 		/* Non-default case: create pipable WIM.  */
 		ret = write_pipable_wim(wim, image, write_flags, num_threads,
-					progress_func, stream_list_override);
+					blob_list_override,
+					&blob_table_list);
 		if (ret)
-			goto out_restore_hdr;
-		write_flags |= WIMLIB_WRITE_FLAG_HEADER_AT_END;
+			goto out_cleanup;
 	}
 
-	if (stream_list_override)
-		stream_list_override = &lt_stream_list_override;
-
-	/* Write lookup table, XML data, and (optional) integrity table.
*/ - ret = finish_write(wim, image, write_flags, progress_func, - stream_list_override); -out_restore_hdr: - memcpy(&wim->hdr, &hdr_save, sizeof(struct wim_header)); + /* Write blob table, XML data, and (optional) integrity table. */ + ret = finish_write(wim, image, write_flags, &blob_table_list); +out_cleanup: (void)close_wim_writable(wim, write_flags); - DEBUG("ret=%d", ret); return ret; } /* Write a standalone WIM to a file or file descriptor. */ static int write_standalone_wim(WIMStruct *wim, const void *path_or_fd, - int image, int write_flags, unsigned num_threads, - wimlib_progress_func_t progress_func) + int image, int write_flags, unsigned num_threads) { return write_wim_part(wim, path_or_fd, image, write_flags, - num_threads, progress_func, 1, 1, NULL, NULL); + num_threads, 1, 1, NULL, NULL); } /* API function documented in wimlib.h */ WIMLIBAPI int wimlib_write(WIMStruct *wim, const tchar *path, - int image, int write_flags, unsigned num_threads, - wimlib_progress_func_t progress_func) + int image, int write_flags, unsigned num_threads) { - if (!path) + if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC) return WIMLIB_ERR_INVALID_PARAM; - write_flags &= WIMLIB_WRITE_MASK_PUBLIC; + if (path == NULL || path[0] == T('\0')) + return WIMLIB_ERR_INVALID_PARAM; - return write_standalone_wim(wim, path, image, write_flags, - num_threads, progress_func); + return write_standalone_wim(wim, path, image, write_flags, num_threads); } /* API function documented in wimlib.h */ WIMLIBAPI int wimlib_write_to_fd(WIMStruct *wim, int fd, - int image, int write_flags, unsigned num_threads, - wimlib_progress_func_t progress_func) + int image, int write_flags, unsigned num_threads) { + if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC) + return WIMLIB_ERR_INVALID_PARAM; + if (fd < 0) return WIMLIB_ERR_INVALID_PARAM; - write_flags &= WIMLIB_WRITE_MASK_PUBLIC; write_flags |= WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR; - return write_standalone_wim(wim, &fd, image, write_flags, - num_threads, progress_func); + return write_standalone_wim(wim, &fd, image, write_flags, num_threads); } static bool @@ -2837,13 +2833,14 @@ any_images_modified(WIMStruct *wim) } static int -check_resource_offset(struct wim_lookup_table_entry *lte, void *_wim) +check_resource_offset(struct blob_descriptor *blob, void *_wim) { const WIMStruct *wim = _wim; off_t end_offset = *(const off_t*)wim->private; - if (lte->resource_location == RESOURCE_IN_WIM && lte->rspec->wim == wim && - lte->rspec->offset_in_wim + lte->rspec->size_in_wim > end_offset) + if (blob->blob_location == BLOB_IN_WIM && + blob->rdesc->wim == wim && + blob->rdesc->offset_in_wim + blob->rdesc->size_in_wim > end_offset) return WIMLIB_ERR_RESOURCE_ORDER; return 0; } @@ -2858,12 +2855,12 @@ check_resource_offsets(WIMStruct *wim, off_t end_offset) unsigned i; wim->private = &end_offset; - ret = for_lookup_table_entry(wim->lookup_table, check_resource_offset, wim); + ret = for_blob_in_table(wim->blob_table, check_resource_offset, wim); if (ret) return ret; for (i = 0; i < wim->hdr.image_count; i++) { - ret = check_resource_offset(wim->image_metadata[i]->metadata_lte, wim); + ret = check_resource_offset(wim->image_metadata[i]->metadata_blob, wim); if (ret) return ret; } @@ -2871,218 +2868,216 @@ check_resource_offsets(WIMStruct *wim, off_t end_offset) } /* - * Overwrite a WIM, possibly appending streams to it. + * Overwrite a WIM, possibly appending new resources to it. 
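+ * (wimlib_overwrite() below chooses this in-place append path only when
+ * can_overwrite_wim_inplace() permits it; otherwise the WIM is rebuilt
+ * through a temporary file.)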
* * A WIM looks like (or is supposed to look like) the following: * * Header (212 bytes) - * Streams and metadata resources (variable size) - * Lookup table (variable size) + * Resources for metadata and files (variable size) + * Blob table (variable size) * XML data (variable size) * Integrity table (optional) (variable size) * - * If we are not adding any streams or metadata resources, the lookup table is + * If we are not adding any new files or metadata, then the blob table is * unchanged--- so we only need to overwrite the XML data, integrity table, and * header. This operation is potentially unsafe if the program is abruptly * terminated while the XML data or integrity table are being overwritten, but * before the new header has been written. To partially alleviate this problem, - * a special flag (WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) is passed to - * finish_write() to cause a temporary WIM header to be written after the XML - * data has been written. This may prevent the WIM from becoming corrupted if - * the program is terminated while the integrity table is being calculated (but - * no guarantees, due to write re-ordering...). - * - * If we are adding new streams or images (metadata resources), the lookup table - * needs to be changed, and those streams need to be written. In this case, we - * try to perform a safe update of the WIM file by writing the streams *after* - * the end of the previous WIM, then writing the new lookup table, XML data, and - * (optionally) integrity table following the new streams. This will produce a - * layout like the following: + * we write a temporary header after the XML data has been written. This may + * prevent the WIM from becoming corrupted if the program is terminated while + * the integrity table is being calculated (but no guarantees, due to write + * re-ordering...). + * + * If we are adding new blobs, including new file data as well as any metadata + * for any new images, then the blob table needs to be changed, and those blobs + * need to be written. In this case, we try to perform a safe update of the WIM + * file by writing the blobs *after* the end of the previous WIM, then writing + * the new blob table, XML data, and (optionally) integrity table following the + * new blobs. This will produce a layout like the following: * * Header (212 bytes) - * (OLD) Streams and metadata resources (variable size) - * (OLD) Lookup table (variable size) + * (OLD) Resources for metadata and files (variable size) + * (OLD) Blob table (variable size) * (OLD) XML data (variable size) * (OLD) Integrity table (optional) (variable size) - * (NEW) Streams and metadata resources (variable size) - * (NEW) Lookup table (variable size) + * (NEW) Resources for metadata and files (variable size) + * (NEW) Blob table (variable size) * (NEW) XML data (variable size) * (NEW) Integrity table (optional) (variable size) * * At all points, the WIM is valid as nothing points to the new data yet. 
Then, - * the header is overwritten to point to the new lookup table, XML data, and + * the header is overwritten to point to the new blob table, XML data, and * integrity table, to produce the following layout: * * Header (212 bytes) - * Streams and metadata resources (variable size) + * Resources for metadata and files (variable size) * Nothing (variable size) - * More Streams and metadata resources (variable size) - * Lookup table (variable size) + * Resources for metadata and files (variable size) + * Blob table (variable size) * XML data (variable size) * Integrity table (optional) (variable size) * * This method allows an image to be appended to a large WIM very quickly, and - * is is crash-safe except in the case of write re-ordering, but the - * disadvantage is that a small hole is left in the WIM where the old lookup - * table, xml data, and integrity table were. (These usually only take up a - * small amount of space compared to the streams, however.) + * is crash-safe except in the case of write re-ordering, but the disadvantage + * is that a small hole is left in the WIM where the old blob table, xml data, + * and integrity table were. (These usually only take up a small amount of + * space compared to the blobs, however.) */ static int -overwrite_wim_inplace(WIMStruct *wim, int write_flags, - unsigned num_threads, - wimlib_progress_func_t progress_func) +overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads) { int ret; - struct list_head stream_list; off_t old_wim_end; - u64 old_lookup_table_end, old_xml_begin, old_xml_end; - struct wim_header hdr_save; - - DEBUG("Overwriting `%"TS"' in-place", wim->filename); + u64 old_blob_table_end, old_xml_begin, old_xml_end; + struct list_head blob_list; + struct list_head blob_table_list; + struct filter_context filter_ctx; - /* Set default integrity flag. */ + /* Include an integrity table by default if no preference was given and + * the WIM already had an integrity table. */ if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY | WIMLIB_WRITE_FLAG_NO_CHECK_INTEGRITY))) if (wim_has_integrity_table(wim)) write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY; + /* Start preparing the updated file header. */ + memcpy(&wim->out_hdr, &wim->hdr, sizeof(wim->out_hdr)); + + /* If using solid compression, the version number must be set to + * WIM_VERSION_SOLID. */ + if (write_flags & WIMLIB_WRITE_FLAG_SOLID) + wim->out_hdr.wim_version = WIM_VERSION_SOLID; + + /* Default to solid compression if it is valid in the chosen WIM file + * format and the WIMStruct references any solid resources. This is + * useful when updating a solid WIM. */ + if (should_default_to_solid_compression(wim, write_flags)) + write_flags |= WIMLIB_WRITE_FLAG_SOLID; + /* Set additional flags for overwrite. */ write_flags |= WIMLIB_WRITE_FLAG_OVERWRITE | WIMLIB_WRITE_FLAG_STREAMS_OK; - /* Make sure that the integrity table (if present) is after the XML - * data, and that there are no stream resources, metadata resources, or - * lookup tables after the XML data. Otherwise, these data would be - * overwritten. */ + /* Make sure there is no data after the XML data, except possibily an + * integrity table. If this were the case, then this data would be + * overwritten. 
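+	 * (The layout expected here is: blob table, then XML data, then the
+	 * optional integrity table last; the two checks below enforce this
+	 * ordering.)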
*/ old_xml_begin = wim->hdr.xml_data_reshdr.offset_in_wim; old_xml_end = old_xml_begin + wim->hdr.xml_data_reshdr.size_in_wim; - old_lookup_table_end = wim->hdr.lookup_table_reshdr.offset_in_wim + - wim->hdr.lookup_table_reshdr.size_in_wim; - if (wim->hdr.integrity_table_reshdr.offset_in_wim != 0 && + old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim + + wim->hdr.blob_table_reshdr.size_in_wim; + if (wim_has_integrity_table(wim) && wim->hdr.integrity_table_reshdr.offset_in_wim < old_xml_end) { WARNING("Didn't expect the integrity table to be before the XML data"); - return WIMLIB_ERR_RESOURCE_ORDER; + ret = WIMLIB_ERR_RESOURCE_ORDER; + goto out; } - if (old_lookup_table_end > old_xml_begin) { - WARNING("Didn't expect the lookup table to be after the XML data"); - return WIMLIB_ERR_RESOURCE_ORDER; + if (old_blob_table_end > old_xml_begin) { + WARNING("Didn't expect the blob table to be after the XML data"); + ret = WIMLIB_ERR_RESOURCE_ORDER; + goto out; } /* Set @old_wim_end, which indicates the point beyond which we don't * allow any file and metadata resources to appear without returning * WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we would otherwise * overwrite these resources). */ - if (!wim->deletion_occurred && !any_images_modified(wim)) { + if (!wim->image_deletion_occurred && !any_images_modified(wim)) { /* If no images have been modified and no images have been - * deleted, a new lookup table does not need to be written. We + * deleted, a new blob table does not need to be written. We * shall write the new XML data and optional integrity table - * immediately after the lookup table. Note that this may + * immediately after the blob table. Note that this may * overwrite an existing integrity table. */ - DEBUG("Skipping writing lookup table " - "(no images modified or deleted)"); - old_wim_end = old_lookup_table_end; - write_flags |= WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE | - WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML; - } else if (wim->hdr.integrity_table_reshdr.offset_in_wim != 0) { - /* Old WIM has an integrity table; begin writing new streams - * after it. */ + old_wim_end = old_blob_table_end; + write_flags |= WIMLIB_WRITE_FLAG_NO_NEW_BLOBS; + } else if (wim_has_integrity_table(wim)) { + /* Old WIM has an integrity table; begin writing new blobs after + * it. */ old_wim_end = wim->hdr.integrity_table_reshdr.offset_in_wim + wim->hdr.integrity_table_reshdr.size_in_wim; } else { - /* No existing integrity table; begin writing new streams after + /* No existing integrity table; begin writing new blobs after * the old XML data. */ old_wim_end = old_xml_end; } ret = check_resource_offsets(wim, old_wim_end); if (ret) - return ret; + goto out; - ret = prepare_stream_list(wim, WIMLIB_ALL_IMAGES, write_flags, - &stream_list); + ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES, write_flags, + &blob_list, &blob_table_list, + &filter_ctx); if (ret) - return ret; + goto out; + + if (write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS) + wimlib_assert(list_empty(&blob_list)); ret = open_wim_writable(wim, wim->filename, O_RDWR); if (ret) - return ret; + goto out; - ret = lock_wim(wim, wim->out_fd.fd); + ret = lock_wim_for_append(wim); if (ret) goto out_close_wim; - /* Save original header so it can be restored in case of error */ - memcpy(&hdr_save, &wim->hdr, sizeof(struct wim_header)); - /* Set WIM_HDR_FLAG_WRITE_IN_PROGRESS flag in header. 
*/ wim->hdr.flags |= WIM_HDR_FLAG_WRITE_IN_PROGRESS; ret = write_wim_header_flags(wim->hdr.flags, &wim->out_fd); + wim->hdr.flags &= ~WIM_HDR_FLAG_WRITE_IN_PROGRESS; if (ret) { ERROR_WITH_ERRNO("Error updating WIM header flags"); - goto out_restore_memory_hdr; + goto out_unlock_wim; } if (filedes_seek(&wim->out_fd, old_wim_end) == -1) { ERROR_WITH_ERRNO("Can't seek to end of WIM"); ret = WIMLIB_ERR_WRITE; - goto out_restore_physical_hdr; - } - - ret = write_stream_list(&stream_list, - wim->lookup_table, - &wim->out_fd, - wim->compression_type, - wim->chunk_size, - &wim->lzx_context, - write_flags, - num_threads, - progress_func); + goto out_restore_hdr; + } + + ret = write_file_data_blobs(wim, &blob_list, write_flags, + num_threads, &filter_ctx); if (ret) goto out_truncate; - ret = write_wim_metadata_resources(wim, WIMLIB_ALL_IMAGES, - write_flags, progress_func); + ret = write_metadata_resources(wim, WIMLIB_ALL_IMAGES, write_flags); if (ret) goto out_truncate; - write_flags |= WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE; ret = finish_write(wim, WIMLIB_ALL_IMAGES, write_flags, - progress_func, NULL); + &blob_table_list); if (ret) goto out_truncate; - goto out_unlock_wim; + unlock_wim_for_append(wim); + return 0; out_truncate: - if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) { - WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)", - wim->filename, old_wim_end); + if (!(write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)) { + WARNING("Truncating \"%"TS"\" to its original size " + "(%"PRIu64" bytes)", wim->filename, old_wim_end); /* Return value of ftruncate() is ignored because this is * already an error path. */ (void)ftruncate(wim->out_fd.fd, old_wim_end); } -out_restore_physical_hdr: - (void)write_wim_header_flags(hdr_save.flags, &wim->out_fd); -out_restore_memory_hdr: - memcpy(&wim->hdr, &hdr_save, sizeof(struct wim_header)); +out_restore_hdr: + (void)write_wim_header_flags(wim->hdr.flags, &wim->out_fd); +out_unlock_wim: + unlock_wim_for_append(wim); out_close_wim: (void)close_wim_writable(wim, write_flags); -out_unlock_wim: - wim->wim_locked = 0; +out: return ret; } static int -overwrite_wim_via_tmpfile(WIMStruct *wim, int write_flags, - unsigned num_threads, - wimlib_progress_func_t progress_func) +overwrite_wim_via_tmpfile(WIMStruct *wim, int write_flags, unsigned num_threads) { size_t wim_name_len; int ret; - DEBUG("Overwriting `%"TS"' via a temporary file", wim->filename); - /* Write the WIM to a temporary file in the same directory as the * original WIM. */ wim_name_len = tstrlen(wim->filename); @@ -3092,19 +3087,23 @@ overwrite_wim_via_tmpfile(WIMStruct *wim, int write_flags, tmpfile[wim_name_len + 9] = T('\0'); ret = wimlib_write(wim, tmpfile, WIMLIB_ALL_IMAGES, - write_flags | WIMLIB_WRITE_FLAG_FSYNC, - num_threads, progress_func); + write_flags | + WIMLIB_WRITE_FLAG_FSYNC | + WIMLIB_WRITE_FLAG_RETAIN_GUID, + num_threads); if (ret) { tunlink(tmpfile); return ret; } - close_wim(wim); + if (filedes_valid(&wim->in_fd)) { + filedes_close(&wim->in_fd); + filedes_invalidate(&wim->in_fd); + } /* Rename the new WIM file to the original WIM file. Note: on Windows * this actually calls win32_rename_replacement(), not _wrename(), so * that removing the existing destination file can be handled. 
*/ - DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, wim->filename); ret = trename(tmpfile, wim->filename); if (ret) { ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'", @@ -3118,27 +3117,51 @@ overwrite_wim_via_tmpfile(WIMStruct *wim, int write_flags, return WIMLIB_ERR_RENAME; } - if (progress_func) { - union wimlib_progress_info progress; - progress.rename.from = tmpfile; - progress.rename.to = wim->filename; - progress_func(WIMLIB_PROGRESS_MSG_RENAME, &progress); - } - return 0; + union wimlib_progress_info progress; + progress.rename.from = tmpfile; + progress.rename.to = wim->filename; + return call_progress(wim->progfunc, WIMLIB_PROGRESS_MSG_RENAME, + &progress, wim->progctx); +} + +/* Determine if the specified WIM file may be updated by appending in-place + * rather than writing and replacing it with an entirely new file. */ +static bool +can_overwrite_wim_inplace(const WIMStruct *wim, int write_flags) +{ + /* REBUILD flag forces full rebuild. */ + if (write_flags & WIMLIB_WRITE_FLAG_REBUILD) + return false; + + /* Image deletions cause full rebuild by default. */ + if (wim->image_deletion_occurred && + !(write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE)) + return false; + + /* Pipable WIMs cannot be updated in place, nor can a non-pipable WIM be + * turned into a pipable WIM in-place. */ + if (wim_is_pipable(wim) || (write_flags & WIMLIB_WRITE_FLAG_PIPABLE)) + return false; + + /* The default compression type and compression chunk size selected for + * the output WIM must be the same as those currently used for the WIM. + */ + if (wim->compression_type != wim->out_compression_type) + return false; + if (wim->chunk_size != wim->out_chunk_size) + return false; + + return true; } /* API function documented in wimlib.h */ WIMLIBAPI int -wimlib_overwrite(WIMStruct *wim, int write_flags, - unsigned num_threads, - wimlib_progress_func_t progress_func) +wimlib_overwrite(WIMStruct *wim, int write_flags, unsigned num_threads) { int ret; u32 orig_hdr_flags; - write_flags &= WIMLIB_WRITE_MASK_PUBLIC; - - if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) + if (write_flags & ~WIMLIB_WRITE_MASK_PUBLIC) return WIMLIB_ERR_INVALID_PARAM; if (!wim->filename) @@ -3152,19 +3175,11 @@ wimlib_overwrite(WIMStruct *wim, int write_flags, if (ret) return ret; - if ((!wim->deletion_occurred || (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE)) - && !(write_flags & (WIMLIB_WRITE_FLAG_REBUILD | - WIMLIB_WRITE_FLAG_PIPABLE)) - && !(wim_is_pipable(wim)) - && wim->compression_type == wim->out_compression_type - && wim->chunk_size == wim->out_chunk_size) - { - ret = overwrite_wim_inplace(wim, write_flags, num_threads, - progress_func); + if (can_overwrite_wim_inplace(wim, write_flags)) { + ret = overwrite_wim_inplace(wim, write_flags, num_threads); if (ret != WIMLIB_ERR_RESOURCE_ORDER) return ret; WARNING("Falling back to re-building entire WIM"); } - return overwrite_wim_via_tmpfile(wim, write_flags, num_threads, - progress_func); + return overwrite_wim_via_tmpfile(wim, write_flags, num_threads); }
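
Usage note (illustrative only; not part of the change above): this commit moves
the progress callback onto the WIMStruct itself (wim->progfunc and
wim->progctx, invoked through call_progress()), so wimlib_write() and
wimlib_overwrite() no longer take a wimlib_progress_func_t argument.  The
following is a minimal client sketch under that assumption; the registration
function name and the progress message fields should be checked against the
wimlib.h shipped with this version:

	#include <stdio.h>
	#include <wimlib.h>

	static enum wimlib_progress_status
	my_progress(enum wimlib_progress_msg msg,
		    union wimlib_progress_info *info, void *ctx)
	{
		/* Report bytes written for WRITE_STREAMS messages.  */
		if (msg == WIMLIB_PROGRESS_MSG_WRITE_STREAMS)
			printf("wrote %llu of %llu bytes\n",
			       (unsigned long long)info->write_streams.completed_bytes,
			       (unsigned long long)info->write_streams.total_bytes);
		return WIMLIB_PROGRESS_STATUS_CONTINUE;
	}

	int main(void)
	{
		WIMStruct *wim;
		int ret;

		ret = wimlib_open_wim("test.wim", 0, &wim);
		if (ret)
			return ret;

		/* Register the progress function once on the WIMStruct;
		 * it is no longer passed to each write call.  */
		wimlib_register_progress_function(wim, my_progress, NULL);

		/* Append-style update of the WIM, keeping its integrity
		 * table up to date.  */
		ret = wimlib_overwrite(wim, WIMLIB_WRITE_FLAG_CHECK_INTEGRITY, 0);
		wimlib_free(wim);
		return ret;
	}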