X-Git-Url: https://wimlib.net/git/?p=wimlib;a=blobdiff_plain;f=src%2Fwrite.c;h=6be215e3e76570bd38aa12fc28afec740911f92f;hp=fcc5983fca50ee2c300342a2e5a360285c8ef43d;hb=3abe6501c7ebb20a0ead1cd69ebd93cbe6b917e1;hpb=7251c7d0afac3b738dda1c4f45e6d3d3090f2622 diff --git a/src/write.c b/src/write.c index fcc5983f..6be215e3 100644 --- a/src/write.c +++ b/src/write.c @@ -6,7 +6,7 @@ */ /* - * Copyright (C) 2012, 2013, 2014 Eric Biggers + * Copyright (C) 2012, 2013, 2014, 2015 Eric Biggers * * This file is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free @@ -32,7 +32,14 @@ # include #endif +#include +#include +#include +#include + +#include "wimlib/alloca.h" #include "wimlib/assert.h" +#include "wimlib/blob_table.h" #include "wimlib/chunk_compressor.h" #include "wimlib/endianness.h" #include "wimlib/error.h" @@ -40,31 +47,24 @@ #include "wimlib/header.h" #include "wimlib/inode.h" #include "wimlib/integrity.h" -#include "wimlib/lookup_table.h" #include "wimlib/metadata.h" #include "wimlib/paths.h" #include "wimlib/progress.h" #include "wimlib/resource.h" +#include "wimlib/solid.h" #ifdef __WIN32__ # include "wimlib/win32.h" /* win32_rename_replacement() */ #endif #include "wimlib/write.h" #include "wimlib/xml.h" -#include -#include -#include -#include - -#ifdef HAVE_ALLOCA_H -# include -#endif /* wimlib internal flags used when writing resources. */ #define WRITE_RESOURCE_FLAG_RECOMPRESS 0x00000001 #define WRITE_RESOURCE_FLAG_PIPABLE 0x00000002 -#define WRITE_RESOURCE_FLAG_PACK_STREAMS 0x00000004 +#define WRITE_RESOURCE_FLAG_SOLID 0x00000004 #define WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE 0x00000008 +#define WRITE_RESOURCE_FLAG_SOLID_SORT 0x00000010 static inline int write_flags_to_resource_flags(int write_flags) @@ -75,10 +75,14 @@ write_flags_to_resource_flags(int write_flags) write_resource_flags |= WRITE_RESOURCE_FLAG_RECOMPRESS; if (write_flags & WIMLIB_WRITE_FLAG_PIPABLE) write_resource_flags |= WRITE_RESOURCE_FLAG_PIPABLE; - if (write_flags & WIMLIB_WRITE_FLAG_PACK_STREAMS) - write_resource_flags |= WRITE_RESOURCE_FLAG_PACK_STREAMS; + if (write_flags & WIMLIB_WRITE_FLAG_SOLID) + write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID; if (write_flags & WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES) write_resource_flags |= WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE; + if ((write_flags & (WIMLIB_WRITE_FLAG_SOLID | + WIMLIB_WRITE_FLAG_NO_SOLID_SORT)) == + WIMLIB_WRITE_FLAG_SOLID) + write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID_SORT; return write_resource_flags; } @@ -87,19 +91,20 @@ struct filter_context { WIMStruct *wim; }; -/* Determine specified stream should be filtered out from the write. +/* + * Determine whether the specified blob should be filtered out from the write. * * Return values: * - * < 0 : The stream should be hard-filtered; that is, not included in the - * output WIM at all. - * 0 : The stream should not be filtered out. - * > 0 : The stream should be soft-filtered; that is, it already exists in the + * < 0 : The blob should be hard-filtered; that is, not included in the output + * WIM file at all. + * 0 : The blob should not be filtered out. + * > 0 : The blob should be soft-filtered; that is, it already exists in the * WIM file and may not need to be written again. 
*/ static int -stream_filtered(const struct wim_lookup_table_entry *lte, - const struct filter_context *ctx) +blob_filtered(const struct blob_descriptor *blob, + const struct filter_context *ctx) { int write_flags; WIMStruct *wim; @@ -111,27 +116,27 @@ stream_filtered(const struct wim_lookup_table_entry *lte, wim = ctx->wim; if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE && - lte->resource_location == RESOURCE_IN_WIM && - lte->rspec->wim == wim) + blob->blob_location == BLOB_IN_WIM && + blob->rdesc->wim == wim) return 1; if (write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS && - lte->resource_location == RESOURCE_IN_WIM && - lte->rspec->wim != wim) + blob->blob_location == BLOB_IN_WIM && + blob->rdesc->wim != wim) return -1; return 0; } static bool -stream_hard_filtered(const struct wim_lookup_table_entry *lte, - struct filter_context *ctx) +blob_hard_filtered(const struct blob_descriptor *blob, + struct filter_context *ctx) { - return stream_filtered(lte, ctx) < 0; + return blob_filtered(blob, ctx) < 0; } static inline int -may_soft_filter_streams(const struct filter_context *ctx) +may_soft_filter_blobs(const struct filter_context *ctx) { if (ctx == NULL) return 0; @@ -139,7 +144,7 @@ may_soft_filter_streams(const struct filter_context *ctx) } static inline int -may_hard_filter_streams(const struct filter_context *ctx) +may_hard_filter_blobs(const struct filter_context *ctx) { if (ctx == NULL) return 0; @@ -147,20 +152,18 @@ may_hard_filter_streams(const struct filter_context *ctx) } static inline int -may_filter_streams(const struct filter_context *ctx) +may_filter_blobs(const struct filter_context *ctx) { - return (may_soft_filter_streams(ctx) || - may_hard_filter_streams(ctx)); + return (may_soft_filter_blobs(ctx) || may_hard_filter_blobs(ctx)); } - /* Return true if the specified resource is compressed and the compressed data * can be reused with the specified output parameters. */ static bool -can_raw_copy(const struct wim_lookup_table_entry *lte, +can_raw_copy(const struct blob_descriptor *blob, int write_resource_flags, int out_ctype, u32 out_chunk_size) { - const struct wim_resource_spec *rspec; + const struct wim_resource_descriptor *rdesc; if (write_resource_flags & WRITE_RESOURCE_FLAG_RECOMPRESS) return false; @@ -168,115 +171,106 @@ can_raw_copy(const struct wim_lookup_table_entry *lte, if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE) return false; - if (lte->resource_location != RESOURCE_IN_WIM) + if (blob->blob_location != BLOB_IN_WIM) return false; - rspec = lte->rspec; + rdesc = blob->rdesc; - if (rspec->is_pipable != !!(write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)) + if (rdesc->is_pipable != !!(write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)) return false; - if (rspec->flags & WIM_RESHDR_FLAG_COMPRESSED) { + if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) { /* Normal compressed resource: Must use same compression type * and chunk size. */ - return (rspec->compression_type == out_ctype && - rspec->chunk_size == out_chunk_size); + return (rdesc->compression_type == out_ctype && + rdesc->chunk_size == out_chunk_size); } - if ((rspec->flags & WIM_RESHDR_FLAG_PACKED_STREAMS) && - (write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS)) + if ((rdesc->flags & WIM_RESHDR_FLAG_SOLID) && + (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)) { - /* Packed resource: Such resources may contain multiple streams, + /* Solid resource: Such resources may contain multiple blobs, * and in general only a subset of them need to be written. 
As * a heuristic, re-use the raw data if more than two-thirds the * uncompressed size is being written. */ - /* Note: packed resources contain a header that specifies the + /* Note: solid resources contain a header that specifies the * compression type and chunk size; therefore we don't need to * check if they are compatible with @out_ctype and * @out_chunk_size. */ - struct wim_lookup_table_entry *res_stream; + struct blob_descriptor *res_blob; u64 write_size = 0; - list_for_each_entry(res_stream, &rspec->stream_list, rspec_node) - if (res_stream->will_be_in_output_wim) - write_size += res_stream->size; + list_for_each_entry(res_blob, &rdesc->blob_list, rdesc_node) + if (res_blob->will_be_in_output_wim) + write_size += res_blob->size; - return (write_size > rspec->uncompressed_size * 2 / 3); + return (write_size > rdesc->uncompressed_size * 2 / 3); } return false; } -static u8 -filter_resource_flags(u8 flags) +static u32 +reshdr_flags_for_blob(const struct blob_descriptor *blob) { - return (flags & ~(WIM_RESHDR_FLAG_PACKED_STREAMS | - WIM_RESHDR_FLAG_COMPRESSED | - WIM_RESHDR_FLAG_SPANNED | - WIM_RESHDR_FLAG_FREE)); + u32 reshdr_flags = 0; + if (blob->is_metadata) + reshdr_flags |= WIM_RESHDR_FLAG_METADATA; + return reshdr_flags; } static void -stream_set_out_reshdr_for_reuse(struct wim_lookup_table_entry *lte) +blob_set_out_reshdr_for_reuse(struct blob_descriptor *blob) { - const struct wim_resource_spec *rspec; + const struct wim_resource_descriptor *rdesc; - wimlib_assert(lte->resource_location == RESOURCE_IN_WIM); - rspec = lte->rspec; + wimlib_assert(blob->blob_location == BLOB_IN_WIM); + rdesc = blob->rdesc; - if (rspec->flags & WIM_RESHDR_FLAG_PACKED_STREAMS) { + if (rdesc->flags & WIM_RESHDR_FLAG_SOLID) { + blob->out_reshdr.offset_in_wim = blob->offset_in_res; + blob->out_reshdr.uncompressed_size = 0; + blob->out_reshdr.size_in_wim = blob->size; - wimlib_assert(lte->flags & WIM_RESHDR_FLAG_PACKED_STREAMS); - - lte->out_reshdr.offset_in_wim = lte->offset_in_res; - lte->out_reshdr.uncompressed_size = 0; - lte->out_reshdr.size_in_wim = lte->size; - - lte->out_res_offset_in_wim = rspec->offset_in_wim; - lte->out_res_size_in_wim = rspec->size_in_wim; - lte->out_res_uncompressed_size = rspec->uncompressed_size; + blob->out_res_offset_in_wim = rdesc->offset_in_wim; + blob->out_res_size_in_wim = rdesc->size_in_wim; + blob->out_res_uncompressed_size = rdesc->uncompressed_size; } else { - wimlib_assert(!(lte->flags & WIM_RESHDR_FLAG_PACKED_STREAMS)); - - lte->out_reshdr.offset_in_wim = rspec->offset_in_wim; - lte->out_reshdr.uncompressed_size = rspec->uncompressed_size; - lte->out_reshdr.size_in_wim = rspec->size_in_wim; + blob->out_reshdr.offset_in_wim = rdesc->offset_in_wim; + blob->out_reshdr.uncompressed_size = rdesc->uncompressed_size; + blob->out_reshdr.size_in_wim = rdesc->size_in_wim; } - lte->out_reshdr.flags = lte->flags; + blob->out_reshdr.flags = rdesc->flags; } -/* Write the header for a stream in a pipable WIM. */ +/* Write the header for a blob in a pipable WIM. 
*/ static int -write_pwm_stream_header(const struct wim_lookup_table_entry *lte, - struct filedes *out_fd, - int additional_reshdr_flags) +write_pwm_blob_header(const struct blob_descriptor *blob, + struct filedes *out_fd, bool compressed) { - struct pwm_stream_hdr stream_hdr; + struct pwm_blob_hdr blob_hdr; u32 reshdr_flags; int ret; - stream_hdr.magic = cpu_to_le64(PWM_STREAM_MAGIC); - stream_hdr.uncompressed_size = cpu_to_le64(lte->size); - if (additional_reshdr_flags & PWM_RESHDR_FLAG_UNHASHED) { - zero_out_hash(stream_hdr.hash); - } else { - wimlib_assert(!lte->unhashed); - copy_hash(stream_hdr.hash, lte->hash); - } + wimlib_assert(!blob->unhashed); - reshdr_flags = filter_resource_flags(lte->flags); - reshdr_flags |= additional_reshdr_flags; - stream_hdr.flags = cpu_to_le32(reshdr_flags); - ret = full_write(out_fd, &stream_hdr, sizeof(stream_hdr)); + blob_hdr.magic = cpu_to_le64(PWM_BLOB_MAGIC); + blob_hdr.uncompressed_size = cpu_to_le64(blob->size); + copy_hash(blob_hdr.hash, blob->hash); + reshdr_flags = reshdr_flags_for_blob(blob); + if (compressed) + reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED; + blob_hdr.flags = cpu_to_le32(reshdr_flags); + ret = full_write(out_fd, &blob_hdr, sizeof(blob_hdr)); if (ret) ERROR_WITH_ERRNO("Write error"); return ret; } -struct write_streams_progress_data { +struct write_blobs_progress_data { wimlib_progress_func_t progfunc; void *progctx; union wimlib_progress_info progress; @@ -284,10 +278,8 @@ struct write_streams_progress_data { }; static int -do_write_streams_progress(struct write_streams_progress_data *progress_data, - u64 complete_size, - u32 complete_count, - bool discarded) +do_write_blobs_progress(struct write_blobs_progress_data *progress_data, + u64 complete_size, u32 complete_count, bool discarded) { union wimlib_progress_info *progress = &progress_data->progress; int ret; @@ -344,13 +336,13 @@ do_write_streams_progress(struct write_streams_progress_data *progress_data, return 0; } -struct write_streams_ctx { - /* File descriptor the streams are being written to. */ +struct write_blobs_ctx { + /* File descriptor to which the blobs are being written. */ struct filedes *out_fd; - /* Lookup table for the WIMStruct on whose behalf the streams are being + /* Blob table for the WIMStruct on whose behalf the blobs are being * written. */ - struct wim_lookup_table *lookup_table; + struct blob_table *blob_table; /* Compression format to use. */ int out_ctype; @@ -358,11 +350,11 @@ struct write_streams_ctx { /* Maximum uncompressed chunk size in compressed resources to use. */ u32 out_chunk_size; - /* Flags that affect how the streams will be written. */ + /* Flags that affect how the blobs will be written. */ int write_resource_flags; /* Data used for issuing WRITE_STREAMS progress. */ - struct write_streams_progress_data progress_data; + struct write_blobs_progress_data progress_data; struct filter_context *filter_ctx; @@ -375,28 +367,29 @@ struct write_streams_ctx { * uncompressed. */ struct chunk_compressor *compressor; - /* Buffer for dividing the read data into chunks of size - * @out_chunk_size. */ - u8 *chunk_buf; + /* A buffer of size @out_chunk_size that has been loaned out from the + * chunk compressor and is currently being filled with the uncompressed + * data of the next chunk. */ + u8 *cur_chunk_buf; - /* Number of bytes in @chunk_buf that are currently filled. */ - size_t chunk_buf_filled; + /* Number of bytes in @cur_chunk_buf that are currently filled. 
*/ + size_t cur_chunk_buf_filled; - /* List of streams that currently have chunks being compressed. */ - struct list_head pending_streams; + /* List of blobs that currently have chunks being compressed. */ + struct list_head blobs_being_compressed; - /* List of streams in the resource pack. Streams are moved here after - * @pending_streams only when writing a packed resource. */ - struct list_head pack_streams; + /* List of blobs in the solid resource. Blobs are moved here after + * @blobs_being_compressed only when writing a solid resource. */ + struct list_head blobs_in_solid_resource; - /* Current uncompressed offset in the stream being read. */ - u64 cur_read_stream_offset; + /* Current uncompressed offset in the blob being read. */ + u64 cur_read_blob_offset; - /* Uncompressed size of the stream currently being read. */ - u64 cur_read_stream_size; + /* Uncompressed size of the blob currently being read. */ + u64 cur_read_blob_size; - /* Current uncompressed offset in the stream being written. */ - u64 cur_write_stream_offset; + /* Current uncompressed offset in the blob being written. */ + u64 cur_write_blob_offset; /* Uncompressed size of resource currently being written. */ u64 cur_write_res_size; @@ -419,7 +412,7 @@ struct write_streams_ctx { /* Reserve space for the chunk table and prepare to accumulate the chunk table * in memory. */ static int -begin_chunk_table(struct write_streams_ctx *ctx, u64 res_expected_size) +begin_chunk_table(struct write_blobs_ctx *ctx, u64 res_expected_size) { u64 expected_num_chunks; u64 expected_num_chunk_entries; @@ -428,12 +421,12 @@ begin_chunk_table(struct write_streams_ctx *ctx, u64 res_expected_size) /* Calculate the number of chunks and chunk entries that should be * needed for the resource. These normally will be the final values, - * but in PACKED_STREAMS mode some of the streams we're planning to - * write into the resource may be duplicates, and therefore discarded, - * potentially decreasing the number of chunk entries needed. */ + * but in SOLID mode some of the blobs we're planning to write into the + * resource may be duplicates, and therefore discarded, potentially + * decreasing the number of chunk entries needed. */ expected_num_chunks = DIV_ROUND_UP(res_expected_size, ctx->out_chunk_size); expected_num_chunk_entries = expected_num_chunks; - if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS)) + if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)) expected_num_chunk_entries--; /* Make sure the chunk_csizes array is long enough to store the @@ -460,16 +453,16 @@ begin_chunk_table(struct write_streams_ctx *ctx, u64 res_expected_size) if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE)) { /* Reserve space for the chunk table in the output file. In the - * case of packed resources this reserves the upper bound for - * the needed space, not necessarily the exact space which will + * case of solid resources this reserves the upper bound for the + * needed space, not necessarily the exact space which will * prove to be needed. At this point, we just use @chunk_csizes * for a buffer of 0's because the actual compressed chunk sizes * are unknown. 
*/ reserve_size = expected_num_chunk_entries * get_chunk_entry_size(res_expected_size, 0 != (ctx->write_resource_flags & - WRITE_RESOURCE_FLAG_PACK_STREAMS)); - if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) + WRITE_RESOURCE_FLAG_SOLID)); + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) reserve_size += sizeof(struct alt_chunk_table_header_disk); memset(ctx->chunk_csizes, 0, reserve_size); ret = full_write(ctx->out_fd, ctx->chunk_csizes, reserve_size); @@ -480,7 +473,7 @@ begin_chunk_table(struct write_streams_ctx *ctx, u64 res_expected_size) } static int -begin_write_resource(struct write_streams_ctx *ctx, u64 res_expected_size) +begin_write_resource(struct write_blobs_ctx *ctx, u64 res_expected_size) { int ret; @@ -495,13 +488,13 @@ begin_write_resource(struct write_streams_ctx *ctx, u64 res_expected_size) /* Output file descriptor is now positioned at the offset at which to * write the first chunk of the resource. */ ctx->chunks_start_offset = ctx->out_fd->offset; - ctx->cur_write_stream_offset = 0; + ctx->cur_write_blob_offset = 0; ctx->cur_write_res_size = res_expected_size; return 0; } static int -end_chunk_table(struct write_streams_ctx *ctx, u64 res_actual_size, +end_chunk_table(struct write_blobs_ctx *ctx, u64 res_actual_size, u64 *res_start_offset_ret, u64 *res_store_size_ret) { size_t actual_num_chunks; @@ -511,12 +504,12 @@ end_chunk_table(struct write_streams_ctx *ctx, u64 res_actual_size, actual_num_chunks = ctx->chunk_index; actual_num_chunk_entries = actual_num_chunks; - if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS)) + if (!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)) actual_num_chunk_entries--; chunk_entry_size = get_chunk_entry_size(res_actual_size, 0 != (ctx->write_resource_flags & - WRITE_RESOURCE_FLAG_PACK_STREAMS)); + WRITE_RESOURCE_FLAG_SOLID)); typedef le64 _may_alias_attribute aliased_le64_t; typedef le32 _may_alias_attribute aliased_le32_t; @@ -524,7 +517,7 @@ end_chunk_table(struct write_streams_ctx *ctx, u64 res_actual_size, if (chunk_entry_size == 4) { aliased_le32_t *entries = (aliased_le32_t*)ctx->chunk_csizes; - if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) { + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { for (size_t i = 0; i < actual_num_chunk_entries; i++) entries[i] = cpu_to_le32(ctx->chunk_csizes[i]); } else { @@ -538,7 +531,7 @@ end_chunk_table(struct write_streams_ctx *ctx, u64 res_actual_size, } else { aliased_le64_t *entries = (aliased_le64_t*)ctx->chunk_csizes; - if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) { + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { for (size_t i = 0; i < actual_num_chunk_entries; i++) entries[i] = cpu_to_le64(ctx->chunk_csizes[i]); } else { @@ -568,7 +561,7 @@ end_chunk_table(struct write_streams_ctx *ctx, u64 res_actual_size, chunk_table_offset = ctx->chunks_start_offset - chunk_table_size; - if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) { + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { struct alt_chunk_table_header_disk hdr; hdr.res_usize = cpu_to_le64(res_actual_size); @@ -607,15 +600,15 @@ write_error: /* Finish writing a WIM resource by writing or updating the chunk table (if not * writing the data uncompressed) and loading its metadata into @out_reshdr. 
*/ static int -end_write_resource(struct write_streams_ctx *ctx, struct wim_reshdr *out_reshdr) +end_write_resource(struct write_blobs_ctx *ctx, struct wim_reshdr *out_reshdr) { int ret; u64 res_size_in_wim; u64 res_uncompressed_size; u64 res_offset_in_wim; - wimlib_assert(ctx->cur_write_stream_offset == ctx->cur_write_res_size || - (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS)); + wimlib_assert(ctx->cur_write_blob_offset == ctx->cur_write_res_size || + (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)); res_uncompressed_size = ctx->cur_write_res_size; if (ctx->compressor) { @@ -635,7 +628,7 @@ end_write_resource(struct write_streams_ctx *ctx, struct wim_reshdr *out_reshdr) return 0; } -/* No more data streams of the file at @path are needed. */ +/* Call when no more data from the file at @path is needed. */ static int done_with_file(const tchar *path, wimlib_progress_func_t progfunc, void *progctx) { @@ -647,28 +640,17 @@ done_with_file(const tchar *path, wimlib_progress_func_t progfunc, void *progctx &info, progctx); } -static inline bool -is_file_stream(const struct wim_lookup_table_entry *lte) -{ - return lte->resource_location == RESOURCE_IN_FILE_ON_DISK -#ifdef __WIN32__ - || lte->resource_location == RESOURCE_IN_WINNT_FILE_ON_DISK - || lte->resource_location == RESOURCE_WIN32_ENCRYPTED -#endif - ; -} - static int -do_done_with_stream(struct wim_lookup_table_entry *lte, - wimlib_progress_func_t progfunc, void *progctx) +do_done_with_blob(struct blob_descriptor *blob, + wimlib_progress_func_t progfunc, void *progctx) { int ret; struct wim_inode *inode; - if (!lte->may_send_done_with_file) + if (!blob->may_send_done_with_file) return 0; - inode = lte->file_inode; + inode = blob->file_inode; wimlib_assert(inode != NULL); wimlib_assert(inode->num_remaining_streams > 0); @@ -678,14 +660,14 @@ do_done_with_stream(struct wim_lookup_table_entry *lte, #ifdef __WIN32__ /* XXX: This logic really should be somewhere else. */ - /* We want the path to the file, but lte->file_on_disk might actually + /* We want the path to the file, but blob->file_on_disk might actually * refer to a named data stream. Temporarily strip the named data * stream from the path. */ wchar_t *p_colon = NULL; wchar_t *p_question_mark = NULL; const wchar_t *p_stream_name; - p_stream_name = path_stream_name(lte->file_on_disk); + p_stream_name = path_stream_name(blob->file_on_disk); if (unlikely(p_stream_name)) { p_colon = (wchar_t *)(p_stream_name - 1); wimlib_assert(*p_colon == L':'); } /* We also should use a fake Win32 path instead of a NT path */ - if (!wcsncmp(lte->file_on_disk, L"\\??\\", 4)) { - p_question_mark = &lte->file_on_disk[1]; + if (!wcsncmp(blob->file_on_disk, L"\\??\\", 4)) { + p_question_mark = &blob->file_on_disk[1]; *p_question_mark = L'\\'; } #endif - ret = done_with_file(lte->file_on_disk, progfunc, progctx); + ret = done_with_file(blob->file_on_disk, progfunc, progctx); #ifdef __WIN32__ if (p_colon) @@ -712,115 +694,111 @@ do_done_with_stream(struct wim_lookup_table_entry *lte, /* Handle WIMLIB_WRITE_FLAG_SEND_DONE_WITH_FILE_MESSAGES mode. 
*/ static inline int -done_with_stream(struct wim_lookup_table_entry *lte, - struct write_streams_ctx *ctx) +done_with_blob(struct blob_descriptor *blob, struct write_blobs_ctx *ctx) { if (likely(!(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE))) return 0; - return do_done_with_stream(lte, ctx->progress_data.progfunc, - ctx->progress_data.progctx); + return do_done_with_blob(blob, ctx->progress_data.progfunc, + ctx->progress_data.progctx); } -/* Begin processing a stream for writing. */ +/* Begin processing a blob for writing. */ static int -write_stream_begin_read(struct wim_lookup_table_entry *lte, void *_ctx) +write_blob_begin_read(struct blob_descriptor *blob, void *_ctx) { - struct write_streams_ctx *ctx = _ctx; + struct write_blobs_ctx *ctx = _ctx; int ret; - wimlib_assert(lte->size > 0); + wimlib_assert(blob->size > 0); - ctx->cur_read_stream_offset = 0; - ctx->cur_read_stream_size = lte->size; + ctx->cur_read_blob_offset = 0; + ctx->cur_read_blob_size = blob->size; - /* As an optimization, we allow some streams to be "unhashed", meaning - * their SHA1 message digests are unknown. This is the case with - * streams that are added by scanning a directry tree with - * wimlib_add_image(), for example. Since WIM uses single-instance - * streams, we don't know whether such each such stream really need to - * written until it is actually checksummed, unless it has a unique - * size. In such cases we read and checksum the stream in this - * function, thereby advancing ahead of read_stream_list(), which will - * still provide the data again to write_stream_process_chunk(). This - * is okay because an unhashed stream cannot be in a WIM resource, which - * might be costly to decompress. */ - if (ctx->lookup_table != NULL && lte->unhashed && !lte->unique_size) { + /* As an optimization, we allow some blobs to be "unhashed", meaning + * their SHA-1 message digests are unknown. This is the case with blobs + * that are added by scanning a directory tree with wimlib_add_image(), + * for example. Since WIM uses single-instance blobs, we don't know + * whether each such blob really needs to be written until it is + * actually checksummed, unless it has a unique size. In such cases we + * read and checksum the blob in this function, thereby advancing ahead + * of read_blob_list(), which will still provide the data again to + * write_blob_process_chunk(). This is okay because an unhashed blob + * cannot be in a WIM resource, which might be costly to decompress. */ + if (ctx->blob_table != NULL && blob->unhashed && !blob->unique_size) { - struct wim_lookup_table_entry *lte_new; + struct blob_descriptor *new_blob; - ret = hash_unhashed_stream(lte, ctx->lookup_table, &lte_new); + ret = hash_unhashed_blob(blob, ctx->blob_table, &new_blob); if (ret) return ret; - if (lte_new != lte) { - /* Duplicate stream detected. */ + if (new_blob != blob) { + /* Duplicate blob detected. */ - if (lte_new->will_be_in_output_wim || - stream_filtered(lte_new, ctx->filter_ctx)) + if (new_blob->will_be_in_output_wim || + blob_filtered(new_blob, ctx->filter_ctx)) { - /* The duplicate stream is already being - * included in the output WIM, or it would be - * filtered out if it had been. Skip writing - * this stream (and reading it again) entirely, - * passing its output reference count to the - * duplicate stream in the former case. 
*/ - DEBUG("Discarding duplicate stream of " - "length %"PRIu64, lte->size); - ret = do_write_streams_progress(&ctx->progress_data, - lte->size, - 1, true); - list_del(&lte->write_streams_list); - list_del(&lte->lookup_table_list); - if (lte_new->will_be_in_output_wim) - lte_new->out_refcnt += lte->out_refcnt; - if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) - ctx->cur_write_res_size -= lte->size; + /* The duplicate blob is already being included + * in the output WIM, or it would be filtered + * out if it had been. Skip writing this blob + * (and reading it again) entirely, passing its + * output reference count to the duplicate blob + * in the former case. */ + DEBUG("Discarding duplicate blob of " + "length %"PRIu64, blob->size); + ret = do_write_blobs_progress(&ctx->progress_data, + blob->size, 1, true); + list_del(&blob->write_blobs_list); + list_del(&blob->blob_table_list); + if (new_blob->will_be_in_output_wim) + new_blob->out_refcnt += blob->out_refcnt; + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) + ctx->cur_write_res_size -= blob->size; if (!ret) - ret = done_with_stream(lte, ctx); - free_lookup_table_entry(lte); + ret = done_with_blob(blob, ctx); + free_blob_descriptor(blob); if (ret) return ret; - return BEGIN_STREAM_STATUS_SKIP_STREAM; + return BEGIN_BLOB_STATUS_SKIP_BLOB; } else { - /* The duplicate stream can validly be written, + /* The duplicate blob can validly be written, * but was not marked as such. Discard the - * current stream entry and use the duplicate, - * but actually freeing the current entry must - * wait until read_stream_list() has finished - * reading its data. */ - DEBUG("Stream duplicate, but not already " + * current blob descriptor and use the + * duplicate, but actually freeing the current + * blob descriptor must wait until + * read_blob_list() has finished reading its + * data. */ + DEBUG("Blob duplicate, but not already " "selected for writing."); - list_replace(&lte->write_streams_list, - &lte_new->write_streams_list); - list_replace(&lte->lookup_table_list, - &lte_new->lookup_table_list); - lte->will_be_in_output_wim = 0; - lte_new->out_refcnt = lte->out_refcnt; - lte_new->will_be_in_output_wim = 1; - lte_new->may_send_done_with_file = 0; - lte = lte_new; + list_replace(&blob->write_blobs_list, + &new_blob->write_blobs_list); + list_replace(&blob->blob_table_list, + &new_blob->blob_table_list); + blob->will_be_in_output_wim = 0; + new_blob->out_refcnt = blob->out_refcnt; + new_blob->will_be_in_output_wim = 1; + new_blob->may_send_done_with_file = 0; + blob = new_blob; } } } - list_move_tail(&lte->write_streams_list, &ctx->pending_streams); + list_move_tail(&blob->write_blobs_list, &ctx->blobs_being_compressed); return 0; } -/* Rewrite a stream that was just written compressed as uncompressed instead. - * This function is optional, but if a stream did not compress to less than its - * original size, it might as well be written uncompressed. */ +/* Rewrite a blob that was just written compressed as uncompressed instead. 
+ */ static int -write_stream_uncompressed(struct wim_lookup_table_entry *lte, - struct filedes *out_fd) +write_blob_uncompressed(struct blob_descriptor *blob, struct filedes *out_fd) { int ret; - u64 begin_offset = lte->out_reshdr.offset_in_wim; + u64 begin_offset = blob->out_reshdr.offset_in_wim; u64 end_offset = out_fd->offset; if (filedes_seek(out_fd, begin_offset) == -1) return 0; - ret = extract_full_stream_to_fd(lte, out_fd); + ret = extract_full_blob_to_fd(blob, out_fd); if (ret) { /* Error reading the uncompressed data. */ if (out_fd->offset == begin_offset && @@ -830,15 +808,14 @@ write_stream_uncompressed(struct wim_lookup_table_entry *lte, * seeked to the end of the compressed resource, so * don't issue a hard error; just keep the compressed * resource instead. */ - WARNING("Recovered compressed stream of " - "size %"PRIu64", continuing on.", - lte->size); + WARNING("Recovered compressed blob of " + "size %"PRIu64", continuing on.", blob->size); return 0; } return ret; } - wimlib_assert(out_fd->offset - begin_offset == lte->size); + wimlib_assert(out_fd->offset - begin_offset == blob->size); if (out_fd->offset < end_offset && 0 != ftruncate(out_fd->fd, out_fd->offset)) @@ -848,22 +825,22 @@ write_stream_uncompressed(struct wim_lookup_table_entry *lte, return WIMLIB_ERR_WRITE; } - lte->out_reshdr.size_in_wim = lte->size; - lte->out_reshdr.flags &= ~(WIM_RESHDR_FLAG_COMPRESSED | - WIM_RESHDR_FLAG_PACKED_STREAMS); + blob->out_reshdr.size_in_wim = blob->size; + blob->out_reshdr.flags &= ~(WIM_RESHDR_FLAG_COMPRESSED | + WIM_RESHDR_FLAG_SOLID); return 0; } -/* Returns true if the specified stream should be truncated from the WIM file - * and re-written as uncompressed. lte->out_reshdr must be filled in from the - * initial write of the stream. */ +/* Returns true if the specified blob, which was written as a non-solid + * resource, should be truncated from the WIM file and re-written uncompressed. + * blob->out_reshdr must be filled in from the initial write of the blob. */ static bool -should_rewrite_stream_uncompressed(const struct write_streams_ctx *ctx, - const struct wim_lookup_table_entry *lte) +should_rewrite_blob_uncompressed(const struct write_blobs_ctx *ctx, + const struct blob_descriptor *blob) { /* If the compressed data is smaller than the uncompressed data, prefer * the compressed data. */ - if (lte->out_reshdr.size_in_wim < lte->out_reshdr.uncompressed_size) + if (blob->out_reshdr.size_in_wim < blob->out_reshdr.uncompressed_size) return false; /* If we're not actually writing compressed data, then there's no need @@ -876,34 +853,35 @@ should_rewrite_stream_uncompressed(const struct write_streams_ctx *ctx, if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) return false; - /* If the stream that would need to be re-read is located in a solid - * block in another WIM file, then re-reading it would be costly. So + /* If the blob that would need to be re-read is located in a solid + * resource in another WIM file, then re-reading it would be costly. So * don't do it. * * Exception: if the compressed size happens to be *exactly* the same as - * the uncompressed size, then the stream *must* be written uncompressed + * the uncompressed size, then the blob *must* be written uncompressed * in order to remain compatible with the Windows Overlay Filesystem * Filter Driver (WOF). * * TODO: we are currently assuming that the optimization for - * single-chunk resources in maybe_rewrite_stream_uncompressed() - * prevents this case from being triggered too often. 
To fully prevent - * excessive decompressions in degenerate cases, we really should - * obtain the uncompressed data by decompressing the compressed data we - * wrote to the output file. + * single-chunk resources in maybe_rewrite_blob_uncompressed() prevents + * this case from being triggered too often. To fully prevent excessive + * decompressions in degenerate cases, we really should obtain the + * uncompressed data by decompressing the compressed data we wrote to + * the output file. */ - if ((lte->flags & WIM_RESHDR_FLAG_PACKED_STREAMS) && - (lte->out_reshdr.size_in_wim != lte->out_reshdr.uncompressed_size)) + if (blob->blob_location == BLOB_IN_WIM && + blob->size != blob->rdesc->uncompressed_size && + blob->size != blob->out_reshdr.size_in_wim) return false; return true; } static int -maybe_rewrite_stream_uncompressed(struct write_streams_ctx *ctx, - struct wim_lookup_table_entry *lte) +maybe_rewrite_blob_uncompressed(struct write_blobs_ctx *ctx, + struct blob_descriptor *blob) { - if (!should_rewrite_stream_uncompressed(ctx, lte)) + if (!should_rewrite_blob_uncompressed(ctx, blob)) return 0; /* Regular (non-solid) WIM resources with exactly one chunk and @@ -912,50 +890,44 @@ maybe_rewrite_stream_uncompressed(struct write_streams_ctx *ctx, * in the chunk table and the only chunk must be stored uncompressed. * In this case, there's no need to rewrite anything. */ if (ctx->chunk_index == 1 && - lte->out_reshdr.size_in_wim == lte->out_reshdr.uncompressed_size) + blob->out_reshdr.size_in_wim == blob->out_reshdr.uncompressed_size) { - lte->out_reshdr.flags &= ~WIM_RESHDR_FLAG_COMPRESSED; + blob->out_reshdr.flags &= ~WIM_RESHDR_FLAG_COMPRESSED; return 0; } - return write_stream_uncompressed(lte, ctx->out_fd); + return write_blob_uncompressed(blob, ctx->out_fd); } /* Write the next chunk of (typically compressed) data to the output WIM, * handling the writing of the chunk table. */ static int -write_chunk(struct write_streams_ctx *ctx, const void *cchunk, +write_chunk(struct write_blobs_ctx *ctx, const void *cchunk, size_t csize, size_t usize) { int ret; - - struct wim_lookup_table_entry *lte; - u32 completed_stream_count; + struct blob_descriptor *blob; + u32 completed_blob_count; u32 completed_size; - lte = list_entry(ctx->pending_streams.next, - struct wim_lookup_table_entry, write_streams_list); + blob = list_entry(ctx->blobs_being_compressed.next, + struct blob_descriptor, write_blobs_list); - if (ctx->cur_write_stream_offset == 0 && - !(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS)) + if (ctx->cur_write_blob_offset == 0 && + !(ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID)) { - /* Starting to write a new stream in non-packed mode. */ + /* Starting to write a new blob in non-solid mode. 
*/ if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) { - int additional_reshdr_flags = 0; - if (ctx->compressor != NULL) - additional_reshdr_flags |= WIM_RESHDR_FLAG_COMPRESSED; - - DEBUG("Writing pipable WIM stream header " + DEBUG("Writing pipable WIM blob header " "(offset=%"PRIu64")", ctx->out_fd->offset); - - ret = write_pwm_stream_header(lte, ctx->out_fd, - additional_reshdr_flags); + ret = write_pwm_blob_header(blob, ctx->out_fd, + ctx->compressor != NULL); if (ret) return ret; } - ret = begin_write_resource(ctx, lte->size); + ret = begin_write_resource(ctx, blob->size); if (ret) return ret; } @@ -983,69 +955,68 @@ write_chunk(struct write_streams_ctx *ctx, const void *cchunk, if (ret) goto write_error; - ctx->cur_write_stream_offset += usize; + ctx->cur_write_blob_offset += usize; completed_size = usize; - completed_stream_count = 0; - if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) { - /* Wrote chunk in packed mode. It may have finished multiple - * streams. */ - struct wim_lookup_table_entry *next_lte; + completed_blob_count = 0; + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { + /* Wrote chunk in solid mode. It may have finished multiple + * blobs. */ + struct blob_descriptor *next_blob; - while (lte && ctx->cur_write_stream_offset >= lte->size) { - ctx->cur_write_stream_offset -= lte->size; - if (ctx->cur_write_stream_offset) - next_lte = list_entry(lte->write_streams_list.next, - struct wim_lookup_table_entry, - write_streams_list); + while (blob && ctx->cur_write_blob_offset >= blob->size) { + ctx->cur_write_blob_offset -= blob->size; + if (ctx->cur_write_blob_offset) + next_blob = list_entry(blob->write_blobs_list.next, + struct blob_descriptor, + write_blobs_list); else - next_lte = NULL; + next_blob = NULL; - ret = done_with_stream(lte, ctx); + ret = done_with_blob(blob, ctx); if (ret) return ret; - list_move_tail(&lte->write_streams_list, &ctx->pack_streams); - completed_stream_count++; + list_move_tail(&blob->write_blobs_list, &ctx->blobs_in_solid_resource); + completed_blob_count++; - lte = next_lte; + blob = next_blob; } } else { - /* Wrote chunk in non-packed mode. It may have finished a - * stream. + /* Wrote chunk in non-solid mode. It may have finished a + * blob. 
*/ if (ctx->cur_write_blob_offset == blob->size) { - wimlib_assert(ctx->cur_write_stream_offset == + wimlib_assert(ctx->cur_write_blob_offset == ctx->cur_write_res_size); - ret = end_write_resource(ctx, &lte->out_reshdr); + ret = end_write_resource(ctx, &blob->out_reshdr); if (ret) return ret; - lte->out_reshdr.flags = filter_resource_flags(lte->flags); + blob->out_reshdr.flags = reshdr_flags_for_blob(blob); if (ctx->compressor != NULL) - lte->out_reshdr.flags |= WIM_RESHDR_FLAG_COMPRESSED; + blob->out_reshdr.flags |= WIM_RESHDR_FLAG_COMPRESSED; - ret = maybe_rewrite_stream_uncompressed(ctx, lte); + ret = maybe_rewrite_blob_uncompressed(ctx, blob); if (ret) return ret; - wimlib_assert(lte->out_reshdr.uncompressed_size == lte->size); + wimlib_assert(blob->out_reshdr.uncompressed_size == blob->size); - ctx->cur_write_stream_offset = 0; + ctx->cur_write_blob_offset = 0; - ret = done_with_stream(lte, ctx); + ret = done_with_blob(blob, ctx); if (ret) return ret; - list_del(&lte->write_streams_list); - completed_stream_count++; + list_del(&blob->write_blobs_list); + completed_blob_count++; } } - return do_write_streams_progress(&ctx->progress_data, - completed_size, completed_stream_count, - false); + return do_write_blobs_progress(&ctx->progress_data, completed_size, + completed_blob_count, false); write_error: ERROR_WITH_ERRNO("Write error"); @@ -1053,22 +1024,23 @@ write_error: } static int -submit_chunk_for_compression(struct write_streams_ctx *ctx, - const void *chunk, size_t size) +prepare_chunk_buffer(struct write_blobs_ctx *ctx) { - /* While we are unable to submit the chunk for compression (due to too - * many chunks already outstanding), retrieve and write the next - * compressed chunk. */ - while (!ctx->compressor->submit_chunk(ctx->compressor, chunk, size)) { + /* While we are unable to get a new chunk buffer due to too many chunks + * already outstanding, retrieve and write the next compressed chunk. */ + while (!(ctx->cur_chunk_buf = + ctx->compressor->get_chunk_buffer(ctx->compressor))) + { const void *cchunk; u32 csize; u32 usize; bool bret; int ret; - bret = ctx->compressor->get_chunk(ctx->compressor, - &cchunk, &csize, &usize); - + bret = ctx->compressor->get_compression_result(ctx->compressor, + &cchunk, + &csize, + &usize); wimlib_assert(bret); ret = write_chunk(ctx, cchunk, csize, usize); @@ -1080,9 +1052,9 @@ submit_chunk_for_compression(struct write_streams_ctx *ctx, /* Process the next chunk of data to be written to a WIM resource. 
*/ static int -write_stream_process_chunk(const void *chunk, size_t size, void *_ctx) +write_blob_process_chunk(const void *chunk, size_t size, void *_ctx) { - struct write_streams_ctx *ctx = _ctx; + struct write_blobs_ctx *ctx = _ctx; int ret; const u8 *chunkptr, *chunkend; @@ -1093,7 +1065,7 @@ write_stream_process_chunk(const void *chunk, size_t size, void *_ctx) ret = write_chunk(ctx, chunk, size, size); if (ret) return ret; - ctx->cur_read_stream_offset += size; + ctx->cur_read_blob_offset += size; return 0; } @@ -1103,120 +1075,105 @@ write_stream_process_chunk(const void *chunk, size_t size, void *_ctx) chunkptr = chunk; chunkend = chunkptr + size; do { - const u8 *resized_chunk; size_t needed_chunk_size; + size_t bytes_consumed; + + if (!ctx->cur_chunk_buf) { + ret = prepare_chunk_buffer(ctx); + if (ret) + return ret; + } - if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) { + if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { needed_chunk_size = ctx->out_chunk_size; } else { - u64 res_bytes_remaining; - - res_bytes_remaining = ctx->cur_read_stream_size - - ctx->cur_read_stream_offset; needed_chunk_size = min(ctx->out_chunk_size, - ctx->chunk_buf_filled + - res_bytes_remaining); + ctx->cur_chunk_buf_filled + + (ctx->cur_read_blob_size - + ctx->cur_read_blob_offset)); } - if (ctx->chunk_buf_filled == 0 && - chunkend - chunkptr >= needed_chunk_size) - { - /* No intermediate buffering needed. */ - resized_chunk = chunkptr; - chunkptr += needed_chunk_size; - ctx->cur_read_stream_offset += needed_chunk_size; - } else { - /* Intermediate buffering needed. */ - size_t bytes_consumed; - - bytes_consumed = min(chunkend - chunkptr, - needed_chunk_size - ctx->chunk_buf_filled); + bytes_consumed = min(chunkend - chunkptr, + needed_chunk_size - ctx->cur_chunk_buf_filled); - memcpy(&ctx->chunk_buf[ctx->chunk_buf_filled], - chunkptr, bytes_consumed); + memcpy(&ctx->cur_chunk_buf[ctx->cur_chunk_buf_filled], + chunkptr, bytes_consumed); - chunkptr += bytes_consumed; - ctx->cur_read_stream_offset += bytes_consumed; - ctx->chunk_buf_filled += bytes_consumed; - if (ctx->chunk_buf_filled == needed_chunk_size) { - resized_chunk = ctx->chunk_buf; - ctx->chunk_buf_filled = 0; - } else { - break; - } + chunkptr += bytes_consumed; + ctx->cur_read_blob_offset += bytes_consumed; + ctx->cur_chunk_buf_filled += bytes_consumed; + if (ctx->cur_chunk_buf_filled == needed_chunk_size) { + ctx->compressor->signal_chunk_filled(ctx->compressor, + ctx->cur_chunk_buf_filled); + ctx->cur_chunk_buf = NULL; + ctx->cur_chunk_buf_filled = 0; } - - ret = submit_chunk_for_compression(ctx, resized_chunk, - needed_chunk_size); - if (ret) - return ret; - } while (chunkptr != chunkend); return 0; } -/* Finish processing a stream for writing. It may not have been completely +/* Finish processing a blob for writing. It may not have been completely * written yet, as the chunk_compressor implementation may still have chunks * buffered or being compressed. */ static int -write_stream_end_read(struct wim_lookup_table_entry *lte, int status, void *_ctx) +write_blob_end_read(struct blob_descriptor *blob, int status, void *_ctx) { - struct write_streams_ctx *ctx = _ctx; + struct write_blobs_ctx *ctx = _ctx; - wimlib_assert(ctx->cur_read_stream_offset == ctx->cur_read_stream_size || status); + wimlib_assert(ctx->cur_read_blob_offset == ctx->cur_read_blob_size || status); - if (!lte->will_be_in_output_wim) { - /* The 'lte' stream was a duplicate. 
Now that its data has - * finished being read, it is being discarded in favor of the - * duplicate entry. It therefore is no longer needed, and we - * can fire the DONE_WITH_FILE callback because the file will - * not be read again. + if (!blob->will_be_in_output_wim) { + /* The blob was a duplicate. Now that its data has finished + * being read, it is being discarded in favor of the duplicate + * entry. It therefore is no longer needed, and we can fire the + * DONE_WITH_FILE callback because the file will not be read + * again. * * Note: we can't yet fire DONE_WITH_FILE for non-duplicate - * streams, since it needs to be possible to re-read the file if + * blobs, since it needs to be possible to re-read the file if * it does not compress to less than its original size. */ if (!status) - status = done_with_stream(lte, ctx); - free_lookup_table_entry(lte); - } else if (!status && lte->unhashed && ctx->lookup_table != NULL) { - /* The 'lte' stream was not a duplicate and was previously - * unhashed. Since we passed COMPUTE_MISSING_STREAM_HASHES to - * read_stream_list(), lte->hash is now computed and valid. So - * turn this stream into a "hashed" stream. */ - list_del(&lte->unhashed_list); - lookup_table_insert(ctx->lookup_table, lte); - lte->unhashed = 0; + status = done_with_blob(blob, ctx); + free_blob_descriptor(blob); + } else if (!status && blob->unhashed && ctx->blob_table != NULL) { + /* The blob was not a duplicate and was previously unhashed. + * Since we passed COMPUTE_MISSING_BLOB_HASHES to + * read_blob_list(), blob->hash is now computed and valid. So + * turn this blob into a "hashed" blob. */ + list_del(&blob->unhashed_list); + blob_table_insert(ctx->blob_table, blob); + blob->unhashed = 0; } return status; } -/* Compute statistics about a list of streams that will be written. +/* Compute statistics about a list of blobs that will be written. * - * Assumes the streams are sorted such that all streams located in each distinct - * WIM (specified by WIMStruct) are together. */ + * Assumes the blobs are sorted such that all blobs located in each distinct WIM + * (specified by WIMStruct) are together. 
*/ static void -compute_stream_list_stats(struct list_head *stream_list, - struct write_streams_ctx *ctx) +compute_blob_list_stats(struct list_head *blob_list, + struct write_blobs_ctx *ctx) { - struct wim_lookup_table_entry *lte; + struct blob_descriptor *blob; u64 total_bytes = 0; - u64 num_streams = 0; + u64 num_blobs = 0; u64 total_parts = 0; WIMStruct *prev_wim_part = NULL; - list_for_each_entry(lte, stream_list, write_streams_list) { - num_streams++; - total_bytes += lte->size; - if (lte->resource_location == RESOURCE_IN_WIM) { - if (prev_wim_part != lte->rspec->wim) { - prev_wim_part = lte->rspec->wim; + list_for_each_entry(blob, blob_list, write_blobs_list) { + num_blobs++; + total_bytes += blob->size; + if (blob->blob_location == BLOB_IN_WIM) { + if (prev_wim_part != blob->rdesc->wim) { + prev_wim_part = blob->rdesc->wim; total_parts++; } } } ctx->progress_data.progress.write_streams.total_bytes = total_bytes; - ctx->progress_data.progress.write_streams.total_streams = num_streams; + ctx->progress_data.progress.write_streams.total_streams = num_blobs; ctx->progress_data.progress.write_streams.completed_bytes = 0; ctx->progress_data.progress.write_streams.completed_streams = 0; ctx->progress_data.progress.write_streams.compression_type = ctx->out_ctype; @@ -1225,41 +1182,41 @@ compute_stream_list_stats(struct list_head *stream_list, ctx->progress_data.next_progress = 0; } -/* Find streams in @stream_list that can be copied to the output WIM in raw form - * rather than compressed. Delete these streams from @stream_list and move them - * to @raw_copy_streams. Return the total uncompressed size of the streams that - * need to be compressed. */ +/* Find blobs in @blob_list that can be copied to the output WIM in raw form + * rather than compressed. Delete these blobs from @blob_list and move them to + * @raw_copy_blobs. Return the total uncompressed size of the blobs that need + * to be compressed. */ static u64 -find_raw_copy_streams(struct list_head *stream_list, - int write_resource_flags, - int out_ctype, - u32 out_chunk_size, - struct list_head *raw_copy_streams) +find_raw_copy_blobs(struct list_head *blob_list, + int write_resource_flags, + int out_ctype, + u32 out_chunk_size, + struct list_head *raw_copy_blobs) { - struct wim_lookup_table_entry *lte, *tmp; + struct blob_descriptor *blob, *tmp; u64 num_bytes_to_compress = 0; - INIT_LIST_HEAD(raw_copy_streams); + INIT_LIST_HEAD(raw_copy_blobs); /* Initialize temporary raw_copy_ok flag. 
*/ - list_for_each_entry(lte, stream_list, write_streams_list) - if (lte->resource_location == RESOURCE_IN_WIM) - lte->rspec->raw_copy_ok = 0; + list_for_each_entry(blob, blob_list, write_blobs_list) + if (blob->blob_location == BLOB_IN_WIM) + blob->rdesc->raw_copy_ok = 0; - list_for_each_entry_safe(lte, tmp, stream_list, write_streams_list) { - if (lte->resource_location == RESOURCE_IN_WIM && - lte->rspec->raw_copy_ok) + list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) { + if (blob->blob_location == BLOB_IN_WIM && + blob->rdesc->raw_copy_ok) { - list_move_tail(&lte->write_streams_list, - raw_copy_streams); + list_move_tail(&blob->write_blobs_list, + raw_copy_blobs); - } else if (can_raw_copy(lte, write_resource_flags, - out_ctype, out_chunk_size)) + } else if (can_raw_copy(blob, write_resource_flags, + out_ctype, out_chunk_size)) { - lte->rspec->raw_copy_ok = 1; - list_move_tail(&lte->write_streams_list, - raw_copy_streams); + blob->rdesc->raw_copy_ok = 1; + list_move_tail(&blob->write_blobs_list, + raw_copy_blobs); } else { - num_bytes_to_compress += lte->size; + num_bytes_to_compress += blob->size; } } @@ -1269,7 +1226,7 @@ find_raw_copy_streams(struct list_head *stream_list, /* Copy a raw compressed resource located in another WIM file to the WIM file * being written. */ static int -write_raw_copy_resource(struct wim_resource_spec *in_rspec, +write_raw_copy_resource(struct wim_resource_descriptor *in_rdesc, struct filedes *out_fd) { u64 cur_read_offset; u64 end_read_offset; @@ -1278,26 +1235,26 @@ write_raw_copy_resource(struct wim_resource_spec *in_rspec, size_t bytes_to_read; int ret; struct filedes *in_fd; - struct wim_lookup_table_entry *lte; + struct blob_descriptor *blob; u64 out_offset_in_wim; DEBUG("Copying raw compressed data (size_in_wim=%"PRIu64", " "uncompressed_size=%"PRIu64")", - in_rspec->size_in_wim, in_rspec->uncompressed_size); + in_rdesc->size_in_wim, in_rdesc->uncompressed_size); /* Copy the raw data. 
*/ - cur_read_offset = in_rspec->offset_in_wim; - end_read_offset = cur_read_offset + in_rspec->size_in_wim; + cur_read_offset = in_rdesc->offset_in_wim; + end_read_offset = cur_read_offset + in_rdesc->size_in_wim; out_offset_in_wim = out_fd->offset; - if (in_rspec->is_pipable) { - if (cur_read_offset < sizeof(struct pwm_stream_hdr)) + if (in_rdesc->is_pipable) { + if (cur_read_offset < sizeof(struct pwm_blob_hdr)) return WIMLIB_ERR_INVALID_PIPABLE_WIM; - cur_read_offset -= sizeof(struct pwm_stream_hdr); - out_offset_in_wim += sizeof(struct pwm_stream_hdr); + cur_read_offset -= sizeof(struct pwm_blob_hdr); + out_offset_in_wim += sizeof(struct pwm_blob_hdr); } - in_fd = &in_rspec->wim->in_fd; + in_fd = &in_rdesc->wim->in_fd; wimlib_assert(cur_read_offset != end_read_offset); do { @@ -1315,13 +1272,13 @@ write_raw_copy_resource(struct wim_resource_spec *in_rspec, } while (cur_read_offset != end_read_offset); - list_for_each_entry(lte, &in_rspec->stream_list, rspec_node) { - if (lte->will_be_in_output_wim) { - stream_set_out_reshdr_for_reuse(lte); - if (in_rspec->flags & WIM_RESHDR_FLAG_PACKED_STREAMS) - lte->out_res_offset_in_wim = out_offset_in_wim; + list_for_each_entry(blob, &in_rdesc->blob_list, rdesc_node) { + if (blob->will_be_in_output_wim) { + blob_set_out_reshdr_for_reuse(blob); + if (in_rdesc->flags & WIM_RESHDR_FLAG_SOLID) + blob->out_res_offset_in_wim = out_offset_in_wim; else - lte->out_reshdr.offset_in_wim = out_offset_in_wim; + blob->out_reshdr.offset_in_wim = out_offset_in_wim; } } @@ -1331,27 +1288,26 @@ write_raw_copy_resource(struct wim_resource_spec *in_rspec, /* Copy a list of raw compressed resources located in other WIM file(s) to the * WIM file being written. */ static int -write_raw_copy_resources(struct list_head *raw_copy_streams, +write_raw_copy_resources(struct list_head *raw_copy_blobs, struct filedes *out_fd, - struct write_streams_progress_data *progress_data) + struct write_blobs_progress_data *progress_data) { - struct wim_lookup_table_entry *lte; + struct blob_descriptor *blob; int ret; - list_for_each_entry(lte, raw_copy_streams, write_streams_list) - lte->rspec->raw_copy_ok = 1; + list_for_each_entry(blob, raw_copy_blobs, write_blobs_list) + blob->rdesc->raw_copy_ok = 1; - list_for_each_entry(lte, raw_copy_streams, write_streams_list) { - if (lte->rspec->raw_copy_ok) { - /* Write each packed resource only one time, no matter - * how many streams reference it. */ - ret = write_raw_copy_resource(lte->rspec, out_fd); + list_for_each_entry(blob, raw_copy_blobs, write_blobs_list) { + if (blob->rdesc->raw_copy_ok) { + /* Write each solid resource only one time. */ + ret = write_raw_copy_resource(blob->rdesc, out_fd); if (ret) return ret; - lte->rspec->raw_copy_ok = 0; + blob->rdesc->raw_copy_ok = 0; } - ret = do_write_streams_progress(progress_data, lte->size, - 1, false); + ret = do_write_blobs_progress(progress_data, blob->size, + 1, false); if (ret) return ret; } @@ -1360,7 +1316,7 @@ write_raw_copy_resources(struct list_head *raw_copy_streams, /* Wait for and write all chunks pending in the compressor. 
*/ static int -finish_remaining_chunks(struct write_streams_ctx *ctx) +finish_remaining_chunks(struct write_blobs_ctx *ctx) { const void *cdata; u32 csize; u32 usize; @@ -1370,14 +1326,14 @@ finish_remaining_chunks(struct write_streams_ctx *ctx) if (ctx->compressor == NULL) return 0; - if (ctx->chunk_buf_filled != 0) { - ret = submit_chunk_for_compression(ctx, ctx->chunk_buf, - ctx->chunk_buf_filled); - if (ret) - return ret; + if (ctx->cur_chunk_buf_filled != 0) { + ctx->compressor->signal_chunk_filled(ctx->compressor, + ctx->cur_chunk_buf_filled); } - while (ctx->compressor->get_chunk(ctx->compressor, &cdata, &csize, &usize)) { + while (ctx->compressor->get_compression_result(ctx->compressor, &cdata, + &csize, &usize)) + { ret = write_chunk(ctx, cdata, csize, usize); if (ret) return ret; @@ -1386,53 +1342,64 @@ finish_remaining_chunks(struct write_streams_ctx *ctx) } static void -remove_zero_length_streams(struct list_head *stream_list) -{ - struct wim_lookup_table_entry *lte, *tmp; - - list_for_each_entry_safe(lte, tmp, stream_list, write_streams_list) { - wimlib_assert(lte->will_be_in_output_wim); - if (lte->size == 0) { - list_del(&lte->write_streams_list); - lte->out_reshdr.offset_in_wim = 0; - lte->out_reshdr.size_in_wim = 0; - lte->out_reshdr.uncompressed_size = 0; - lte->out_reshdr.flags = filter_resource_flags(lte->flags); +remove_empty_blobs(struct list_head *blob_list) +{ + struct blob_descriptor *blob, *tmp; + + list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) { + wimlib_assert(blob->will_be_in_output_wim); + if (blob->size == 0) { + list_del(&blob->write_blobs_list); + blob->out_reshdr.offset_in_wim = 0; + blob->out_reshdr.size_in_wim = 0; + blob->out_reshdr.uncompressed_size = 0; + blob->out_reshdr.flags = reshdr_flags_for_blob(blob); } } } +static inline bool +blob_is_in_file(const struct blob_descriptor *blob) +{ + return blob->blob_location == BLOB_IN_FILE_ON_DISK +#ifdef __WIN32__ + || blob->blob_location == BLOB_IN_WINNT_FILE_ON_DISK + || blob->blob_location == BLOB_WIN32_ENCRYPTED +#endif + ; +} + static void -init_done_with_file_info(struct list_head *stream_list) +init_done_with_file_info(struct list_head *blob_list) { - struct wim_lookup_table_entry *lte; + struct blob_descriptor *blob; - list_for_each_entry(lte, stream_list, write_streams_list) { - if (is_file_stream(lte)) { - lte->file_inode->num_remaining_streams = 0; - lte->may_send_done_with_file = 1; + list_for_each_entry(blob, blob_list, write_blobs_list) { + if (blob_is_in_file(blob)) { + blob->file_inode->num_remaining_streams = 0; + blob->may_send_done_with_file = 1; } else { - lte->may_send_done_with_file = 0; + blob->may_send_done_with_file = 0; } } - list_for_each_entry(lte, stream_list, write_streams_list) - if (lte->may_send_done_with_file) - lte->file_inode->num_remaining_streams++; + list_for_each_entry(blob, blob_list, write_blobs_list) + if (blob->may_send_done_with_file) + blob->file_inode->num_remaining_streams++; } /* - * Write a list of streams to the output WIM file. + * Write a list of blobs to the output WIM file. * - * @stream_list - * The list of streams to write, specified by a list of `struct - * wim_lookup_table_entry's linked by the 'write_streams_list' member. + * @blob_list + * The list of blobs to write, specified by a list of 'struct blob_descriptor' linked + * by the 'write_blobs_list' member. * * @out_fd - * The file descriptor, opened for writing, to which to write the streams. + * The file descriptor, opened for writing, to which to write the blobs. 
* * @write_resource_flags - * Flags to modify how the streams are written: + * Flags to modify how the blobs are written: * * WRITE_RESOURCE_FLAG_RECOMPRESS: * Force compression of all resources, even if they could otherwise @@ -1444,166 +1411,166 @@ init_done_with_file_info(struct list_head *stream_list) * furthermore do so in such a way that no seeking backwards in * @out_fd will be performed (so it may be a pipe). * - * WRITE_RESOURCE_FLAG_PACK_STREAMS: - * Pack all the streams into a single resource rather than writing + * WRITE_RESOURCE_FLAG_SOLID: + * Combine all the blobs into a single resource rather than writing * them in separate resources. This flag is only valid if the WIM - * version number has been, or will be, set to - * WIM_VERSION_PACKED_STREAMS. This flag may not be combined with - * WRITE_RESOURCE_FLAG_PIPABLE. + * version number has been, or will be, set to WIM_VERSION_SOLID. + * This flag may not be combined with WRITE_RESOURCE_FLAG_PIPABLE. * * @out_ctype - * Compression format to use to write the output streams, specified as one - * of the WIMLIB_COMPRESSION_TYPE_* constants. - * WIMLIB_COMPRESSION_TYPE_NONE is allowed. + * Compression format to use in the output resources, specified as one of + * the WIMLIB_COMPRESSION_TYPE_* constants. WIMLIB_COMPRESSION_TYPE_NONE + * is allowed. * * @out_chunk_size - * Chunk size to use to write the streams. It must be a valid chunk size - * for the specified compression format @out_ctype, unless @out_ctype is - * WIMLIB_COMPRESSION_TYPE_NONE, in which case this parameter is ignored. + * Compression chunk size to use in the output resources. It must be a + * valid chunk size for the specified compression format @out_ctype, unless + * @out_ctype is WIMLIB_COMPRESSION_TYPE_NONE, in which case this parameter + * is ignored. * * @num_threads * Number of threads to use to compress data. If 0, a default number of * threads will be chosen. The number of threads still may be decreased * from the specified value if insufficient memory is detected. * - * @lookup_table - * If on-the-fly deduplication of unhashed streams is desired, this - * parameter must be pointer to the lookup table for the WIMStruct on whose - * behalf the streams are being written. Otherwise, this parameter can be - * NULL. + * @blob_table + * If on-the-fly deduplication of unhashed blobs is desired, this parameter + * must be pointer to the blob table for the WIMStruct on whose behalf the + * blobs are being written. Otherwise, this parameter can be NULL. * * @filter_ctx - * If on-the-fly deduplication of unhashed streams is desired, this - * parameter can be a pointer to a context for stream filtering used to - * detect whether the duplicate stream has been hard-filtered or not. If - * no streams are hard-filtered or no streams are unhashed, this parameter - * can be NULL. - * - * This function will write the streams in @stream_list to resources in - * consecutive positions in the output WIM file, or to a single packed resource - * if WRITE_RESOURCE_FLAG_PACK_STREAMS was specified in @write_resource_flags. - * In both cases, the @out_reshdr of the `struct wim_lookup_table_entry' for - * each stream written will be updated to specify its location, size, and flags - * in the output WIM. 
In the packed resource case, - * WIM_RESHDR_FLAG_PACKED_STREAMS will be set in the @flags field of each - * @out_reshdr, and furthermore @out_res_offset_in_wim and @out_res_size_in_wim - * of each @out_reshdr will be set to the offset and size, respectively, in the - * output WIM of the packed resource containing the corresponding stream. - * - * Each of the streams to write may be in any location supported by the - * resource-handling code (specifically, read_stream_list()), such as the - * contents of external file that has been logically added to the output WIM, or - * a stream in another WIM file that has been imported, or even a stream in the - * "same" WIM file of which a modified copy is being written. In the case that - * a stream is already in a WIM file and uses compatible compression parameters, - * by default this function will re-use the raw data instead of decompressing - * it, then recompressing it; however, with WRITE_RESOURCE_FLAG_RECOMPRESS + * If on-the-fly deduplication of unhashed blobs is desired, this parameter + * can be a pointer to a context for blob filtering used to detect whether + * the duplicate blob has been hard-filtered or not. If no blobs are + * hard-filtered or no blobs are unhashed, this parameter can be NULL. + * + * This function will write the blobs in @blob_list to resources in + * consecutive positions in the output WIM file, or to a single solid resource + * if WRITE_RESOURCE_FLAG_SOLID was specified in @write_resource_flags. In both + * cases, the @out_reshdr of the `struct blob_descriptor' for each blob written will be + * updated to specify its location, size, and flags in the output WIM. In the + * solid resource case, WIM_RESHDR_FLAG_SOLID will be set in the @flags field of + * each @out_reshdr, and furthermore @out_res_offset_in_wim and + * @out_res_size_in_wim of each @out_reshdr will be set to the offset and size, + * respectively, in the output WIM of the solid resource containing the + * corresponding blob. + * + * Each of the blobs to write may be in any location supported by the + * resource-handling code (specifically, read_blob_list()), such as the contents + * of external file that has been logically added to the output WIM, or a blob + * in another WIM file that has been imported, or even a blob in the "same" WIM + * file of which a modified copy is being written. In the case that a blob is + * already in a WIM file and uses compatible compression parameters, by default + * this function will re-use the raw data instead of decompressing it, then + * recompressing it; however, with WRITE_RESOURCE_FLAG_RECOMPRESS * specified in @write_resource_flags, this is not done. * * As a further requirement, this function requires that the - * @will_be_in_output_wim member be set to 1 on all streams in @stream_list as - * well as any other streams not in @stream_list that will be in the output WIM - * file, but set to 0 on any other streams in the output WIM's lookup table or - * sharing a packed resource with a stream in @stream_list. Still furthermore, - * if on-the-fly deduplication of streams is possible, then all streams in - * @stream_list must also be linked by @lookup_table_list along with any other - * streams that have @will_be_in_output_wim set. - * - * This function handles on-the-fly deduplication of streams for which SHA1 - * message digests have not yet been calculated. Such streams may or may not - * need to be written. 
If @lookup_table is non-NULL, then each stream in - * @stream_list that has @unhashed set but not @unique_size set is checksummed - * immediately before it would otherwise be read for writing in order to - * determine if it is identical to another stream already being written or one - * that would be filtered out of the output WIM using stream_filtered() with the - * context @filter_ctx. Each such duplicate stream will be removed from - * @stream_list, its reference count transfered to the pre-existing duplicate - * stream, its memory freed, and will not be written. Alternatively, if a - * stream in @stream_list is a duplicate with any stream in @lookup_table that - * has not been marked for writing or would not be hard-filtered, it is freed - * and the pre-existing duplicate is written instead, taking ownership of the - * reference count and slot in the @lookup_table_list. - * - * Returns 0 if every stream was either written successfully or did not need to - * be written; otherwise returns a non-zero error code. + * @will_be_in_output_wim member be set to 1 on all blobs in @blob_list as well + * as any other blobs not in @blob_list that will be in the output WIM file, but + * set to 0 on any other blobs in the output WIM's blob table or sharing a solid + * resource with a blob in @blob_list. Still furthermore, if on-the-fly + * deduplication of blobs is possible, then all blobs in @blob_list must also be + * linked by @blob_table_list along with any other blobs that have + * @will_be_in_output_wim set. + * + * This function handles on-the-fly deduplication of blobs for which SHA-1 + * message digests have not yet been calculated. Such blobs may or may not need + * to be written. If @blob_table is non-NULL, then each blob in @blob_list that + * has @unhashed set but not @unique_size set is checksummed immediately before + * it would otherwise be read for writing in order to determine if it is + * identical to another blob already being written or one that would be filtered + * out of the output WIM using blob_filtered() with the context @filter_ctx. + * Each such duplicate blob will be removed from @blob_list, its reference count + * transfered to the pre-existing duplicate blob, its memory freed, and will not + * be written. Alternatively, if a blob in @blob_list is a duplicate with any + * blob in @blob_table that has not been marked for writing or would not be + * hard-filtered, it is freed and the pre-existing duplicate is written instead, + * taking ownership of the reference count and slot in the @blob_table_list. + * + * Returns 0 if every blob was either written successfully or did not need to be + * written; otherwise returns a non-zero error code. 
*/ static int -write_stream_list(struct list_head *stream_list, - struct filedes *out_fd, - int write_resource_flags, - int out_ctype, - u32 out_chunk_size, - unsigned num_threads, - struct wim_lookup_table *lookup_table, - struct filter_context *filter_ctx, - wimlib_progress_func_t progfunc, - void *progctx) +write_blob_list(struct list_head *blob_list, + struct filedes *out_fd, + int write_resource_flags, + int out_ctype, + u32 out_chunk_size, + unsigned num_threads, + struct blob_table *blob_table, + struct filter_context *filter_ctx, + wimlib_progress_func_t progfunc, + void *progctx) { int ret; - struct write_streams_ctx ctx; - struct list_head raw_copy_streams; + struct write_blobs_ctx ctx; + struct list_head raw_copy_blobs; wimlib_assert((write_resource_flags & - (WRITE_RESOURCE_FLAG_PACK_STREAMS | + (WRITE_RESOURCE_FLAG_SOLID | WRITE_RESOURCE_FLAG_PIPABLE)) != - (WRITE_RESOURCE_FLAG_PACK_STREAMS | + (WRITE_RESOURCE_FLAG_SOLID | WRITE_RESOURCE_FLAG_PIPABLE)); - remove_zero_length_streams(stream_list); + remove_empty_blobs(blob_list); - if (list_empty(stream_list)) { - DEBUG("No streams to write."); + if (list_empty(blob_list)) { + DEBUG("No blobs to write."); return 0; } /* If needed, set auxiliary information so that we can detect when the * library has finished using each external file. */ if (unlikely(write_resource_flags & WRITE_RESOURCE_FLAG_SEND_DONE_WITH_FILE)) - init_done_with_file_info(stream_list); + init_done_with_file_info(blob_list); memset(&ctx, 0, sizeof(ctx)); - /* Pre-sorting the streams is required for compute_stream_list_stats(). - * Afterwards, read_stream_list() need not sort them again. */ - ret = sort_stream_list_by_sequential_order(stream_list, - offsetof(struct wim_lookup_table_entry, - write_streams_list)); - if (ret) - return ret; - ctx.out_fd = out_fd; - ctx.lookup_table = lookup_table; + ctx.blob_table = blob_table; ctx.out_ctype = out_ctype; ctx.out_chunk_size = out_chunk_size; ctx.write_resource_flags = write_resource_flags; ctx.filter_ctx = filter_ctx; - if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) { - wimlib_assert(out_chunk_size != 0); - if (out_chunk_size <= STACK_MAX) { - ctx.chunk_buf = alloca(out_chunk_size); - } else { - ctx.chunk_buf = MALLOC(out_chunk_size); - if (ctx.chunk_buf == NULL) { - ret = WIMLIB_ERR_NOMEM; - goto out_destroy_context; - } - } - } - ctx.chunk_buf_filled = 0; + /* + * We normally sort the blobs to write by a "sequential" order that is + * optimized for reading. But when using solid compression, we instead + * sort the blobs by file extension and file name (when applicable; and + * we don't do this for blobs from solid resources) so that similar + * files are grouped together, which improves the compression ratio. + * This is somewhat of a hack since a blob does not necessarily + * correspond one-to-one with a filename, nor is there any guarantee + * that two files with similar names or extensions are actually similar + * in content. A potential TODO is to sort the blobs based on some + * measure of similarity of their actual contents. + */ + + ret = sort_blob_list_by_sequential_order(blob_list, + offsetof(struct blob_descriptor, + write_blobs_list)); + if (ret) + return ret; + + compute_blob_list_stats(blob_list, &ctx); - compute_stream_list_stats(stream_list, &ctx); + if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID_SORT) { + ret = sort_blob_list_for_solid_compression(blob_list); + if (unlikely(ret)) + WARNING("Failed to sort blobs for solid compression. 
Continuing anyways."); + } ctx.progress_data.progfunc = progfunc; ctx.progress_data.progctx = progctx; - ctx.num_bytes_to_compress = find_raw_copy_streams(stream_list, - write_resource_flags, - out_ctype, - out_chunk_size, - &raw_copy_streams); + ctx.num_bytes_to_compress = find_raw_copy_blobs(blob_list, + write_resource_flags, + out_ctype, + out_chunk_size, + &raw_copy_blobs); - DEBUG("Writing stream list " + DEBUG("Writing blob list " "(offset = %"PRIu64", write_resource_flags=0x%08x, " "out_ctype=%d, out_chunk_size=%u, num_threads=%u, " "total_bytes=%"PRIu64", num_bytes_to_compress=%"PRIu64")", @@ -1654,8 +1621,8 @@ write_stream_list(struct list_head *stream_list, DEBUG("Actually using %u threads", ctx.progress_data.progress.write_streams.num_threads); - INIT_LIST_HEAD(&ctx.pending_streams); - INIT_LIST_HEAD(&ctx.pack_streams); + INIT_LIST_HEAD(&ctx.blobs_being_compressed); + INIT_LIST_HEAD(&ctx.blobs_in_solid_resource); ret = call_progress(ctx.progress_data.progfunc, WIMLIB_PROGRESS_MSG_WRITE_STREAMS, @@ -1664,30 +1631,30 @@ write_stream_list(struct list_head *stream_list, if (ret) goto out_destroy_context; - if (write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) { + if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress); if (ret) goto out_destroy_context; } - /* Read the list of streams needing to be compressed, using the - * specified callbacks to execute processing of the data. */ + /* Read the list of blobs needing to be compressed, using the specified + * callbacks to execute processing of the data. */ - struct read_stream_list_callbacks cbs = { - .begin_stream = write_stream_begin_read, - .begin_stream_ctx = &ctx, - .consume_chunk = write_stream_process_chunk, + struct read_blob_list_callbacks cbs = { + .begin_blob = write_blob_begin_read, + .begin_blob_ctx = &ctx, + .consume_chunk = write_blob_process_chunk, .consume_chunk_ctx = &ctx, - .end_stream = write_stream_end_read, - .end_stream_ctx = &ctx, + .end_blob = write_blob_end_read, + .end_blob_ctx = &ctx, }; - ret = read_stream_list(stream_list, - offsetof(struct wim_lookup_table_entry, write_streams_list), - &cbs, - STREAM_LIST_ALREADY_SORTED | - VERIFY_STREAM_HASHES | - COMPUTE_MISSING_STREAM_HASHES); + ret = read_blob_list(blob_list, + offsetof(struct blob_descriptor, write_blobs_list), + &cbs, + BLOB_LIST_ALREADY_SORTED | + VERIFY_BLOB_HASHES | + COMPUTE_MISSING_BLOB_HASHES); if (ret) goto out_destroy_context; @@ -1696,31 +1663,31 @@ write_stream_list(struct list_head *stream_list, if (ret) goto out_destroy_context; - if (write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) { + if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { struct wim_reshdr reshdr; - struct wim_lookup_table_entry *lte; + struct blob_descriptor *blob; u64 offset_in_res; ret = end_write_resource(&ctx, &reshdr); if (ret) goto out_destroy_context; - DEBUG("Ending packed resource: %lu %lu %lu.", + DEBUG("Ending solid resource: %lu %lu %lu.", reshdr.offset_in_wim, reshdr.size_in_wim, reshdr.uncompressed_size); offset_in_res = 0; - list_for_each_entry(lte, &ctx.pack_streams, write_streams_list) { - lte->out_reshdr.size_in_wim = lte->size; - lte->out_reshdr.flags = filter_resource_flags(lte->flags); - lte->out_reshdr.flags |= WIM_RESHDR_FLAG_PACKED_STREAMS; - lte->out_reshdr.uncompressed_size = 0; - lte->out_reshdr.offset_in_wim = offset_in_res; - lte->out_res_offset_in_wim = reshdr.offset_in_wim; - lte->out_res_size_in_wim = reshdr.size_in_wim; - lte->out_res_uncompressed_size = 
reshdr.uncompressed_size; - offset_in_res += lte->size; + list_for_each_entry(blob, &ctx.blobs_in_solid_resource, write_blobs_list) { + blob->out_reshdr.size_in_wim = blob->size; + blob->out_reshdr.flags = reshdr_flags_for_blob(blob) | + WIM_RESHDR_FLAG_SOLID; + blob->out_reshdr.uncompressed_size = 0; + blob->out_reshdr.offset_in_wim = offset_in_res; + blob->out_res_offset_in_wim = reshdr.offset_in_wim; + blob->out_res_size_in_wim = reshdr.size_in_wim; + blob->out_res_uncompressed_size = reshdr.uncompressed_size; + offset_in_res += blob->size; } wimlib_assert(offset_in_res == reshdr.uncompressed_size); } @@ -1728,12 +1695,10 @@ write_stream_list(struct list_head *stream_list, out_write_raw_copy_resources: /* Copy any compressed resources for which the raw data can be reused * without decompression. */ - ret = write_raw_copy_resources(&raw_copy_streams, ctx.out_fd, + ret = write_raw_copy_resources(&raw_copy_blobs, ctx.out_fd, &ctx.progress_data); out_destroy_context: - if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE && out_chunk_size > STACK_MAX) - FREE(ctx.chunk_buf); FREE(ctx.chunk_csizes); if (ctx.compressor) ctx.compressor->destroy(ctx.compressor); @@ -1741,24 +1706,13 @@ out_destroy_context: return ret; } -static int -is_stream_packed(struct wim_lookup_table_entry *lte, void *_ignore) -{ - return lte_is_partial(lte); -} - -static bool -wim_has_packed_streams(WIMStruct *wim) -{ - return for_lookup_table_entry(wim->lookup_table, is_stream_packed, NULL); -} static int -wim_write_stream_list(WIMStruct *wim, - struct list_head *stream_list, - int write_flags, - unsigned num_threads, - struct filter_context *filter_ctx) +wim_write_blob_list(WIMStruct *wim, + struct list_head *blob_list, + int write_flags, + unsigned num_threads, + struct filter_context *filter_ctx) { int out_ctype; u32 out_chunk_size; @@ -1766,113 +1720,99 @@ wim_write_stream_list(WIMStruct *wim, write_resource_flags = write_flags_to_resource_flags(write_flags); - /* wimlib v1.7.0: pack streams by default if the WIM version has been - * set to WIM_VERSION_PACKED_STREAMS and at least one stream in the - * WIM's lookup table is located in a packed resource (may be the same - * WIM, or a different one in the case of export). */ - if (wim->hdr.wim_version == WIM_VERSION_PACKED_STREAMS && - wim_has_packed_streams(wim)) + /* wimlib v1.7.0: create a solid WIM file by default if the WIM version + * has been set to WIM_VERSION_SOLID and at least one blob in the WIM's + * blob table is located in a solid resource (may be the same WIM, or a + * different one in the case of export). 
*/ + if (wim->hdr.wim_version == WIM_VERSION_SOLID && + wim_has_solid_resources(wim)) { - write_resource_flags |= WRITE_RESOURCE_FLAG_PACK_STREAMS; + write_resource_flags |= WRITE_RESOURCE_FLAG_SOLID; } - if (write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) { - out_chunk_size = wim->out_pack_chunk_size; - out_ctype = wim->out_pack_compression_type; + if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { + out_chunk_size = wim->out_solid_chunk_size; + out_ctype = wim->out_solid_compression_type; } else { out_chunk_size = wim->out_chunk_size; out_ctype = wim->out_compression_type; } - return write_stream_list(stream_list, - &wim->out_fd, - write_resource_flags, - out_ctype, - out_chunk_size, - num_threads, - wim->lookup_table, - filter_ctx, - wim->progfunc, - wim->progctx); + return write_blob_list(blob_list, + &wim->out_fd, + write_resource_flags, + out_ctype, + out_chunk_size, + num_threads, + wim->blob_table, + filter_ctx, + wim->progfunc, + wim->progctx); } +/* Write the contents of the specified blob as a WIM resource. */ static int -write_wim_resource(struct wim_lookup_table_entry *lte, +write_wim_resource(struct blob_descriptor *blob, struct filedes *out_fd, int out_ctype, u32 out_chunk_size, int write_resource_flags) { - LIST_HEAD(stream_list); - list_add(&lte->write_streams_list, &stream_list); - lte->will_be_in_output_wim = 1; - return write_stream_list(&stream_list, - out_fd, - write_resource_flags & ~WRITE_RESOURCE_FLAG_PACK_STREAMS, - out_ctype, - out_chunk_size, - 1, - NULL, - NULL, - NULL, - NULL); -} - + LIST_HEAD(blob_list); + list_add(&blob->write_blobs_list, &blob_list); + blob->will_be_in_output_wim = 1; + return write_blob_list(&blob_list, + out_fd, + write_resource_flags & ~WRITE_RESOURCE_FLAG_SOLID, + out_ctype, + out_chunk_size, + 1, + NULL, + NULL, + NULL, + NULL); +} +
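The single-blob helper above is how unmodified metadata resources get copied later in this diff (see write_metadata_resources()). For illustration only, the wrapper below is not part of this commit; it assumes it sits inside write.c where these internal declarations are visible, and its call mirrors the real call site further down:

static int
copy_metadata_resource_sketch(WIMStruct *wim, struct blob_descriptor *metadata_blob,
			      int write_resource_flags)
{
	/* Metadata is written as its own non-solid resource; write_wim_resource()
	 * itself clears WRITE_RESOURCE_FLAG_SOLID for the single-blob path, so the
	 * flags can be passed through unchanged here. */
	return write_wim_resource(metadata_blob,
				  &wim->out_fd,
				  wim->out_compression_type,
				  wim->out_chunk_size,
				  write_resource_flags);
}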
+/* Write the contents of the specified buffer as a WIM resource. */ int -write_wim_resource_from_buffer(const void *buf, size_t buf_size, - int reshdr_flags, struct filedes *out_fd, +write_wim_resource_from_buffer(const void *buf, + size_t buf_size, + bool is_metadata, + struct filedes *out_fd, int out_ctype, u32 out_chunk_size, struct wim_reshdr *out_reshdr, - u8 *hash, + u8 *hash_ret, int write_resource_flags) { int ret; - struct wim_lookup_table_entry *lte; - - /* Set up a temporary lookup table entry to provide to - * write_wim_resource(). */ - - lte = new_lookup_table_entry(); - if (lte == NULL) - return WIMLIB_ERR_NOMEM; - - lte->resource_location = RESOURCE_IN_ATTACHED_BUFFER; - lte->attached_buffer = (void*)buf; - lte->size = buf_size; - lte->flags = reshdr_flags; + struct blob_descriptor blob; - if (write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE) { - sha1_buffer(buf, buf_size, lte->hash); - lte->unhashed = 0; - } else { - lte->unhashed = 1; - } + blob_set_is_located_in_attached_buffer(&blob, (void *)buf, buf_size); + sha1_buffer(buf, buf_size, blob.hash); + blob.unhashed = 0; + blob.is_metadata = is_metadata; - ret = write_wim_resource(lte, out_fd, out_ctype, out_chunk_size, + ret = write_wim_resource(&blob, out_fd, out_ctype, out_chunk_size, write_resource_flags); if (ret) - goto out_free_lte; + return ret; - copy_reshdr(out_reshdr, &lte->out_reshdr); + copy_reshdr(out_reshdr, &blob.out_reshdr); - if (hash) - copy_hash(hash, lte->hash); - ret = 0; -out_free_lte: - lte->resource_location = RESOURCE_NONEXISTENT; - free_lookup_table_entry(lte); - return ret; + if (hash_ret) + copy_hash(hash_ret, blob.hash); + return 0; } -struct stream_size_table { +struct blob_size_table { struct hlist_head *array; size_t num_entries; size_t capacity; }; static int -init_stream_size_table(struct stream_size_table *tab, size_t capacity) +init_blob_size_table(struct blob_size_table *tab, size_t capacity) { tab->array = CALLOC(capacity, sizeof(tab->array[0])); if (tab->array == NULL) @@ -1883,110 +1823,107 @@ init_stream_size_table(struct stream_size_table *tab, size_t capacity) } static void -destroy_stream_size_table(struct stream_size_table *tab) +destroy_blob_size_table(struct blob_size_table *tab) { FREE(tab->array); } static int -stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab) +blob_size_table_insert(struct blob_descriptor *blob, void *_tab) { - struct stream_size_table *tab = _tab; + struct blob_size_table *tab = _tab; size_t pos; - struct wim_lookup_table_entry *same_size_lte; + struct blob_descriptor *same_size_blob; struct hlist_node *tmp; - pos = hash_u64(lte->size) % tab->capacity; - lte->unique_size = 1; - hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) { - if (same_size_lte->size == lte->size) { - lte->unique_size = 0; - same_size_lte->unique_size = 0; + pos = hash_u64(blob->size) % tab->capacity; + blob->unique_size = 1; + hlist_for_each_entry(same_size_blob, tmp, &tab->array[pos], hash_list_2) { + if (same_size_blob->size == blob->size) { + blob->unique_size = 0; + same_size_blob->unique_size = 0; break; } } - hlist_add_head(&lte->hash_list_2, &tab->array[pos]); + hlist_add_head(&blob->hash_list_2, &tab->array[pos]); tab->num_entries++; return 0; } -struct find_streams_ctx { +struct find_blobs_ctx { WIMStruct *wim; int write_flags; - struct list_head stream_list; - struct stream_size_table stream_size_tab; + struct list_head blob_list; + struct blob_size_table blob_size_tab; }; static void -reference_stream_for_write(struct wim_lookup_table_entry *lte, - struct list_head *stream_list, u32 nref) +reference_blob_for_write(struct blob_descriptor *blob, + struct list_head *blob_list, u32 nref) { - if (!lte->will_be_in_output_wim) { - lte->out_refcnt = 0; - list_add_tail(&lte->write_streams_list, stream_list); - lte->will_be_in_output_wim = 1; + if (!blob->will_be_in_output_wim) { + blob->out_refcnt = 0; + list_add_tail(&blob->write_blobs_list, blob_list); + blob->will_be_in_output_wim = 1; } - lte->out_refcnt += nref; + blob->out_refcnt += nref; } static int
-fully_reference_stream_for_write(struct wim_lookup_table_entry *lte, - void *_stream_list) +fully_reference_blob_for_write(struct blob_descriptor *blob, void *_blob_list) { - struct list_head *stream_list = _stream_list; - lte->will_be_in_output_wim = 0; - reference_stream_for_write(lte, stream_list, lte->refcnt); + struct list_head *blob_list = _blob_list; + blob->will_be_in_output_wim = 0; + reference_blob_for_write(blob, blob_list, blob->refcnt); return 0; } static int -inode_find_streams_to_reference(const struct wim_inode *inode, - const struct wim_lookup_table *table, - struct list_head *stream_list) +inode_find_blobs_to_reference(const struct wim_inode *inode, + const struct blob_table *table, + struct list_head *blob_list) { - struct wim_lookup_table_entry *lte; - unsigned i; - wimlib_assert(inode->i_nlink > 0); - for (i = 0; i <= inode->i_num_ads; i++) { - lte = inode_stream_lte(inode, i, table); - if (lte) - reference_stream_for_write(lte, stream_list, - inode->i_nlink); - else if (!is_zero_hash(inode_stream_hash(inode, i))) + for (unsigned i = 0; i < inode->i_num_streams; i++) { + struct blob_descriptor *blob; + + blob = stream_blob(&inode->i_streams[i], table); + if (blob) + reference_blob_for_write(blob, blob_list, inode->i_nlink); + else if (!is_zero_hash(stream_hash(&inode->i_streams[i]))) return WIMLIB_ERR_RESOURCE_NOT_FOUND; } return 0; } static int -do_stream_set_not_in_output_wim(struct wim_lookup_table_entry *lte, void *_ignore) +do_blob_set_not_in_output_wim(struct blob_descriptor *blob, void *_ignore) { - lte->will_be_in_output_wim = 0; + blob->will_be_in_output_wim = 0; return 0; } static int -image_find_streams_to_reference(WIMStruct *wim) +image_find_blobs_to_reference(WIMStruct *wim) { struct wim_image_metadata *imd; struct wim_inode *inode; - struct wim_lookup_table_entry *lte; - struct list_head *stream_list; + struct blob_descriptor *blob; + struct list_head *blob_list; int ret; imd = wim_get_current_image_metadata(wim); - image_for_each_unhashed_stream(lte, imd) - lte->will_be_in_output_wim = 0; + image_for_each_unhashed_blob(blob, imd) + blob->will_be_in_output_wim = 0; - stream_list = wim->private; + blob_list = wim->private; image_for_each_inode(inode, imd) { - ret = inode_find_streams_to_reference(inode, - wim->lookup_table, - stream_list); + ret = inode_find_blobs_to_reference(inode, + wim->blob_table, + blob_list); if (ret) return ret; } @@ -1994,40 +1931,40 @@ image_find_streams_to_reference(WIMStruct *wim) } static int -prepare_unfiltered_list_of_streams_in_output_wim(WIMStruct *wim, - int image, - int streams_ok, - struct list_head *stream_list_ret) +prepare_unfiltered_list_of_blobs_in_output_wim(WIMStruct *wim, + int image, + int blobs_ok, + struct list_head *blob_list_ret) { int ret; - INIT_LIST_HEAD(stream_list_ret); + INIT_LIST_HEAD(blob_list_ret); - if (streams_ok && (image == WIMLIB_ALL_IMAGES || - (image == 1 && wim->hdr.image_count == 1))) + if (blobs_ok && (image == WIMLIB_ALL_IMAGES || + (image == 1 && wim->hdr.image_count == 1))) { - /* Fast case: Assume that all streams are being written and - * that the reference counts are correct. */ - struct wim_lookup_table_entry *lte; + /* Fast case: Assume that all blobs are being written and that + * the reference counts are correct. 
*/ + struct blob_descriptor *blob; struct wim_image_metadata *imd; unsigned i; - for_lookup_table_entry(wim->lookup_table, - fully_reference_stream_for_write, - stream_list_ret); + for_blob_in_table(wim->blob_table, + fully_reference_blob_for_write, + blob_list_ret); for (i = 0; i < wim->hdr.image_count; i++) { imd = wim->image_metadata[i]; - image_for_each_unhashed_stream(lte, imd) - fully_reference_stream_for_write(lte, stream_list_ret); + image_for_each_unhashed_blob(blob, imd) + fully_reference_blob_for_write(blob, blob_list_ret); } } else { /* Slow case: Walk through the images being written and - * determine the streams referenced. */ - for_lookup_table_entry(wim->lookup_table, - do_stream_set_not_in_output_wim, NULL); - wim->private = stream_list_ret; - ret = for_image(wim, image, image_find_streams_to_reference); + * determine the blobs referenced. */ + for_blob_in_table(wim->blob_table, + do_blob_set_not_in_output_wim, NULL); + wim->private = blob_list_ret; + ret = for_image(wim, image, image_find_blobs_to_reference); if (ret) return ret; } @@ -2036,59 +1973,57 @@ prepare_unfiltered_list_of_streams_in_output_wim(WIMStruct *wim, } struct insert_other_if_hard_filtered_ctx { - struct stream_size_table *tab; + struct blob_size_table *tab; struct filter_context *filter_ctx; }; static int -insert_other_if_hard_filtered(struct wim_lookup_table_entry *lte, void *_ctx) +insert_other_if_hard_filtered(struct blob_descriptor *blob, void *_ctx) { struct insert_other_if_hard_filtered_ctx *ctx = _ctx; - if (!lte->will_be_in_output_wim && - stream_hard_filtered(lte, ctx->filter_ctx)) - stream_size_table_insert(lte, ctx->tab); + if (!blob->will_be_in_output_wim && + blob_hard_filtered(blob, ctx->filter_ctx)) + blob_size_table_insert(blob, ctx->tab); return 0; } static int -determine_stream_size_uniquity(struct list_head *stream_list, - struct wim_lookup_table *lt, - struct filter_context *filter_ctx) +determine_blob_size_uniquity(struct list_head *blob_list, + struct blob_table *lt, + struct filter_context *filter_ctx) { int ret; - struct stream_size_table tab; - struct wim_lookup_table_entry *lte; + struct blob_size_table tab; + struct blob_descriptor *blob; - ret = init_stream_size_table(&tab, 9001); + ret = init_blob_size_table(&tab, 9001); if (ret) return ret; - if (may_hard_filter_streams(filter_ctx)) { + if (may_hard_filter_blobs(filter_ctx)) { struct insert_other_if_hard_filtered_ctx ctx = { .tab = &tab, .filter_ctx = filter_ctx, }; - for_lookup_table_entry(lt, insert_other_if_hard_filtered, &ctx); + for_blob_in_table(lt, insert_other_if_hard_filtered, &ctx); } - list_for_each_entry(lte, stream_list, write_streams_list) - stream_size_table_insert(lte, &tab); + list_for_each_entry(blob, blob_list, write_blobs_list) + blob_size_table_insert(blob, &tab); - destroy_stream_size_table(&tab); + destroy_blob_size_table(&tab); return 0; } static void -filter_stream_list_for_write(struct list_head *stream_list, - struct filter_context *filter_ctx) +filter_blob_list_for_write(struct list_head *blob_list, + struct filter_context *filter_ctx) { - struct wim_lookup_table_entry *lte, *tmp; + struct blob_descriptor *blob, *tmp; - list_for_each_entry_safe(lte, tmp, - stream_list, write_streams_list) - { - int status = stream_filtered(lte, filter_ctx); + list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) { + int status = blob_filtered(blob, filter_ctx); if (status == 0) { /* Not filtered. */ @@ -2098,19 +2033,19 @@ filter_stream_list_for_write(struct list_head *stream_list, /* Soft filtered. 
*/ } else { /* Hard filtered. */ - lte->will_be_in_output_wim = 0; - list_del(&lte->lookup_table_list); + blob->will_be_in_output_wim = 0; + list_del(&blob->blob_table_list); } - list_del(&lte->write_streams_list); + list_del(&blob->write_blobs_list); } } /* - * prepare_stream_list_for_write() - + * prepare_blob_list_for_write() - * - * Prepare the list of streams to write for writing a WIM containing the - * specified image(s) with the specified write flags. + * Prepare the list of blobs to write for writing a WIM containing the specified + * image(s) with the specified write flags. * * @wim * The WIMStruct on whose behalf the write is occurring. @@ -2121,142 +2056,140 @@ filter_stream_list_for_write(struct list_head *stream_list, * @write_flags * WIMLIB_WRITE_FLAG_* flags for the write operation: * - * STREAMS_OK: For writes of all images, assume that all streams in the - * lookup table of @wim and the per-image lists of unhashed streams should - * be taken as-is, and image metadata should not be searched for - * references. This does not exclude filtering with OVERWRITE and - * SKIP_EXTERNAL_WIMS, below. + * STREAMS_OK: For writes of all images, assume that all blobs in the blob + * table of @wim and the per-image lists of unhashed blobs should be taken + * as-is, and image metadata should not be searched for references. This + * does not exclude filtering with OVERWRITE and SKIP_EXTERNAL_WIMS, below. * - * OVERWRITE: Streams already present in @wim shall not be returned in - * @stream_list_ret. + * OVERWRITE: Blobs already present in @wim shall not be returned in + * @blob_list_ret. * - * SKIP_EXTERNAL_WIMS: Streams already present in a WIM file, but not - * @wim, shall be returned in neither @stream_list_ret nor - * @lookup_table_list_ret. + * SKIP_EXTERNAL_WIMS: Blobs already present in a WIM file, but not @wim, + * shall be returned in neither @blob_list_ret nor @blob_table_list_ret. * - * @stream_list_ret - * List of streams, linked by write_streams_list, that need to be written - * will be returned here. + * @blob_list_ret + * List of blobs, linked by write_blobs_list, that need to be written will + * be returned here. * - * Note that this function assumes that unhashed streams will be written; - * it does not take into account that they may become duplicates when - * actually hashed. + * Note that this function assumes that unhashed blobs will be written; it + * does not take into account that they may become duplicates when actually + * hashed. * - * @lookup_table_list_ret - * List of streams, linked by lookup_table_list, that need to be included - * in the WIM's lookup table will be returned here. This will be a - * superset of the streams in @stream_list_ret. + * @blob_table_list_ret + * List of blobs, linked by blob_table_list, that need to be included in + * the WIM's blob table will be returned here. This will be a superset of + * the blobs in @blob_list_ret. * - * This list will be a proper superset of @stream_list_ret if and only if + * This list will be a proper superset of @blob_list_ret if and only if * WIMLIB_WRITE_FLAG_OVERWRITE was specified in @write_flags and some of - * the streams that would otherwise need to be written were already located + * the blobs that would otherwise need to be written were already located * in the WIM file. * - * All streams in this list will have @out_refcnt set to the number of - * references to the stream in the output WIM. If + * All blobs in this list will have @out_refcnt set to the number of + * references to the blob in the output WIM. If * WIMLIB_WRITE_FLAG_STREAMS_OK was specified in @write_flags, @out_refcnt * may be as low as 0. * * @filter_ctx_ret - * A context for queries of stream filter status with stream_filtered() is + * A context for queries of blob filter status with blob_filtered() is * returned in this location. * - * In addition, @will_be_in_output_wim will be set to 1 in all stream entries - * inserted into @lookup_table_list_ret and to 0 in all stream entries in the - * lookup table of @wim not inserted into @lookup_table_list_ret. + * In addition, @will_be_in_output_wim will be set to 1 in all blobs inserted + * into @blob_table_list_ret and to 0 in all blobs in the blob table of @wim not + * inserted into @blob_table_list_ret. * - * Still furthermore, @unique_size will be set to 1 on all stream entries in - * @stream_list_ret that have unique size among all stream entries in - * @stream_list_ret and among all stream entries in the lookup table of @wim - * that are ineligible for being written due to filtering. + * Still furthermore, @unique_size will be set to 1 on all blobs in + * @blob_list_ret that have unique size among all blobs in @blob_list_ret and + * among all blobs in the blob table of @wim that are ineligible for being + * written due to filtering. * * Returns 0 on success; nonzero on read error, memory allocation error, or * otherwise. */ static int -prepare_stream_list_for_write(WIMStruct *wim, int image, - int write_flags, - struct list_head *stream_list_ret, - struct list_head *lookup_table_list_ret, - struct filter_context *filter_ctx_ret) +prepare_blob_list_for_write(WIMStruct *wim, int image, + int write_flags, + struct list_head *blob_list_ret, + struct list_head *blob_table_list_ret, + struct filter_context *filter_ctx_ret) { int ret; - struct wim_lookup_table_entry *lte; + struct blob_descriptor *blob; filter_ctx_ret->write_flags = write_flags; filter_ctx_ret->wim = wim; - ret = prepare_unfiltered_list_of_streams_in_output_wim( + ret = prepare_unfiltered_list_of_blobs_in_output_wim( wim, image, write_flags & WIMLIB_WRITE_FLAG_STREAMS_OK, - stream_list_ret); + blob_list_ret); if (ret) return ret; - INIT_LIST_HEAD(lookup_table_list_ret); - list_for_each_entry(lte, stream_list_ret, write_streams_list) - list_add_tail(&lte->lookup_table_list, lookup_table_list_ret); + INIT_LIST_HEAD(blob_table_list_ret); + list_for_each_entry(blob, blob_list_ret, write_blobs_list) + list_add_tail(&blob->blob_table_list, blob_table_list_ret); - ret = determine_stream_size_uniquity(stream_list_ret, wim->lookup_table, - filter_ctx_ret); + ret = determine_blob_size_uniquity(blob_list_ret, wim->blob_table, + filter_ctx_ret); if (ret) return ret; - if (may_filter_streams(filter_ctx_ret)) - filter_stream_list_for_write(stream_list_ret, filter_ctx_ret); + if (may_filter_blobs(filter_ctx_ret)) + filter_blob_list_for_write(blob_list_ret, filter_ctx_ret); return 0; }
static int -write_wim_streams(WIMStruct *wim, int image, int write_flags, - unsigned num_threads, - struct list_head *stream_list_override, - struct list_head *lookup_table_list_ret) +write_file_blobs(WIMStruct *wim, int image, int write_flags, + unsigned num_threads, + struct list_head *blob_list_override, + struct list_head *blob_table_list_ret) { int ret; - struct list_head _stream_list; - struct list_head *stream_list; - struct wim_lookup_table_entry *lte; + struct list_head _blob_list; + struct list_head *blob_list; + struct blob_descriptor *blob; struct filter_context _filter_ctx; struct filter_context *filter_ctx; - if (stream_list_override == NULL) { - /* Normal case: prepare stream list from image(s) being written. + if (blob_list_override == NULL) { + /* Normal case: prepare blob list from image(s) being written. */ - stream_list = &_stream_list; + blob_list = &_blob_list; filter_ctx = &_filter_ctx; - ret = prepare_stream_list_for_write(wim, image, write_flags, - stream_list, - lookup_table_list_ret, - filter_ctx); + ret = prepare_blob_list_for_write(wim, image, write_flags, + blob_list, + blob_table_list_ret, + filter_ctx); if (ret) return ret; } else { /* Currently only as a result of wimlib_split() being called: - * use stream list already explicitly provided. Use existing + * use blob list already explicitly provided. Use existing * reference counts. */ - stream_list = stream_list_override; + blob_list = blob_list_override; filter_ctx = NULL; - INIT_LIST_HEAD(lookup_table_list_ret); - list_for_each_entry(lte, stream_list, write_streams_list) { - lte->out_refcnt = lte->refcnt; - lte->will_be_in_output_wim = 1; - lte->unique_size = 0; - list_add_tail(&lte->lookup_table_list, lookup_table_list_ret); + INIT_LIST_HEAD(blob_table_list_ret); + list_for_each_entry(blob, blob_list, write_blobs_list) { + blob->out_refcnt = blob->refcnt; + blob->will_be_in_output_wim = 1; + blob->unique_size = 0; + list_add_tail(&blob->blob_table_list, blob_table_list_ret); } } - return wim_write_stream_list(wim, - stream_list, - write_flags, - num_threads, - filter_ctx); + return wim_write_blob_list(wim, + blob_list, + write_flags, + num_threads, + filter_ctx); } static int -write_wim_metadata_resources(WIMStruct *wim, int image, int write_flags) +write_metadata_resources(WIMStruct *wim, int image, int write_flags) { int ret; int start_image; @@ -2270,7 +2203,7 @@ write_wim_metadata_resources(WIMStruct *wim, int image, int write_flags) write_resource_flags = write_flags_to_resource_flags(write_flags); - write_resource_flags &= ~WRITE_RESOURCE_FLAG_PACK_STREAMS; + write_resource_flags &= ~WRITE_RESOURCE_FLAG_SOLID; DEBUG("Writing metadata resources (offset=%"PRIu64")", wim->out_fd.offset); @@ -2304,12 +2237,12 @@ write_wim_metadata_resources(WIMStruct *wim, int image, int write_flags) } else if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) { DEBUG("Image %u was not modified; re-using existing " "metadata resource.", i); - stream_set_out_reshdr_for_reuse(imd->metadata_lte); + blob_set_out_reshdr_for_reuse(imd->metadata_blob); ret = 0; } else { DEBUG("Image %u was not modified; copying existing " "metadata resource.", i); - ret = write_wim_resource(imd->metadata_lte, + ret = write_wim_resource(imd->metadata_blob, &wim->out_fd, wim->out_compression_type, wim->out_chunk_size, @@ -2355,52 +2288,51 @@ close_wim_writable(WIMStruct *wim, int write_flags) } static int -cmp_streams_by_out_rspec(const void *p1, const void *p2) +cmp_blobs_by_out_rdesc(const void *p1, const void *p2) { - const struct wim_lookup_table_entry *lte1, *lte2; + const struct blob_descriptor *blob1, *blob2; - lte1 = *(const struct wim_lookup_table_entry**)p1; - lte2 = *(const struct wim_lookup_table_entry**)p2; + blob1 = *(const struct blob_descriptor**)p1; + blob2 = *(const struct blob_descriptor**)p2; - if (lte1->out_reshdr.flags & WIM_RESHDR_FLAG_PACKED_STREAMS) { - if (lte2->out_reshdr.flags & WIM_RESHDR_FLAG_PACKED_STREAMS) { - if (lte1->out_res_offset_in_wim != lte2->out_res_offset_in_wim) - return cmp_u64(lte1->out_res_offset_in_wim, -
lte2->out_res_offset_in_wim); + if (blob1->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) { + if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) { + if (blob1->out_res_offset_in_wim != blob2->out_res_offset_in_wim) + return cmp_u64(blob1->out_res_offset_in_wim, + blob2->out_res_offset_in_wim); } else { return 1; } } else { - if (lte2->out_reshdr.flags & WIM_RESHDR_FLAG_PACKED_STREAMS) + if (blob2->out_reshdr.flags & WIM_RESHDR_FLAG_SOLID) return -1; } - return cmp_u64(lte1->out_reshdr.offset_in_wim, - lte2->out_reshdr.offset_in_wim); + return cmp_u64(blob1->out_reshdr.offset_in_wim, + blob2->out_reshdr.offset_in_wim); } static int -write_wim_lookup_table(WIMStruct *wim, int image, int write_flags, - struct wim_reshdr *out_reshdr, - struct list_head *lookup_table_list) +write_blob_table(WIMStruct *wim, int image, int write_flags, + struct wim_reshdr *out_reshdr, + struct list_head *blob_table_list) { int ret; - /* Set output resource metadata for streams already present in WIM. */ + /* Set output resource metadata for blobs already present in WIM. */ if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) { - struct wim_lookup_table_entry *lte; - list_for_each_entry(lte, lookup_table_list, lookup_table_list) - { - if (lte->resource_location == RESOURCE_IN_WIM && - lte->rspec->wim == wim) + struct blob_descriptor *blob; + list_for_each_entry(blob, blob_table_list, blob_table_list) { + if (blob->blob_location == BLOB_IN_WIM && + blob->rdesc->wim == wim) { - stream_set_out_reshdr_for_reuse(lte); + blob_set_out_reshdr_for_reuse(blob); } } } - ret = sort_stream_list(lookup_table_list, - offsetof(struct wim_lookup_table_entry, lookup_table_list), - cmp_streams_by_out_rspec); + ret = sort_blob_list(blob_table_list, + offsetof(struct blob_descriptor, blob_table_list), + cmp_blobs_by_out_rdesc); if (ret) return ret; @@ -2417,30 +2349,30 @@ write_wim_lookup_table(WIMStruct *wim, int image, int write_flags, end_image = image; } - /* Push metadata resource lookup table entries onto the front of - * the list in reverse order, so that they're written in order. + /* Push metadata blob table entries onto the front of the list + * in reverse order, so that they're written in order. */ for (int i = end_image; i >= start_image; i--) { - struct wim_lookup_table_entry *metadata_lte; + struct blob_descriptor *metadata_blob; - metadata_lte = wim->image_metadata[i - 1]->metadata_lte; - wimlib_assert(metadata_lte->out_reshdr.flags & WIM_RESHDR_FLAG_METADATA); - metadata_lte->out_refcnt = 1; - list_add(&metadata_lte->lookup_table_list, lookup_table_list); + metadata_blob = wim->image_metadata[i - 1]->metadata_blob; + wimlib_assert(metadata_blob->out_reshdr.flags & WIM_RESHDR_FLAG_METADATA); + metadata_blob->out_refcnt = 1; + list_add(&metadata_blob->blob_table_list, blob_table_list); } } - return write_wim_lookup_table_from_stream_list(lookup_table_list, - &wim->out_fd, - wim->hdr.part_number, - out_reshdr, - write_flags_to_resource_flags(write_flags)); + return write_blob_table_from_blob_list(blob_table_list, + &wim->out_fd, + wim->hdr.part_number, + out_reshdr, + write_flags_to_resource_flags(write_flags)); } /* * finish_write(): * - * Finish writing a WIM file: write the lookup table, xml data, and integrity + * Finish writing a WIM file: write the blob table, xml data, and integrity * table, then overwrite the WIM header. By default, closes the WIM file * descriptor (@wim->out_fd) if successful. 
* @@ -2454,19 +2386,18 @@ write_wim_lookup_table(WIMStruct *wim, int image, int write_flags, * * (public) WIMLIB_WRITE_FLAG_PIPABLE: * Writing a pipable WIM, possibly to a pipe; include pipable WIM - * stream headers before the lookup table and XML data, and also - * write the WIM header at the end instead of seeking to the - * beginning. Can't be combined with - * WIMLIB_WRITE_FLAG_CHECK_INTEGRITY. + * blob headers before the blob table and XML data, and also write + * the WIM header at the end instead of seeking to the beginning. + * Can't be combined with WIMLIB_WRITE_FLAG_CHECK_INTEGRITY. * - * (private) WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE: - * Don't write the lookup table. + * (private) WIMLIB_WRITE_FLAG_NO_BLOB_TABLE: + * Don't write the blob table. * * (private) WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML: * After writing the XML data but before writing the integrity - * table, write a temporary WIM header and flush the stream so that - * the WIM is less likely to become corrupted upon abrupt program - * termination. + * table, write a temporary WIM header and flush the file + * descriptor so that the WIM is less likely to become corrupted + * upon abrupt program termination. * (private) WIMLIB_WRITE_FLAG_HEADER_AT_END: * Instead of overwriting the WIM header at the beginning of the * file, simply append it to the end of the file. (Used when @@ -2484,13 +2415,13 @@ write_wim_lookup_table(WIMStruct *wim, int image, int write_flags, */ static int finish_write(WIMStruct *wim, int image, int write_flags, - struct list_head *lookup_table_list) + struct list_head *blob_table_list) { int ret; off_t hdr_offset; int write_resource_flags; - off_t old_lookup_table_end = 0; - off_t new_lookup_table_end; + off_t old_blob_table_end = 0; + off_t new_blob_table_end; u64 xml_totalbytes; struct integrity_table *old_integrity_table = NULL; @@ -2508,7 +2439,7 @@ finish_write(WIMStruct *wim, int image, int write_flags, } else { copy_reshdr(&wim->hdr.boot_metadata_reshdr, &wim->image_metadata[ - wim->hdr.boot_idx - 1]->metadata_lte->out_reshdr); + wim->hdr.boot_idx - 1]->metadata_blob->out_reshdr); } /* If overwriting the WIM file containing an integrity table in-place, @@ -2522,21 +2453,21 @@ finish_write(WIMStruct *wim, int image, int write_flags, WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) && wim_has_integrity_table(wim)) { - old_lookup_table_end = wim->hdr.lookup_table_reshdr.offset_in_wim + - wim->hdr.lookup_table_reshdr.size_in_wim; + old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim + + wim->hdr.blob_table_reshdr.size_in_wim; (void)read_integrity_table(wim, - old_lookup_table_end - WIM_HEADER_DISK_SIZE, + old_blob_table_end - WIM_HEADER_DISK_SIZE, &old_integrity_table); /* If we couldn't read the old integrity table, we can still * re-calculate the full integrity table ourselves. Hence the * ignoring of the return value. */ } - /* Write lookup table. */ - if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) { - ret = write_wim_lookup_table(wim, image, write_flags, - &wim->hdr.lookup_table_reshdr, - lookup_table_list); + /* Write blob table. 
*/ + if (!(write_flags & WIMLIB_WRITE_FLAG_NO_BLOB_TABLE)) { + ret = write_blob_table(wim, image, write_flags, + &wim->hdr.blob_table_reshdr, + blob_table_list); if (ret) { free_integrity_table(old_integrity_table); return ret; @@ -2570,12 +2501,12 @@ finish_write(WIMStruct *wim, int image, int write_flags, } } - new_lookup_table_end = wim->hdr.lookup_table_reshdr.offset_in_wim + - wim->hdr.lookup_table_reshdr.size_in_wim; + new_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim + + wim->hdr.blob_table_reshdr.size_in_wim; ret = write_integrity_table(wim, - new_lookup_table_end, - old_lookup_table_end, + new_blob_table_end, + old_blob_table_end, old_integrity_table); free_integrity_table(old_integrity_table); if (ret) @@ -2664,9 +2595,9 @@ unlock_wim_for_append(WIMStruct *wim) * stops other software from trying to read the file as a normal WIM. * * - The header at the beginning of the file does not contain all the normal - * information; in particular it will have all 0's for the lookup table and - * XML data resource entries. This is because this information cannot be - * determined until the lookup table and XML data have been written. + * information; in particular it will have all 0's for the blob table and XML + * data resource entries. This is because this information cannot be + * determined until the blob table and XML data have been written. * Consequently, wimlib will write the full header at the very end of the * file. The header at the end, however, is only used when reading the WIM * from a seekable file (not a pipe). @@ -2676,14 +2607,14 @@ unlock_wim_for_append(WIMStruct *wim) * reading the WIM from a pipe. This copy of the XML data is ignored if the * WIM is read from a seekable file (not a pipe). * - * - The format of resources, or streams, has been modified to allow them to be - * used before the "lookup table" has been read. Each stream is prefixed with - * a `struct pwm_stream_hdr' that is basically an abbreviated form of `struct - * wim_lookup_table_entry_disk' that only contains the SHA1 message digest, - * uncompressed stream size, and flags that indicate whether the stream is - * compressed. The data of uncompressed streams then follows literally, while - * the data of compressed streams follows in a modified format. Compressed - * streams do not begin with a chunk table, since the chunk table cannot be + * - The format of resources, or blobs, has been modified to allow them to be + * used before the "blob table" has been read. Each blob is prefixed with a + * `struct pwm_blob_hdr' that is basically an abbreviated form of `struct + * blob_descriptor_disk' that only contains the SHA-1 message digest, + * uncompressed blob size, and flags that indicate whether the blob is + * compressed. The data of uncompressed blobs then follows literally, while + * the data of compressed blobs follows in a modified format. Compressed + * blobs do not begin with a chunk table, since the chunk table cannot be * written until all chunks have been compressed. Instead, each compressed * chunk is prefixed by a `struct pwm_chunk_hdr' that gives its size. * Furthermore, the chunk table is written at the end of the resource instead @@ -2691,29 +2622,29 @@ unlock_wim_for_append(WIMStruct *wim) * `struct pwm_chunk_hdr's were not present; also, the chunk table is only * used if the WIM is being read from a seekable file (not a pipe). * - * - Metadata resources always come before other file resources (streams). 
- * (This does not by itself constitute an incompatibility with normal WIMs, - * since this is valid in normal WIMs.) + * - Metadata blobs always come before non-metadata blobs. (This does not by + * itself constitute an incompatibility with normal WIMs, since this is valid + * in normal WIMs.) * - * - At least up to the end of the file resources, all components must be packed - * as tightly as possible; there cannot be any "holes" in the WIM. (This does + * - At least up to the end of the blobs, all components must be packed as + * tightly as possible; there cannot be any "holes" in the WIM. (This does * not by itself consititute an incompatibility with normal WIMs, since this * is valid in normal WIMs.) * - * Note: the lookup table, XML data, and header at the end are not used when + * Note: the blob table, XML data, and header at the end are not used when * applying from a pipe. They exist to support functionality such as image * application and export when the WIM is *not* read from a pipe. * * Layout of pipable WIM: * * ---------+----------+--------------------+----------------+--------------+-----------+--------+ - * | Header | XML data | Metadata resources | File resources | Lookup table | XML data | Header | + * | Header | XML data | Metadata resources | File resources | Blob table | XML data | Header | * ---------+----------+--------------------+----------------+--------------+-----------+--------+ * * Layout of normal WIM: * * +--------+-----------------------------+-------------------------+ - * | Header | File and metadata resources | Lookup table | XML data | + * | Header | File and metadata resources | Blob table | XML data | * +--------+-----------------------------+-------------------------+ * * An optional integrity table can follow the final XML data in both normal and @@ -2729,8 +2660,8 @@ unlock_wim_for_append(WIMStruct *wim) static int write_pipable_wim(WIMStruct *wim, int image, int write_flags, unsigned num_threads, - struct list_head *stream_list_override, - struct list_head *lookup_table_list_ret) + struct list_head *blob_list_override, + struct list_head *blob_table_list_ret) { int ret; struct wim_reshdr xml_reshdr; @@ -2743,36 +2674,36 @@ write_pipable_wim(WIMStruct *wim, int image, int write_flags, * been written. */ /* For efficiency, when wimlib adds an image to the WIM with - * wimlib_add_image(), the SHA1 message digests of files is not + * wimlib_add_image(), the SHA-1 message digests of files is not * calculated; instead, they are calculated while the files are being * written. However, this does not work when writing a pipable WIM, - * since when writing a stream to a pipable WIM, its SHA1 message digest - * needs to be known before the stream data is written. Therefore, - * before getting much farther, we need to pre-calculate the SHA1 - * message digests of all streams that will be written. */ - ret = wim_checksum_unhashed_streams(wim); + * since when writing a blob to a pipable WIM, its SHA-1 message digest + * needs to be known before the blob data is written. Therefore, before + * getting much farther, we need to pre-calculate the SHA-1 message + * digests of all blobs that will be written. */ + ret = wim_checksum_unhashed_blobs(wim); if (ret) return ret; /* Write extra copy of the XML data. */ ret = write_wim_xml_data(wim, image, WIM_TOTALBYTES_OMIT, - &xml_reshdr, - WRITE_RESOURCE_FLAG_PIPABLE); + &xml_reshdr, WRITE_RESOURCE_FLAG_PIPABLE); if (ret) return ret; /* Write metadata resources for the image(s) being included in the * output WIM. 
*/ - ret = write_wim_metadata_resources(wim, image, write_flags); + ret = write_metadata_resources(wim, image, write_flags); if (ret) return ret; - /* Write streams needed for the image(s) being included in the output - * WIM, or streams needed for the split WIM part. */ - return write_wim_streams(wim, image, write_flags, num_threads, - stream_list_override, lookup_table_list_ret); + /* Write blobs needed for the image(s) being included in the output WIM, + * or blobs needed for the split WIM part. */ + return write_file_blobs(wim, image, write_flags, + num_threads, blob_list_override, + blob_table_list_ret); - /* The lookup table, XML data, and header at end are handled by + /* The blob table, XML data, and header at end are handled by * finish_write(). */ } @@ -2786,12 +2717,12 @@ write_wim_part(WIMStruct *wim, unsigned num_threads, unsigned part_number, unsigned total_parts, - struct list_head *stream_list_override, + struct list_head *blob_list_override, const u8 *guid) { int ret; struct wim_header hdr_save; - struct list_head lookup_table_list; + struct list_head blob_table_list; if (total_parts == 1) DEBUG("Writing standalone WIM."); @@ -2843,8 +2774,8 @@ write_wim_part(WIMStruct *wim, if (write_flags & WIMLIB_WRITE_FLAG_RETAIN_GUID) DEBUG("\tRETAIN_GUID"); - if (write_flags & WIMLIB_WRITE_FLAG_PACK_STREAMS) - DEBUG("\tPACK_STREAMS"); + if (write_flags & WIMLIB_WRITE_FLAG_SOLID) + DEBUG("\tSOLID"); if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) DEBUG("\tFILE_DESCRIPTOR"); @@ -2860,7 +2791,7 @@ write_wim_part(WIMStruct *wim, else DEBUG("Number of threads: %u", num_threads); DEBUG("Progress function: %s", (wim->progfunc ? "yes" : "no")); - DEBUG("Stream list: %s", (stream_list_override ? "specified" : "autodetect")); + DEBUG("Blob list: %s", (blob_list_override ? "specified" : "autodetect")); DEBUG("GUID: %s", (write_flags & WIMLIB_WRITE_FLAG_RETAIN_GUID) ? "retain" : guid ? "explicit" : "generate new"); @@ -2898,7 +2829,7 @@ write_wim_part(WIMStruct *wim, /* Save previous header, then start initializing the new one. */ memcpy(&hdr_save, &wim->hdr, sizeof(struct wim_header)); - /* Set default integrity, pipable, and packed stream flags. */ + /* Set default integrity, pipable, and solid flags. */ if (!(write_flags & (WIMLIB_WRITE_FLAG_PIPABLE | WIMLIB_WRITE_FLAG_NOT_PIPABLE))) if (wim_is_pipable(wim)) { @@ -2914,11 +2845,11 @@ write_wim_part(WIMStruct *wim, } if ((write_flags & (WIMLIB_WRITE_FLAG_PIPABLE | - WIMLIB_WRITE_FLAG_PACK_STREAMS)) + WIMLIB_WRITE_FLAG_SOLID)) == (WIMLIB_WRITE_FLAG_PIPABLE | - WIMLIB_WRITE_FLAG_PACK_STREAMS)) + WIMLIB_WRITE_FLAG_SOLID)) { - ERROR("Cannot specify both PIPABLE and PACK_STREAMS!"); + ERROR("Cannot specify both PIPABLE and SOLID!"); return WIMLIB_ERR_INVALID_PARAM; } @@ -2929,9 +2860,9 @@ write_wim_part(WIMStruct *wim, wim->hdr.magic = WIM_MAGIC; /* Set appropriate version number. */ - if ((write_flags & WIMLIB_WRITE_FLAG_PACK_STREAMS) || + if ((write_flags & WIMLIB_WRITE_FLAG_SOLID) || wim->out_compression_type == WIMLIB_COMPRESSION_TYPE_LZMS) - wim->hdr.wim_version = WIM_VERSION_PACKED_STREAMS; + wim->hdr.wim_version = WIM_VERSION_SOLID; else wim->hdr.wim_version = WIM_VERSION_DEFAULT; @@ -2968,7 +2899,7 @@ write_wim_part(WIMStruct *wim, } /* Clear references to resources that have not been written yet. 
*/ - zero_reshdr(&wim->hdr.lookup_table_reshdr); + zero_reshdr(&wim->hdr.blob_table_reshdr); zero_reshdr(&wim->hdr.xml_data_reshdr); zero_reshdr(&wim->hdr.boot_metadata_reshdr); zero_reshdr(&wim->hdr.integrity_table_reshdr); @@ -3023,31 +2954,32 @@ write_wim_part(WIMStruct *wim, if (ret) goto out_restore_hdr; - /* Write metadata resources and streams. */ + /* Write file blobs and metadata resources. */ if (!(write_flags & WIMLIB_WRITE_FLAG_PIPABLE)) { /* Default case: create a normal (non-pipable) WIM. */ - ret = write_wim_streams(wim, image, write_flags, num_threads, - stream_list_override, - &lookup_table_list); + ret = write_file_blobs(wim, image, write_flags, + num_threads, + blob_list_override, + &blob_table_list); if (ret) goto out_restore_hdr; - ret = write_wim_metadata_resources(wim, image, write_flags); + ret = write_metadata_resources(wim, image, write_flags); if (ret) goto out_restore_hdr; } else { /* Non-default case: create pipable WIM. */ ret = write_pipable_wim(wim, image, write_flags, num_threads, - stream_list_override, - &lookup_table_list); + blob_list_override, + &blob_table_list); if (ret) goto out_restore_hdr; write_flags |= WIMLIB_WRITE_FLAG_HEADER_AT_END; } - /* Write lookup table, XML data, and (optional) integrity table. */ - ret = finish_write(wim, image, write_flags, &lookup_table_list); + /* Write blob table, XML data, and (optional) integrity table. */ + ret = finish_write(wim, image, write_flags, &blob_table_list); out_restore_hdr: memcpy(&wim->hdr, &hdr_save, sizeof(struct wim_header)); (void)close_wim_writable(wim, write_flags); @@ -3104,13 +3036,14 @@ any_images_modified(WIMStruct *wim) } static int -check_resource_offset(struct wim_lookup_table_entry *lte, void *_wim) +check_resource_offset(struct blob_descriptor *blob, void *_wim) { const WIMStruct *wim = _wim; off_t end_offset = *(const off_t*)wim->private; - if (lte->resource_location == RESOURCE_IN_WIM && lte->rspec->wim == wim && - lte->rspec->offset_in_wim + lte->rspec->size_in_wim > end_offset) + if (blob->blob_location == BLOB_IN_WIM && + blob->rdesc->wim == wim && + blob->rdesc->offset_in_wim + blob->rdesc->size_in_wim > end_offset) return WIMLIB_ERR_RESOURCE_ORDER; return 0; } @@ -3125,12 +3058,12 @@ check_resource_offsets(WIMStruct *wim, off_t end_offset) unsigned i; wim->private = &end_offset; - ret = for_lookup_table_entry(wim->lookup_table, check_resource_offset, wim); + ret = for_blob_in_table(wim->blob_table, check_resource_offset, wim); if (ret) return ret; for (i = 0; i < wim->hdr.image_count; i++) { - ret = check_resource_offset(wim->image_metadata[i]->metadata_lte, wim); + ret = check_resource_offset(wim->image_metadata[i]->metadata_blob, wim); if (ret) return ret; } @@ -3138,17 +3071,17 @@ check_resource_offsets(WIMStruct *wim, off_t end_offset) } /* - * Overwrite a WIM, possibly appending streams to it. + * Overwrite a WIM, possibly appending new resources to it. * * A WIM looks like (or is supposed to look like) the following: * * Header (212 bytes) - * Streams and metadata resources (variable size) - * Lookup table (variable size) + * Resources for metadata and files (variable size) + * Blob table (variable size) * XML data (variable size) * Integrity table (optional) (variable size) * - * If we are not adding any streams or metadata resources, the lookup table is + * If we are not adding any new files or metadata, then the blob table is * unchanged--- so we only need to overwrite the XML data, integrity table, and * header. 
This operation is potentially unsafe if the program is abruptly * terminated while the XML data or integrity table are being overwritten, but @@ -3159,50 +3092,50 @@ check_resource_offsets(WIMStruct *wim, off_t end_offset) * the program is terminated while the integrity table is being calculated (but * no guarantees, due to write re-ordering...). * - * If we are adding new streams or images (metadata resources), the lookup table - * needs to be changed, and those streams need to be written. In this case, we - * try to perform a safe update of the WIM file by writing the streams *after* - * the end of the previous WIM, then writing the new lookup table, XML data, and - * (optionally) integrity table following the new streams. This will produce a - * layout like the following: + * If we are adding new blobs, including new file data as well as any metadata + * for any new images, then the blob table needs to be changed, and those blobs + * need to be written. In this case, we try to perform a safe update of the WIM + * file by writing the blobs *after* the end of the previous WIM, then writing + * the new blob table, XML data, and (optionally) integrity table following the + * new blobs. This will produce a layout like the following: * * Header (212 bytes) - * (OLD) Streams and metadata resources (variable size) - * (OLD) Lookup table (variable size) + * (OLD) Resources for metadata and files (variable size) + * (OLD) Blob table (variable size) * (OLD) XML data (variable size) * (OLD) Integrity table (optional) (variable size) - * (NEW) Streams and metadata resources (variable size) - * (NEW) Lookup table (variable size) + * (NEW) Resources for metadata and files (variable size) + * (NEW) Blob table (variable size) * (NEW) XML data (variable size) * (NEW) Integrity table (optional) (variable size) * * At all points, the WIM is valid as nothing points to the new data yet. Then, - * the header is overwritten to point to the new lookup table, XML data, and + * the header is overwritten to point to the new blob table, XML data, and * integrity table, to produce the following layout: * * Header (212 bytes) - * Streams and metadata resources (variable size) + * Resources for metadata and files (variable size) * Nothing (variable size) - * More Streams and metadata resources (variable size) - * Lookup table (variable size) + * Resources for metadata and files (variable size) + * Blob table (variable size) * XML data (variable size) * Integrity table (optional) (variable size) * * This method allows an image to be appended to a large WIM very quickly, and - * is crash-safe except in the case of write re-ordering, but the - * disadvantage is that a small hole is left in the WIM where the old lookup - * table, xml data, and integrity table were. (These usually only take up a - * small amount of space compared to the streams, however.) + * is crash-safe except in the case of write re-ordering, but the disadvantage + * is that a small hole is left in the WIM where the old blob table, xml data, + * and integrity table were. (These usually only take up a small amount of + * space compared to the blobs, however.) 
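As a concrete illustration of the append-then-commit scheme described above (and of the truncate-on-failure recovery that overwrite_wim_inplace() performs in its out_truncate path further below), here is a simplified sketch using plain POSIX I/O. It is not wimlib's implementation: HDR_SIZE, the single new_data buffer, and append_and_commit() are hypothetical stand-ins, and real code must also handle short writes and torn header writes.

#include <stddef.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#define HDR_SIZE 212    /* fixed-size header at the start of the file */

static int
append_and_commit(int fd, const void *new_data, size_t new_size,
                  const void *new_hdr)
{
        struct stat st;
        off_t old_end;

        if (fstat(fd, &st) != 0)
                return -1;
        old_end = st.st_size;

        /* 1. Append the new data after the old end of the file.  Nothing in
         *    the old header references it yet, so a crash here leaves the
         *    original file fully intact. */
        if (pwrite(fd, new_data, new_size, old_end) != (ssize_t)new_size)
                goto rollback;

        /* 2. Flush the appended data before rewriting the header, so that
         *    write re-ordering cannot make the header point at data that
         *    never reached the disk. */
        if (fsync(fd) != 0)
                goto rollback;

        /* 3. Commit: overwrite the fixed-size header at offset 0 so that it
         *    references the newly appended tables.  (A torn header write
         *    cannot be undone by truncation; this sketch ignores that.) */
        if (pwrite(fd, new_hdr, HDR_SIZE, 0) != HDR_SIZE)
                return -1;

        return fsync(fd);

rollback:
        /* Nothing was committed; truncate back to the old size so the file
         * is byte-for-byte the original. */
        (void)ftruncate(fd, old_end);
        return -1;
}

The essential ordering is that the appended data reaches the disk before the header that references it is rewritten; until that final header update completes, the file on disk is still a valid copy of the original WIM.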
*/ static int overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads) { int ret; off_t old_wim_end; - u64 old_lookup_table_end, old_xml_begin, old_xml_end; + u64 old_blob_table_end, old_xml_begin, old_xml_end; struct wim_header hdr_save; - struct list_head stream_list; - struct list_head lookup_table_list; + struct list_head blob_list; + struct list_head blob_table_list; struct filter_context filter_ctx; DEBUG("Overwriting `%"TS"' in-place", wim->filename); @@ -3216,22 +3149,21 @@ overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads) if (wim_has_integrity_table(wim)) write_flags |= WIMLIB_WRITE_FLAG_CHECK_INTEGRITY; - /* Set WIM version if adding packed streams. */ - if (write_flags & WIMLIB_WRITE_FLAG_PACK_STREAMS) - wim->hdr.wim_version = WIM_VERSION_PACKED_STREAMS; + /* Set WIM version if writing solid resources. */ + if (write_flags & WIMLIB_WRITE_FLAG_SOLID) + wim->hdr.wim_version = WIM_VERSION_SOLID; /* Set additional flags for overwrite. */ write_flags |= WIMLIB_WRITE_FLAG_OVERWRITE | WIMLIB_WRITE_FLAG_STREAMS_OK; - /* Make sure that the integrity table (if present) is after the XML - * data, and that there are no stream resources, metadata resources, or - * lookup tables after the XML data. Otherwise, these data would be - * overwritten. */ + /* Make sure there is no data after the XML data, except possibly an + * integrity table. If this were the case, then this data would be + * overwritten. */ old_xml_begin = wim->hdr.xml_data_reshdr.offset_in_wim; old_xml_end = old_xml_begin + wim->hdr.xml_data_reshdr.size_in_wim; - old_lookup_table_end = wim->hdr.lookup_table_reshdr.offset_in_wim + - wim->hdr.lookup_table_reshdr.size_in_wim; + old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim + + wim->hdr.blob_table_reshdr.size_in_wim; if (wim->hdr.integrity_table_reshdr.offset_in_wim != 0 && wim->hdr.integrity_table_reshdr.offset_in_wim < old_xml_end) { WARNING("Didn't expect the integrity table to be before the XML data"); @@ -3239,8 +3171,8 @@ overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads) ret = WIMLIB_ERR_RESOURCE_ORDER; goto out_restore_memory_hdr; } - if (old_lookup_table_end > old_xml_begin) { - WARNING("Didn't expect the lookup table to be after the XML data"); + if (old_blob_table_end > old_xml_begin) { + WARNING("Didn't expect the blob table to be after the XML data"); ret = WIMLIB_ERR_RESOURCE_ORDER; goto out_restore_memory_hdr; } @@ -3251,22 +3183,22 @@ overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads) * overwrite these resources). */ if (!wim->image_deletion_occurred && !any_images_modified(wim)) { /* If no images have been modified and no images have been - * deleted, a new lookup table does not need to be written. We + * deleted, a new blob table does not need to be written. We * shall write the new XML data and optional integrity table - * immediately after the lookup table. Note that this may + * immediately after the blob table. Note that this may * overwrite an existing integrity table. */ - DEBUG("Skipping writing lookup table " + DEBUG("Skipping writing blob table " "(no images modified or deleted)"); - old_wim_end = old_lookup_table_end; - write_flags |= WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE | + old_wim_end = old_blob_table_end; + write_flags |= WIMLIB_WRITE_FLAG_NO_BLOB_TABLE | WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML; } else if (wim->hdr.integrity_table_reshdr.offset_in_wim != 0) { - /* Old WIM has an integrity table; begin writing new streams * after it.
*/ + /* Old WIM has an integrity table; begin writing new blobs after + * it. */ old_wim_end = wim->hdr.integrity_table_reshdr.offset_in_wim + wim->hdr.integrity_table_reshdr.size_in_wim; } else { - /* No existing integrity table; begin writing new streams after + /* No existing integrity table; begin writing new blobs after * the old XML data. */ old_wim_end = old_xml_end; } @@ -3275,9 +3207,9 @@ overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads) if (ret) goto out_restore_memory_hdr; - ret = prepare_stream_list_for_write(wim, WIMLIB_ALL_IMAGES, write_flags, - &stream_list, &lookup_table_list, - &filter_ctx); + ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES, write_flags, + &blob_list, &blob_table_list, + &filter_ctx); if (ret) goto out_restore_memory_hdr; @@ -3303,20 +3235,17 @@ overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads) goto out_restore_physical_hdr; } - ret = wim_write_stream_list(wim, - &stream_list, - write_flags, - num_threads, - &filter_ctx); + ret = wim_write_blob_list(wim, &blob_list, write_flags, + num_threads, &filter_ctx); if (ret) goto out_truncate; - ret = write_wim_metadata_resources(wim, WIMLIB_ALL_IMAGES, write_flags); + ret = write_metadata_resources(wim, WIMLIB_ALL_IMAGES, write_flags); if (ret) goto out_truncate; ret = finish_write(wim, WIMLIB_ALL_IMAGES, write_flags, - &lookup_table_list); + &blob_table_list); if (ret) goto out_truncate; @@ -3324,7 +3253,7 @@ overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads) return 0; out_truncate: - if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) { + if (!(write_flags & WIMLIB_WRITE_FLAG_NO_BLOB_TABLE)) { WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)", wim->filename, old_wim_end); /* Return value of ftruncate() is ignored because this is