#if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
/* On BSD, this should be included before "wimlib/list.h" so that "wimlib/list.h" can
- * overwrite the LIST_HEAD macro. */
+ * override the LIST_HEAD macro. */
# include <sys/file.h>
#endif
write_flags = ctx->write_flags;
wim = ctx->wim;
- if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE &&
+ if (write_flags & WIMLIB_WRITE_FLAG_APPEND &&
blob->blob_location == BLOB_IN_WIM &&
blob->rdesc->wim == wim)
return 1;
return blob_filtered(blob, ctx) < 0;
}
-static inline int
+static inline bool
may_soft_filter_blobs(const struct filter_context *ctx)
{
- if (ctx == NULL)
- return 0;
- return ctx->write_flags & WIMLIB_WRITE_FLAG_OVERWRITE;
+ return ctx && (ctx->write_flags & WIMLIB_WRITE_FLAG_APPEND);
}
-static inline int
+static inline bool
may_hard_filter_blobs(const struct filter_context *ctx)
{
- if (ctx == NULL)
- return 0;
- return ctx->write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS;
+ return ctx && (ctx->write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS);
}
-static inline int
+static inline bool
may_filter_blobs(const struct filter_context *ctx)
{
return (may_soft_filter_blobs(ctx) || may_hard_filter_blobs(ctx));
}
-/* Return true if the specified resource is compressed and the compressed data
- * can be reused with the specified output parameters. */
+/* Return true if the specified blob is located in a WIM resource which can be
+ * reused in the output WIM file, without being recompressed. */
static bool
-can_raw_copy(const struct blob_descriptor *blob,
- int write_resource_flags, int out_ctype, u32 out_chunk_size)
+can_raw_copy(const struct blob_descriptor *blob, int write_resource_flags,
+ int out_ctype, u32 out_chunk_size)
{
const struct wim_resource_descriptor *rdesc;
+ /* Recompress everything if requested. */
if (write_resource_flags & WRITE_RESOURCE_FLAG_RECOMPRESS)
return false;
- if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
- return false;
-
+ /* A blob not located in a WIM resource cannot be reused. */
if (blob->blob_location != BLOB_IN_WIM)
return false;
rdesc = blob->rdesc;
- if (rdesc->is_pipable != !!(write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE))
+ /* Only reuse compressed resources. */
+ if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
+ !(rdesc->flags & (WIM_RESHDR_FLAG_COMPRESSED |
+ WIM_RESHDR_FLAG_SOLID)))
+ return false;
+
+ /* When writing a pipable WIM, we can only reuse pipable resources; and
+ * when writing a non-pipable WIM, we can only reuse non-pipable
+ * resources. */
+ if (rdesc->is_pipable !=
+ !!(write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE))
return false;
+ /* When writing a solid WIM, we can only reuse solid resources; and when
+ * writing a non-solid WIM, we can only reuse non-solid resources. */
+ if (!!(rdesc->flags & WIM_RESHDR_FLAG_SOLID) !=
+ !!(write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
+ return false;
+
+ /* Note: it is theoretically possible to copy chunks of compressed data
+ * between non-solid, solid, and pipable resources. However, we don't
+ * currently implement this optimization because it would be complex and
+ * would usually go unused. */
+
if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) {
- /* Normal compressed resource: Must use same compression type
- * and chunk size. */
+ /* To re-use a non-solid resource, it must use the desired
+ * compression type and chunk size. */
return (rdesc->compression_type == out_ctype &&
rdesc->chunk_size == out_chunk_size);
- }
-
- if ((rdesc->flags & WIM_RESHDR_FLAG_SOLID) &&
- (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
- {
+ } else {
		/* Solid resource: Such resources may contain multiple blobs,
		 * and in general only a subset of them need to be written.  As
		 * a heuristic, re-use the raw data if more than two-thirds the
		 * uncompressed size is being written.  */

		/* Note: solid resources contain a header that specifies the
		 * compression type and chunk size; therefore we don't need to
		 * check if they are compatible with @out_ctype and
		 * @out_chunk_size.  */
+ /* Did we already decide to reuse the resource? */
+ if (rdesc->raw_copy_ok)
+ return true;
+
struct blob_descriptor *res_blob;
u64 write_size = 0;
return (write_size > rdesc->uncompressed_size * 2 / 3);
}
-
- return false;
}
static u32
progress->write_streams.completed_streams += complete_count;
}
- if (progress->write_streams.completed_bytes >= progress_data->next_progress)
- {
+ if (progress->write_streams.completed_bytes >= progress_data->next_progress) {
+
ret = call_progress(progress_data->progfunc,
WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
progress,
if (ret)
return ret;
- if (progress_data->next_progress == progress->write_streams.total_bytes) {
- progress_data->next_progress = ~(u64)0;
- } else {
- /* Handle rate-limiting of messages */
-
- /* Send new message as soon as another 1/128 of the
- * total has been written. (Arbitrary number.) */
- progress_data->next_progress =
- progress->write_streams.completed_bytes +
- progress->write_streams.total_bytes / 128;
-
- /* ... Unless that would be more than 5000000 bytes, in
- * which case send the next after the next 5000000
- * bytes. (Another arbitrary number.) */
- if (progress->write_streams.completed_bytes + 5000000 <
- progress_data->next_progress)
- progress_data->next_progress =
- progress->write_streams.completed_bytes + 5000000;
-
- /* ... But always send a message as soon as we're
- * completely done. */
- if (progress->write_streams.total_bytes <
- progress_data->next_progress)
- progress_data->next_progress =
- progress->write_streams.total_bytes;
- }
+ set_next_progress(progress->write_streams.completed_bytes,
+ progress->write_streams.total_bytes,
+ &progress_data->next_progress);
}
return 0;
}
struct filter_context *filter_ctx;
- /* Upper bound on the total number of bytes that need to be compressed.
- * */
- u64 num_bytes_to_compress;
-
/* Pointer to the chunk_compressor implementation being used for
* compressing chunks of data, or NULL if chunks are being written
* uncompressed. */
hdr.chunk_size = cpu_to_le32(ctx->out_chunk_size);
hdr.compression_format = cpu_to_le32(ctx->out_ctype);
- BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_XPRESS != 1);
- BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZX != 2);
- BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZMS != 3);
+ STATIC_ASSERT(WIMLIB_COMPRESSION_TYPE_XPRESS == 1);
+ STATIC_ASSERT(WIMLIB_COMPRESSION_TYPE_LZX == 2);
+ STATIC_ASSERT(WIMLIB_COMPRESSION_TYPE_LZMS == 3);
ret = full_pwrite(ctx->out_fd, &hdr, sizeof(hdr),
chunk_table_offset - sizeof(hdr));
{
int ret;
struct wim_inode *inode;
+ tchar *cookie1;
+ tchar *cookie2;
if (!blob->may_send_done_with_file)
return 0;
inode = blob->file_inode;
wimlib_assert(inode != NULL);
- wimlib_assert(inode->num_remaining_streams > 0);
- if (--inode->num_remaining_streams > 0)
+ wimlib_assert(inode->i_num_remaining_streams > 0);
+ if (--inode->i_num_remaining_streams > 0)
return 0;
-#ifdef __WIN32__
- /* XXX: This logic really should be somewhere else. */
-
- /* We want the path to the file, but blob->file_on_disk might actually
- * refer to a named data stream. Temporarily strip the named data
- * stream from the path. */
- wchar_t *p_colon = NULL;
- wchar_t *p_question_mark = NULL;
- const wchar_t *p_stream_name;
-
- p_stream_name = path_stream_name(blob->file_on_disk);
- if (unlikely(p_stream_name)) {
- p_colon = (wchar_t *)(p_stream_name - 1);
- wimlib_assert(*p_colon == L':');
- *p_colon = L'\0';
- }
-
- /* We also should use a fake Win32 path instead of a NT path */
- if (!wcsncmp(blob->file_on_disk, L"\\??\\", 4)) {
- p_question_mark = &blob->file_on_disk[1];
- *p_question_mark = L'\\';
- }
-#endif
+ cookie1 = progress_get_streamless_path(blob->file_on_disk);
+ cookie2 = progress_get_win32_path(blob->file_on_disk);
ret = done_with_file(blob->file_on_disk, progfunc, progctx);
-#ifdef __WIN32__
- if (p_colon)
- *p_colon = L':';
- if (p_question_mark)
- *p_question_mark = L'?';
-#endif
+ progress_put_win32_path(cookie2);
+ progress_put_streamless_path(cookie1);
+
return ret;
}
if (filedes_seek(out_fd, begin_offset) == -1)
return 0;
- ret = extract_full_blob_to_fd(blob, out_fd);
+ ret = extract_blob_to_fd(blob, out_fd);
if (ret) {
/* Error reading the uncompressed data. */
if (out_fd->offset == begin_offset &&
wimlib_assert(out_fd->offset - begin_offset == blob->size);
- if (out_fd->offset < end_offset &&
- 0 != ftruncate(out_fd->fd, out_fd->offset))
- {
- ERROR_WITH_ERRNO("Can't truncate output file to "
- "offset %"PRIu64, out_fd->offset);
- return WIMLIB_ERR_WRITE;
- }
+ /* We could ftruncate() the file to 'out_fd->offset' here, but there
+ * isn't much point. Usually we will only be truncating by a few bytes
+ * and will just overwrite the data immediately. */
blob->out_reshdr.size_in_wim = blob->size;
blob->out_reshdr.flags &= ~(WIM_RESHDR_FLAG_COMPRESSED |
 * @raw_copy_blobs.  Return the total uncompressed size of the blobs that were
 * not marked for raw copy (i.e. those that still need to be written). */
static u64
-find_raw_copy_blobs(struct list_head *blob_list,
- int write_resource_flags,
- int out_ctype,
- u32 out_chunk_size,
+find_raw_copy_blobs(struct list_head *blob_list, int write_resource_flags,
+ int out_ctype, u32 out_chunk_size,
struct list_head *raw_copy_blobs)
{
struct blob_descriptor *blob, *tmp;
- u64 num_bytes_to_compress = 0;
+ u64 num_nonraw_bytes = 0;
INIT_LIST_HEAD(raw_copy_blobs);
blob->rdesc->raw_copy_ok = 0;
list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
- if (blob->blob_location == BLOB_IN_WIM &&
- blob->rdesc->raw_copy_ok)
- {
- list_move_tail(&blob->write_blobs_list,
- raw_copy_blobs);
- } else if (can_raw_copy(blob, write_resource_flags,
- out_ctype, out_chunk_size))
+ if (can_raw_copy(blob, write_resource_flags,
+ out_ctype, out_chunk_size))
{
blob->rdesc->raw_copy_ok = 1;
- list_move_tail(&blob->write_blobs_list,
- raw_copy_blobs);
+ list_move_tail(&blob->write_blobs_list, raw_copy_blobs);
} else {
- num_bytes_to_compress += blob->size;
+ num_nonraw_bytes += blob->size;
}
}
- return num_bytes_to_compress;
+ return num_nonraw_bytes;
}
/* Copy a raw compressed resource located in another WIM file to the WIM file
}
static void
-remove_empty_blobs(struct list_head *blob_list)
+validate_blob_list(struct list_head *blob_list)
{
- struct blob_descriptor *blob, *tmp;
+ struct blob_descriptor *blob;
- list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
+ list_for_each_entry(blob, blob_list, write_blobs_list) {
wimlib_assert(blob->will_be_in_output_wim);
- if (blob->size == 0) {
- list_del(&blob->write_blobs_list);
- blob->out_reshdr.offset_in_wim = 0;
- blob->out_reshdr.size_in_wim = 0;
- blob->out_reshdr.uncompressed_size = 0;
- blob->out_reshdr.flags = reshdr_flags_for_blob(blob);
- }
+ wimlib_assert(blob->size != 0);
}
}
list_for_each_entry(blob, blob_list, write_blobs_list) {
if (blob_is_in_file(blob)) {
- blob->file_inode->num_remaining_streams = 0;
+ blob->file_inode->i_num_remaining_streams = 0;
blob->may_send_done_with_file = 1;
} else {
blob->may_send_done_with_file = 0;
list_for_each_entry(blob, blob_list, write_blobs_list)
if (blob->may_send_done_with_file)
- blob->file_inode->num_remaining_streams++;
+ blob->file_inode->i_num_remaining_streams++;
}
/*
int ret;
struct write_blobs_ctx ctx;
struct list_head raw_copy_blobs;
+ u64 num_nonraw_bytes;
wimlib_assert((write_resource_flags &
(WRITE_RESOURCE_FLAG_SOLID |
(WRITE_RESOURCE_FLAG_SOLID |
WRITE_RESOURCE_FLAG_PIPABLE));
- remove_empty_blobs(blob_list);
+ validate_blob_list(blob_list);
if (list_empty(blob_list))
return 0;
ctx.progress_data.progfunc = progfunc;
ctx.progress_data.progctx = progctx;
- ctx.num_bytes_to_compress = find_raw_copy_blobs(blob_list,
- write_resource_flags,
- out_ctype,
- out_chunk_size,
- &raw_copy_blobs);
+ num_nonraw_bytes = find_raw_copy_blobs(blob_list, write_resource_flags,
+ out_ctype, out_chunk_size,
+ &raw_copy_blobs);
- if (ctx.num_bytes_to_compress == 0)
+ if (num_nonraw_bytes == 0)
goto out_write_raw_copy_resources;
/* Unless uncompressed output was required, allocate a chunk_compressor
if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
#ifdef ENABLE_MULTITHREADED_COMPRESSION
- if (ctx.num_bytes_to_compress > max(2000000, out_chunk_size)) {
+ if (num_nonraw_bytes > max(2000000, out_chunk_size)) {
ret = new_parallel_chunk_compressor(out_ctype,
out_chunk_size,
num_threads, 0,
goto out_destroy_context;
if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
- ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress);
+ ret = begin_write_resource(&ctx, num_nonraw_bytes);
if (ret)
goto out_destroy_context;
}
/* Read the list of blobs needing to be compressed, using the specified
* callbacks to execute processing of the data. */
- struct read_blob_list_callbacks cbs = {
- .begin_blob = write_blob_begin_read,
- .begin_blob_ctx = &ctx,
- .consume_chunk = write_blob_process_chunk,
- .consume_chunk_ctx = &ctx,
- .end_blob = write_blob_end_read,
- .end_blob_ctx = &ctx,
+ struct read_blob_callbacks cbs = {
+ .begin_blob = write_blob_begin_read,
+ .consume_chunk = write_blob_process_chunk,
+ .end_blob = write_blob_end_read,
+ .ctx = &ctx,
};
ret = read_blob_list(blob_list,
int ret;
struct blob_descriptor blob;
+ if (unlikely(buf_size == 0)) {
+ zero_reshdr(out_reshdr);
+ if (hash_ret)
+ copy_hash(hash_ret, zero_hash);
+ return 0;
+ }
+
blob_set_is_located_in_attached_buffer(&blob, (void *)buf, buf_size);
sha1_buffer(buf, buf_size, blob.hash);
blob.unhashed = 0;
* STREAMS_OK: For writes of all images, assume that all blobs in the blob
* table of @wim and the per-image lists of unhashed blobs should be taken
* as-is, and image metadata should not be searched for references. This
- * does not exclude filtering with OVERWRITE and SKIP_EXTERNAL_WIMS, below.
+ * does not exclude filtering with APPEND and SKIP_EXTERNAL_WIMS, below.
*
- * OVERWRITE: Blobs already present in @wim shall not be returned in
+ * APPEND: Blobs already present in @wim shall not be returned in
* @blob_list_ret.
*
* SKIP_EXTERNAL_WIMS: Blobs already present in a WIM file, but not @wim,
* the blobs in @blob_list_ret.
*
* This list will be a proper superset of @blob_list_ret if and only if
- * WIMLIB_WRITE_FLAG_OVERWRITE was specified in @write_flags and some of
- * the blobs that would otherwise need to be written were already located
- * in the WIM file.
+ * WIMLIB_WRITE_FLAG_APPEND was specified in @write_flags and some of the
+ * blobs that would otherwise need to be written were already located in
+ * the WIM file.
*
* All blobs in this list will have @out_refcnt set to the number of
* references to the blob in the output WIM. If
if (imd->modified) {
ret = write_metadata_resource(wim, i,
write_resource_flags);
- } else if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
+ } else if (write_flags & WIMLIB_WRITE_FLAG_APPEND) {
blob_set_out_reshdr_for_reuse(imd->metadata_blob);
ret = 0;
} else {
int ret;
/* Set output resource metadata for blobs already present in WIM. */
- if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
+ if (write_flags & WIMLIB_WRITE_FLAG_APPEND) {
struct blob_descriptor *blob;
list_for_each_entry(blob, blob_table_list, blob_table_list) {
if (blob->blob_location == BLOB_IN_WIM &&
wim->out_hdr.boot_idx - 1]->metadata_blob->out_reshdr);
}
- /* If overwriting the WIM file containing an integrity table in-place,
- * we'd like to re-use the information in the old integrity table
- * instead of recalculating it. But we might overwrite the old
- * integrity table when we expand the XML data. Read it into memory
- * just in case. */
- if ((write_flags & (WIMLIB_WRITE_FLAG_OVERWRITE |
+ /* If appending to a WIM file containing an integrity table, we'd like
+ * to re-use the information in the old integrity table instead of
+ * recalculating it. But we might overwrite the old integrity table
+ * when we expand the XML data. Read it into memory just in case. */
+ if ((write_flags & (WIMLIB_WRITE_FLAG_APPEND |
WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)) ==
- (WIMLIB_WRITE_FLAG_OVERWRITE |
+ (WIMLIB_WRITE_FLAG_APPEND |
WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
&& wim_has_integrity_table(wim))
{
if (write_flags & WIMLIB_WRITE_FLAG_RETAIN_GUID)
guid = wim->hdr.guid;
if (guid)
- memcpy(wim->out_hdr.guid, guid, WIMLIB_GUID_LEN);
+ copy_guid(wim->out_hdr.guid, guid);
else
- randomize_byte_array(wim->out_hdr.guid, WIMLIB_GUID_LEN);
+ generate_guid(wim->out_hdr.guid);
/* Set the part number and total parts. */
wim->out_hdr.part_number = part_number;
}
/* Make sure no file or metadata resources are located after the XML data (or
- * integrity table if present)--- otherwise we can't safely overwrite the WIM in
- * place and we return WIMLIB_ERR_RESOURCE_ORDER. */
+ * integrity table if present)--- otherwise we can't safely append to the WIM
+ * file and we return WIMLIB_ERR_RESOURCE_ORDER. */
static int
check_resource_offsets(WIMStruct *wim, off_t end_offset)
{
* XML data (variable size)
* Integrity table (optional) (variable size)
*
- * This method allows an image to be appended to a large WIM very quickly, and
+ * This function allows an image to be appended to a large WIM very quickly, and
* is crash-safe except in the case of write re-ordering, but the disadvantage
* is that a small hole is left in the WIM where the old blob table, xml data,
* and integrity table were. (These usually only take up a small amount of
write_flags |= WIMLIB_WRITE_FLAG_SOLID;
/* Set additional flags for overwrite. */
- write_flags |= WIMLIB_WRITE_FLAG_OVERWRITE |
+ write_flags |= WIMLIB_WRITE_FLAG_APPEND |
WIMLIB_WRITE_FLAG_STREAMS_OK;
	/* Make sure there is no data after the XML data, except possibly an
&progress, wim->progctx);
}
-/* Determine if the specified WIM file may be updated by appending in-place
- * rather than writing and replacing it with an entirely new file. */
+/* Determine if the specified WIM file may be updated in-place rather than by
+ * writing and replacing it with an entirely new file. */
static bool
can_overwrite_wim_inplace(const WIMStruct *wim, int write_flags)
{