#if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
/* On BSD, this should be included before "wimlib/list.h" so that "wimlib/list.h" can
- * overwrite the LIST_HEAD macro. */
+ * override the LIST_HEAD macro. */
# include <sys/file.h>
#endif
write_flags = ctx->write_flags;
wim = ctx->wim;
- if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE &&
+ if (write_flags & WIMLIB_WRITE_FLAG_APPEND &&
blob->blob_location == BLOB_IN_WIM &&
blob->rdesc->wim == wim)
return 1;
return blob_filtered(blob, ctx) < 0;
}
-static inline int
+static inline bool
may_soft_filter_blobs(const struct filter_context *ctx)
{
- if (ctx == NULL)
- return 0;
- return ctx->write_flags & WIMLIB_WRITE_FLAG_OVERWRITE;
+ return ctx && (ctx->write_flags & WIMLIB_WRITE_FLAG_APPEND);
}
-static inline int
+static inline bool
may_hard_filter_blobs(const struct filter_context *ctx)
{
- if (ctx == NULL)
- return 0;
- return ctx->write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS;
+ return ctx && (ctx->write_flags & WIMLIB_WRITE_FLAG_SKIP_EXTERNAL_WIMS);
}
-static inline int
+static inline bool
may_filter_blobs(const struct filter_context *ctx)
{
return (may_soft_filter_blobs(ctx) || may_hard_filter_blobs(ctx));
}
-/* Return true if the specified resource is compressed and the compressed data
- * can be reused with the specified output parameters. */
+/* Return true if the specified blob is located in a WIM resource which can be
+ * reused in the output WIM file, without being recompressed. */
static bool
-can_raw_copy(const struct blob_descriptor *blob,
- int write_resource_flags, int out_ctype, u32 out_chunk_size)
+can_raw_copy(const struct blob_descriptor *blob, int write_resource_flags,
+ int out_ctype, u32 out_chunk_size)
{
const struct wim_resource_descriptor *rdesc;
+ /* Recompress everything if requested. */
if (write_resource_flags & WRITE_RESOURCE_FLAG_RECOMPRESS)
return false;
- if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
- return false;
-
+ /* A blob not located in a WIM resource cannot be reused. */
if (blob->blob_location != BLOB_IN_WIM)
return false;
rdesc = blob->rdesc;
- if (rdesc->is_pipable != !!(write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE))
+ /* In the case of an in-place compaction, always reuse resources located
+ * in the WIM being compacted. */
+ if (rdesc->wim->being_compacted)
+ return true;
+
+ /* Otherwise, only reuse compressed resources. */
+ if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
+ !(rdesc->flags & (WIM_RESHDR_FLAG_COMPRESSED |
+ WIM_RESHDR_FLAG_SOLID)))
+ return false;
+
+ /* When writing a pipable WIM, we can only reuse pipable resources; and
+ * when writing a non-pipable WIM, we can only reuse non-pipable
+ * resources. */
+ if (rdesc->is_pipable !=
+ !!(write_resource_flags & WRITE_RESOURCE_FLAG_PIPABLE))
return false;
+ /* When writing a solid WIM, we can only reuse solid resources; and when
+ * writing a non-solid WIM, we can only reuse non-solid resources. */
+ if (!!(rdesc->flags & WIM_RESHDR_FLAG_SOLID) !=
+ !!(write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
+ return false;
+
+ /* Note: it is theoretically possible to copy chunks of compressed data
+ * between non-solid, solid, and pipable resources. However, we don't
+ * currently implement this optimization because it would be complex and
+ * would usually go unused. */
+
if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) {
- /* Normal compressed resource: Must use same compression type
- * and chunk size. */
+ /* To re-use a non-solid resource, it must use the desired
+ * compression type and chunk size. */
return (rdesc->compression_type == out_ctype &&
rdesc->chunk_size == out_chunk_size);
- }
-
- if ((rdesc->flags & WIM_RESHDR_FLAG_SOLID) &&
- (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID))
- {
+ } else {
/* Solid resource: Such resources may contain multiple blobs,
* and in general only a subset of them need to be written. As
* a heuristic, re-use the raw data if more than two-thirds the
* check if they are compatible with @out_ctype and
* @out_chunk_size. */
+ /* Did we already decide to reuse the resource? */
+ if (rdesc->raw_copy_ok)
+ return true;
+
struct blob_descriptor *res_blob;
u64 write_size = 0;
return (write_size > rdesc->uncompressed_size * 2 / 3);
}
-
- return false;
}
static u32
struct filter_context *filter_ctx;
- /* Upper bound on the total number of bytes that need to be compressed.
- * */
- u64 num_bytes_to_compress;
-
/* Pointer to the chunk_compressor implementation being used for
* compressing chunks of data, or NULL if chunks are being written
* uncompressed. */
hdr.chunk_size = cpu_to_le32(ctx->out_chunk_size);
hdr.compression_format = cpu_to_le32(ctx->out_ctype);
- BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_XPRESS != 1);
- BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZX != 2);
- BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZMS != 3);
+ STATIC_ASSERT(WIMLIB_COMPRESSION_TYPE_XPRESS == 1);
+ STATIC_ASSERT(WIMLIB_COMPRESSION_TYPE_LZX == 2);
+ STATIC_ASSERT(WIMLIB_COMPRESSION_TYPE_LZMS == 3);
ret = full_pwrite(ctx->out_fd, &hdr, sizeof(hdr),
chunk_table_offset - sizeof(hdr));
{
int ret;
struct wim_inode *inode;
+ tchar *cookie1;
+ tchar *cookie2;
if (!blob->may_send_done_with_file)
return 0;
if (--inode->i_num_remaining_streams > 0)
return 0;
-#ifdef __WIN32__
- /* XXX: This logic really should be somewhere else. */
-
- /* We want the path to the file, but blob->file_on_disk might actually
- * refer to a named data stream. Temporarily strip the named data
- * stream from the path. */
- wchar_t *p_colon = NULL;
- wchar_t *p_question_mark = NULL;
- const wchar_t *p_stream_name;
-
- p_stream_name = path_stream_name(blob->file_on_disk);
- if (unlikely(p_stream_name)) {
- p_colon = (wchar_t *)(p_stream_name - 1);
- wimlib_assert(*p_colon == L':');
- *p_colon = L'\0';
- }
-
- /* We also should use a fake Win32 path instead of a NT path */
- if (!wcsncmp(blob->file_on_disk, L"\\??\\", 4)) {
- p_question_mark = &blob->file_on_disk[1];
- *p_question_mark = L'\\';
- }
-#endif
+ cookie1 = progress_get_streamless_path(blob->file_on_disk);
+ cookie2 = progress_get_win32_path(blob->file_on_disk);
ret = done_with_file(blob->file_on_disk, progfunc, progctx);
-#ifdef __WIN32__
- if (p_colon)
- *p_colon = L':';
- if (p_question_mark)
- *p_question_mark = L'?';
-#endif
+ progress_put_win32_path(cookie2);
+ progress_put_streamless_path(cookie1);
+
return ret;
}
wimlib_assert(out_fd->offset - begin_offset == blob->size);
- if (out_fd->offset < end_offset &&
- 0 != ftruncate(out_fd->fd, out_fd->offset))
- {
- ERROR_WITH_ERRNO("Can't truncate output file to "
- "offset %"PRIu64, out_fd->offset);
- return WIMLIB_ERR_WRITE;
- }
+ /* We could ftruncate() the file to 'out_fd->offset' here, but there
+ * isn't much point. Usually we will only be truncating by a few bytes
+ * and will just overwrite the data immediately. */
blob->out_reshdr.size_in_wim = blob->size;
blob->out_reshdr.flags &= ~(WIM_RESHDR_FLAG_COMPRESSED |
return status;
}
-/* Compute statistics about a list of blobs that will be written.
+/*
+ * Compute statistics about a list of blobs that will be written.
*
* Assumes the blobs are sorted such that all blobs located in each distinct WIM
- * (specified by WIMStruct) are together. */
-static void
+ * (specified by WIMStruct) are together.
+ *
+ * For compactions, also verify that there are no overlapping resources. This
+ * really should be checked earlier, but for now it's easiest to check here.
+ */
+static int
compute_blob_list_stats(struct list_head *blob_list,
struct write_blobs_ctx *ctx)
{
u64 num_blobs = 0;
u64 total_parts = 0;
WIMStruct *prev_wim_part = NULL;
+ const struct wim_resource_descriptor *prev_rdesc = NULL;
list_for_each_entry(blob, blob_list, write_blobs_list) {
num_blobs++;
total_bytes += blob->size;
if (blob->blob_location == BLOB_IN_WIM) {
- if (prev_wim_part != blob->rdesc->wim) {
- prev_wim_part = blob->rdesc->wim;
+ const struct wim_resource_descriptor *rdesc = blob->rdesc;
+ WIMStruct *wim = rdesc->wim;
+
+ if (prev_wim_part != wim) {
+ prev_wim_part = wim;
total_parts++;
}
+ if (unlikely(wim->being_compacted) && rdesc != prev_rdesc) {
+ if (prev_rdesc != NULL &&
+ rdesc->offset_in_wim <
+ prev_rdesc->offset_in_wim +
+ prev_rdesc->size_in_wim)
+ {
+ WARNING("WIM file contains overlapping "
+ "resources! Compaction is not "
+ "possible.");
+ return WIMLIB_ERR_RESOURCE_ORDER;
+ }
+ prev_rdesc = rdesc;
+ }
}
}
ctx->progress_data.progress.write_streams.total_bytes = total_bytes;
ctx->progress_data.progress.write_streams.total_parts = total_parts;
ctx->progress_data.progress.write_streams.completed_parts = 0;
ctx->progress_data.next_progress = 0;
+ return 0;
}
/* Find blobs in @blob_list that can be copied to the output WIM in raw form
* @raw_copy_blobs. Return the total uncompressed size of the blobs that need
* to be compressed. */
static u64
-find_raw_copy_blobs(struct list_head *blob_list,
- int write_resource_flags,
- int out_ctype,
- u32 out_chunk_size,
+find_raw_copy_blobs(struct list_head *blob_list, int write_resource_flags,
+ int out_ctype, u32 out_chunk_size,
struct list_head *raw_copy_blobs)
{
struct blob_descriptor *blob, *tmp;
- u64 num_bytes_to_compress = 0;
+ u64 num_nonraw_bytes = 0;
INIT_LIST_HEAD(raw_copy_blobs);
blob->rdesc->raw_copy_ok = 0;
list_for_each_entry_safe(blob, tmp, blob_list, write_blobs_list) {
- if (blob->blob_location == BLOB_IN_WIM &&
- blob->rdesc->raw_copy_ok)
- {
- list_move_tail(&blob->write_blobs_list,
- raw_copy_blobs);
- } else if (can_raw_copy(blob, write_resource_flags,
- out_ctype, out_chunk_size))
+ if (can_raw_copy(blob, write_resource_flags,
+ out_ctype, out_chunk_size))
{
blob->rdesc->raw_copy_ok = 1;
- list_move_tail(&blob->write_blobs_list,
- raw_copy_blobs);
+ list_move_tail(&blob->write_blobs_list, raw_copy_blobs);
} else {
- num_bytes_to_compress += blob->size;
+ num_nonraw_bytes += blob->size;
}
}
- return num_bytes_to_compress;
+ return num_nonraw_bytes;
}
/* Copy a raw compressed resource located in another WIM file to the WIM file
}
in_fd = &in_rdesc->wim->in_fd;
wimlib_assert(cur_read_offset != end_read_offset);
- do {
- bytes_to_read = min(sizeof(buf), end_read_offset - cur_read_offset);
+ if (likely(!in_rdesc->wim->being_compacted) ||
+ in_rdesc->offset_in_wim > out_fd->offset) {
+ do {
+ bytes_to_read = min(sizeof(buf),
+ end_read_offset - cur_read_offset);
- ret = full_pread(in_fd, buf, bytes_to_read, cur_read_offset);
- if (ret)
- return ret;
+ ret = full_pread(in_fd, buf, bytes_to_read,
+ cur_read_offset);
+ if (ret)
+ return ret;
- ret = full_write(out_fd, buf, bytes_to_read);
- if (ret)
- return ret;
+ ret = full_write(out_fd, buf, bytes_to_read);
+ if (ret)
+ return ret;
+
+ cur_read_offset += bytes_to_read;
- cur_read_offset += bytes_to_read;
+ } while (cur_read_offset != end_read_offset);
+ } else {
+ /* Optimization: the WIM file is being compacted and the
+ * resource being written is already in the desired location.
+ * Skip over the data instead of re-writing it. */
- } while (cur_read_offset != end_read_offset);
+	/* Due to the earlier check for overlapping resources, it should
+ * never be the case that we already overwrote the resource. */
+ wimlib_assert(!(in_rdesc->offset_in_wim < out_fd->offset));
+
+ if (-1 == filedes_seek(out_fd, out_fd->offset + in_rdesc->size_in_wim))
+ return WIMLIB_ERR_WRITE;
+ }
list_for_each_entry(blob, &in_rdesc->blob_list, rdesc_node) {
if (blob->will_be_in_output_wim) {
* identical to another blob already being written or one that would be filtered
* out of the output WIM using blob_filtered() with the context @filter_ctx.
* Each such duplicate blob will be removed from @blob_list, its reference count
- * transfered to the pre-existing duplicate blob, its memory freed, and will not
- * be written. Alternatively, if a blob in @blob_list is a duplicate with any
- * blob in @blob_table that has not been marked for writing or would not be
+ * transferred to the pre-existing duplicate blob, its memory freed, and will
+ * not be written. Alternatively, if a blob in @blob_list is a duplicate with
+ * any blob in @blob_table that has not been marked for writing or would not be
* hard-filtered, it is freed and the pre-existing duplicate is written instead,
* taking ownership of the reference count and slot in the @blob_table_list.
*
int ret;
struct write_blobs_ctx ctx;
struct list_head raw_copy_blobs;
+ u64 num_nonraw_bytes;
wimlib_assert((write_resource_flags &
(WRITE_RESOURCE_FLAG_SOLID |
if (ret)
return ret;
- compute_blob_list_stats(blob_list, &ctx);
+ ret = compute_blob_list_stats(blob_list, &ctx);
+ if (ret)
+ return ret;
if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID_SORT) {
ret = sort_blob_list_for_solid_compression(blob_list);
ctx.progress_data.progfunc = progfunc;
ctx.progress_data.progctx = progctx;
- ctx.num_bytes_to_compress = find_raw_copy_blobs(blob_list,
- write_resource_flags,
- out_ctype,
- out_chunk_size,
- &raw_copy_blobs);
+ num_nonraw_bytes = find_raw_copy_blobs(blob_list, write_resource_flags,
+ out_ctype, out_chunk_size,
+ &raw_copy_blobs);
- if (ctx.num_bytes_to_compress == 0)
- goto out_write_raw_copy_resources;
+ /* Copy any compressed resources for which the raw data can be reused
+ * without decompression. */
+ ret = write_raw_copy_resources(&raw_copy_blobs, ctx.out_fd,
+ &ctx.progress_data);
+
+ if (ret || num_nonraw_bytes == 0)
+ goto out_destroy_context;
/* Unless uncompressed output was required, allocate a chunk_compressor
* to do compression. There are serial and parallel implementations of
if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
#ifdef ENABLE_MULTITHREADED_COMPRESSION
- if (ctx.num_bytes_to_compress > max(2000000, out_chunk_size)) {
+ if (num_nonraw_bytes > max(2000000, out_chunk_size)) {
ret = new_parallel_chunk_compressor(out_ctype,
out_chunk_size,
num_threads, 0,
goto out_destroy_context;
if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
- ret = begin_write_resource(&ctx, ctx.num_bytes_to_compress);
+ ret = begin_write_resource(&ctx, num_nonraw_bytes);
if (ret)
goto out_destroy_context;
}
wimlib_assert(offset_in_res == reshdr.uncompressed_size);
}
-out_write_raw_copy_resources:
- /* Copy any compressed resources for which the raw data can be reused
- * without decompression. */
- ret = write_raw_copy_resources(&raw_copy_blobs, ctx.out_fd,
- &ctx.progress_data);
-
out_destroy_context:
FREE(ctx.chunk_csizes);
if (ctx.compressor)
* STREAMS_OK: For writes of all images, assume that all blobs in the blob
* table of @wim and the per-image lists of unhashed blobs should be taken
* as-is, and image metadata should not be searched for references. This
- * does not exclude filtering with OVERWRITE and SKIP_EXTERNAL_WIMS, below.
+ * does not exclude filtering with APPEND and SKIP_EXTERNAL_WIMS, below.
*
- * OVERWRITE: Blobs already present in @wim shall not be returned in
+ * APPEND: Blobs already present in @wim shall not be returned in
* @blob_list_ret.
*
* SKIP_EXTERNAL_WIMS: Blobs already present in a WIM file, but not @wim,
* the blobs in @blob_list_ret.
*
* This list will be a proper superset of @blob_list_ret if and only if
- * WIMLIB_WRITE_FLAG_OVERWRITE was specified in @write_flags and some of
- * the blobs that would otherwise need to be written were already located
- * in the WIM file.
+ * WIMLIB_WRITE_FLAG_APPEND was specified in @write_flags and some of the
+ * blobs that would otherwise need to be written were already located in
+ * the WIM file.
*
* All blobs in this list will have @out_refcnt set to the number of
* references to the blob in the output WIM. If
if (imd->modified) {
ret = write_metadata_resource(wim, i,
write_resource_flags);
- } else if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
+ } else if (write_flags & WIMLIB_WRITE_FLAG_UNSAFE_COMPACT) {
+ /* For compactions, existing metadata resources are
+ * written along with the existing file resources. */
+ ret = 0;
+ } else if (write_flags & WIMLIB_WRITE_FLAG_APPEND) {
blob_set_out_reshdr_for_reuse(imd->metadata_blob);
ret = 0;
} else {
int ret;
/* Set output resource metadata for blobs already present in WIM. */
- if (write_flags & WIMLIB_WRITE_FLAG_OVERWRITE) {
+ if (write_flags & WIMLIB_WRITE_FLAG_APPEND) {
struct blob_descriptor *blob;
list_for_each_entry(blob, blob_table_list, blob_table_list) {
if (blob->blob_location == BLOB_IN_WIM &&
wim->out_hdr.boot_idx - 1]->metadata_blob->out_reshdr);
}
- /* If overwriting the WIM file containing an integrity table in-place,
- * we'd like to re-use the information in the old integrity table
- * instead of recalculating it. But we might overwrite the old
- * integrity table when we expand the XML data. Read it into memory
- * just in case. */
- if ((write_flags & (WIMLIB_WRITE_FLAG_OVERWRITE |
+ /* If appending to a WIM file containing an integrity table, we'd like
+ * to re-use the information in the old integrity table instead of
+ * recalculating it. But we might overwrite the old integrity table
+ * when we expand the XML data. Read it into memory just in case. */
+ if ((write_flags & (WIMLIB_WRITE_FLAG_APPEND |
WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)) ==
- (WIMLIB_WRITE_FLAG_OVERWRITE |
+ (WIMLIB_WRITE_FLAG_APPEND |
WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
&& wim_has_integrity_table(wim))
{
if (ret)
return ret;
+ if (unlikely(write_flags & WIMLIB_WRITE_FLAG_UNSAFE_COMPACT)) {
+ /* Truncate any data the compaction freed up. */
+ if (ftruncate(wim->out_fd.fd, wim->out_fd.offset)) {
+ ERROR_WITH_ERRNO("Failed to truncate the output WIM file");
+ return WIMLIB_ERR_WRITE;
+ }
+ }
+
/* Possibly sync file data to disk before closing. On POSIX systems, it
* is necessary to do this before using rename() to overwrite an
* existing file with a new file. Otherwise, data loss would occur if
WIMLIB_WRITE_FLAG_NOT_PIPABLE))
return WIMLIB_ERR_INVALID_PARAM;
+ /* Only wimlib_overwrite() accepts UNSAFE_COMPACT. */
+ if (write_flags & WIMLIB_WRITE_FLAG_UNSAFE_COMPACT)
+ return WIMLIB_ERR_INVALID_PARAM;
+
/* Include an integrity table by default if no preference was given and
* the WIM already had an integrity table. */
if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY |
}
/* Make sure no file or metadata resources are located after the XML data (or
- * integrity table if present)--- otherwise we can't safely overwrite the WIM in
- * place and we return WIMLIB_ERR_RESOURCE_ORDER. */
+ * integrity table if present)--- otherwise we can't safely append to the WIM
+ * file and we return WIMLIB_ERR_RESOURCE_ORDER. */
static int
check_resource_offsets(WIMStruct *wim, off_t end_offset)
{
* XML data (variable size)
* Integrity table (optional) (variable size)
*
- * This method allows an image to be appended to a large WIM very quickly, and
+ * This function allows an image to be appended to a large WIM very quickly, and
* is crash-safe except in the case of write re-ordering, but the disadvantage
* is that a small hole is left in the WIM where the old blob table, xml data,
* and integrity table were. (These usually only take up a small amount of
* space compared to the blobs, however.)
+ *
+ * Finally, this function also supports "compaction" overwrites as an
+ * alternative to the normal "append" overwrites described above. In a
+ * compaction, data is written starting immediately from the end of the header.
+ * All existing resources are written first, in order by file offset. New
+ * resources are written afterwards, and at the end any extra data is truncated
+ * from the file.  The advantage of this approach is that the WIM file
+ * ends up fully optimized, without any holes remaining.  The main disadvantage
+ * is that this operation is fundamentally unsafe and cannot be interrupted
+ * without data corruption. Consequently, compactions are only ever done when
+ * explicitly requested by the library user with the flag
+ * WIMLIB_WRITE_FLAG_UNSAFE_COMPACT. (Another disadvantage is that a compaction
+ * can be much slower than an append.)
*/
static int
overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads)
{
int ret;
off_t old_wim_end;
- u64 old_blob_table_end, old_xml_begin, old_xml_end;
struct list_head blob_list;
struct list_head blob_table_list;
struct filter_context filter_ctx;
if (should_default_to_solid_compression(wim, write_flags))
write_flags |= WIMLIB_WRITE_FLAG_SOLID;
- /* Set additional flags for overwrite. */
- write_flags |= WIMLIB_WRITE_FLAG_OVERWRITE |
- WIMLIB_WRITE_FLAG_STREAMS_OK;
-
- /* Make sure there is no data after the XML data, except possibily an
- * integrity table. If this were the case, then this data would be
- * overwritten. */
- old_xml_begin = wim->hdr.xml_data_reshdr.offset_in_wim;
- old_xml_end = old_xml_begin + wim->hdr.xml_data_reshdr.size_in_wim;
- old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
- wim->hdr.blob_table_reshdr.size_in_wim;
- if (wim_has_integrity_table(wim) &&
- wim->hdr.integrity_table_reshdr.offset_in_wim < old_xml_end) {
- WARNING("Didn't expect the integrity table to be before the XML data");
- ret = WIMLIB_ERR_RESOURCE_ORDER;
- goto out;
- }
+ if (unlikely(write_flags & WIMLIB_WRITE_FLAG_UNSAFE_COMPACT)) {
- if (old_blob_table_end > old_xml_begin) {
- WARNING("Didn't expect the blob table to be after the XML data");
- ret = WIMLIB_ERR_RESOURCE_ORDER;
- goto out;
- }
+ /* In-place compaction */
+
+ WARNING("The WIM file \"%"TS"\" is being compacted in place.\n"
+ " Do *not* interrupt the operation, or else "
+ "the WIM file will be\n"
+ " corrupted!", wim->filename);
+ wim->being_compacted = 1;
+ old_wim_end = WIM_HEADER_DISK_SIZE;
- /* Set @old_wim_end, which indicates the point beyond which we don't
- * allow any file and metadata resources to appear without returning
- * WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we would otherwise
- * overwrite these resources). */
- if (!wim->image_deletion_occurred && !any_images_modified(wim)) {
- /* If no images have been modified and no images have been
- * deleted, a new blob table does not need to be written. We
- * shall write the new XML data and optional integrity table
- * immediately after the blob table. Note that this may
- * overwrite an existing integrity table. */
- old_wim_end = old_blob_table_end;
- write_flags |= WIMLIB_WRITE_FLAG_NO_NEW_BLOBS;
- } else if (wim_has_integrity_table(wim)) {
- /* Old WIM has an integrity table; begin writing new blobs after
- * it. */
- old_wim_end = wim->hdr.integrity_table_reshdr.offset_in_wim +
- wim->hdr.integrity_table_reshdr.size_in_wim;
+ ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES,
+ write_flags, &blob_list,
+ &blob_table_list, &filter_ctx);
+ if (ret)
+ goto out;
+
+ if (wim_has_metadata(wim)) {
+ /* Add existing metadata resources to be compacted along
+ * with the file resources. */
+ for (int i = 0; i < wim->hdr.image_count; i++) {
+ struct wim_image_metadata *imd = wim->image_metadata[i];
+ if (!imd->modified) {
+ fully_reference_blob_for_write(imd->metadata_blob,
+ &blob_list);
+ }
+ }
+ }
} else {
- /* No existing integrity table; begin writing new blobs after
- * the old XML data. */
- old_wim_end = old_xml_end;
- }
+ u64 old_blob_table_end, old_xml_begin, old_xml_end;
- ret = check_resource_offsets(wim, old_wim_end);
- if (ret)
- goto out;
+ /* Set additional flags for append. */
+ write_flags |= WIMLIB_WRITE_FLAG_APPEND |
+ WIMLIB_WRITE_FLAG_STREAMS_OK;
- ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES, write_flags,
- &blob_list, &blob_table_list,
- &filter_ctx);
- if (ret)
- goto out;
+ /* Make sure there is no data after the XML data, except
+	 * possibly an integrity table.  If this were the case, then
+ * this data would be overwritten. */
+ old_xml_begin = wim->hdr.xml_data_reshdr.offset_in_wim;
+ old_xml_end = old_xml_begin + wim->hdr.xml_data_reshdr.size_in_wim;
+ old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim +
+ wim->hdr.blob_table_reshdr.size_in_wim;
+ if (wim_has_integrity_table(wim) &&
+ wim->hdr.integrity_table_reshdr.offset_in_wim < old_xml_end) {
+ WARNING("Didn't expect the integrity table to be "
+ "before the XML data");
+ ret = WIMLIB_ERR_RESOURCE_ORDER;
+ goto out;
+ }
+
+ if (old_blob_table_end > old_xml_begin) {
+ WARNING("Didn't expect the blob table to be after "
+ "the XML data");
+ ret = WIMLIB_ERR_RESOURCE_ORDER;
+ goto out;
+ }
+ /* Set @old_wim_end, which indicates the point beyond which we
+ * don't allow any file and metadata resources to appear without
+ * returning WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we
+ * would otherwise overwrite these resources). */
+ if (!wim->image_deletion_occurred && !any_images_modified(wim)) {
+ /* If no images have been modified and no images have
+ * been deleted, a new blob table does not need to be
+ * written. We shall write the new XML data and
+ * optional integrity table immediately after the blob
+ * table. Note that this may overwrite an existing
+ * integrity table. */
+ old_wim_end = old_blob_table_end;
+ write_flags |= WIMLIB_WRITE_FLAG_NO_NEW_BLOBS;
+ } else if (wim_has_integrity_table(wim)) {
+ /* Old WIM has an integrity table; begin writing new
+ * blobs after it. */
+ old_wim_end = wim->hdr.integrity_table_reshdr.offset_in_wim +
+ wim->hdr.integrity_table_reshdr.size_in_wim;
+ } else {
+ /* No existing integrity table; begin writing new blobs
+ * after the old XML data. */
+ old_wim_end = old_xml_end;
+ }
+
+ ret = check_resource_offsets(wim, old_wim_end);
+ if (ret)
+ goto out;
- if (write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)
- wimlib_assert(list_empty(&blob_list));
+ ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES,
+ write_flags, &blob_list,
+ &blob_table_list, &filter_ctx);
+ if (ret)
+ goto out;
+
+ if (write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)
+ wimlib_assert(list_empty(&blob_list));
+ }
ret = open_wim_writable(wim, wim->filename, O_RDWR);
if (ret)
return 0;
out_truncate:
- if (!(write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)) {
+ if (!(write_flags & (WIMLIB_WRITE_FLAG_NO_NEW_BLOBS |
+ WIMLIB_WRITE_FLAG_UNSAFE_COMPACT))) {
WARNING("Truncating \"%"TS"\" to its original size "
"(%"PRIu64" bytes)", wim->filename, old_wim_end);
/* Return value of ftruncate() is ignored because this is
out_close_wim:
(void)close_wim_writable(wim, write_flags);
out:
+ wim->being_compacted = 0;
return ret;
}
&progress, wim->progctx);
}
-/* Determine if the specified WIM file may be updated by appending in-place
- * rather than writing and replacing it with an entirely new file. */
+/* Determine if the specified WIM file may be updated in-place rather than by
+ * writing and replacing it with an entirely new file. */
static bool
can_overwrite_wim_inplace(const WIMStruct *wim, int write_flags)
{
if (!wim->filename)
return WIMLIB_ERR_NO_FILENAME;
+ if (unlikely(write_flags & WIMLIB_WRITE_FLAG_UNSAFE_COMPACT)) {
+ /*
+ * In UNSAFE_COMPACT mode:
+ * - RECOMPRESS is forbidden
+ * - REBUILD is ignored
+ * - SOFT_DELETE and NO_SOLID_SORT are implied
+ */
+ if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
+ return WIMLIB_ERR_COMPACTION_NOT_POSSIBLE;
+ write_flags &= ~WIMLIB_WRITE_FLAG_REBUILD;
+ write_flags |= WIMLIB_WRITE_FLAG_SOFT_DELETE;
+ write_flags |= WIMLIB_WRITE_FLAG_NO_SOLID_SORT;
+ }
+
orig_hdr_flags = wim->hdr.flags;
if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG)
wim->hdr.flags &= ~WIM_HDR_FLAG_READONLY;
return ret;
WARNING("Falling back to re-building entire WIM");
}
+ if (write_flags & WIMLIB_WRITE_FLAG_UNSAFE_COMPACT)
+ return WIMLIB_ERR_COMPACTION_NOT_POSSIBLE;
return overwrite_wim_via_tmpfile(wim, write_flags, num_threads);
}