X-Git-Url: https://wimlib.net/git/?p=wimlib;a=blobdiff_plain;f=src%2Fwrite.c;h=9e396162491d9ab6895dbb9fddf18a6327c218bd;hp=13ff87a7cc6b8e162be8c6c806b9b00804d9de45;hb=35a448862fb198323481044e0d820c7c631e4dab;hpb=5c39470a315a563ef896bb58e7560164cc24df04 diff --git a/src/write.c b/src/write.c index 13ff87a7..9e396162 100644 --- a/src/write.c +++ b/src/write.c @@ -174,7 +174,12 @@ can_raw_copy(const struct blob_descriptor *blob, int write_resource_flags, rdesc = blob->rdesc; - /* Only reuse compressed resources. */ + /* In the case of an in-place compaction, always reuse resources located + * in the WIM being compacted. */ + if (rdesc->wim->being_compacted) + return true; + + /* Otherwise, only reuse compressed resources. */ if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE || !(rdesc->flags & (WIM_RESHDR_FLAG_COMPRESSED | WIM_RESHDR_FLAG_SOLID))) @@ -1104,11 +1109,16 @@ write_blob_end_read(struct blob_descriptor *blob, int status, void *_ctx) return status; } -/* Compute statistics about a list of blobs that will be written. +/* + * Compute statistics about a list of blobs that will be written. * * Assumes the blobs are sorted such that all blobs located in each distinct WIM - * (specified by WIMStruct) are together. */ -static void + * (specified by WIMStruct) are together. + * + * For compactions, also verify that there are no overlapping resources. This + * really should be checked earlier, but for now it's easiest to check here. + */ +static int compute_blob_list_stats(struct list_head *blob_list, struct write_blobs_ctx *ctx) { @@ -1117,15 +1127,32 @@ compute_blob_list_stats(struct list_head *blob_list, u64 num_blobs = 0; u64 total_parts = 0; WIMStruct *prev_wim_part = NULL; + const struct wim_resource_descriptor *prev_rdesc = NULL; list_for_each_entry(blob, blob_list, write_blobs_list) { num_blobs++; total_bytes += blob->size; if (blob->blob_location == BLOB_IN_WIM) { - if (prev_wim_part != blob->rdesc->wim) { - prev_wim_part = blob->rdesc->wim; + const struct wim_resource_descriptor *rdesc = blob->rdesc; + WIMStruct *wim = rdesc->wim; + + if (prev_wim_part != wim) { + prev_wim_part = wim; total_parts++; } + if (unlikely(wim->being_compacted) && rdesc != prev_rdesc) { + if (prev_rdesc != NULL && + rdesc->offset_in_wim < + prev_rdesc->offset_in_wim + + prev_rdesc->size_in_wim) + { + WARNING("WIM file contains overlapping " + "resources! 
Compaction is not " + "possible."); + return WIMLIB_ERR_RESOURCE_ORDER; + } + prev_rdesc = rdesc; + } } } ctx->progress_data.progress.write_streams.total_bytes = total_bytes; @@ -1136,6 +1163,7 @@ compute_blob_list_stats(struct list_head *blob_list, ctx->progress_data.progress.write_streams.total_parts = total_parts; ctx->progress_data.progress.write_streams.completed_parts = 0; ctx->progress_data.next_progress = 0; + return 0; } /* Find blobs in @blob_list that can be copied to the output WIM in raw form @@ -1200,21 +1228,37 @@ write_raw_copy_resource(struct wim_resource_descriptor *in_rdesc, } in_fd = &in_rdesc->wim->in_fd; wimlib_assert(cur_read_offset != end_read_offset); - do { - bytes_to_read = min(sizeof(buf), end_read_offset - cur_read_offset); + if (likely(!in_rdesc->wim->being_compacted) || + in_rdesc->offset_in_wim > out_fd->offset) { + do { + bytes_to_read = min(sizeof(buf), + end_read_offset - cur_read_offset); - ret = full_pread(in_fd, buf, bytes_to_read, cur_read_offset); - if (ret) - return ret; + ret = full_pread(in_fd, buf, bytes_to_read, + cur_read_offset); + if (ret) + return ret; - ret = full_write(out_fd, buf, bytes_to_read); - if (ret) - return ret; + ret = full_write(out_fd, buf, bytes_to_read); + if (ret) + return ret; - cur_read_offset += bytes_to_read; + cur_read_offset += bytes_to_read; - } while (cur_read_offset != end_read_offset); + } while (cur_read_offset != end_read_offset); + } else { + /* Optimization: the WIM file is being compacted and the + * resource being written is already in the desired location. + * Skip over the data instead of re-writing it. */ + + /* Due the earlier check for overlapping resources, it should + * never be the case that we already overwrote the resource. */ + wimlib_assert(!(in_rdesc->offset_in_wim < out_fd->offset)); + + if (-1 == filedes_seek(out_fd, out_fd->offset + in_rdesc->size_in_wim)) + return WIMLIB_ERR_WRITE; + } list_for_each_entry(blob, &in_rdesc->blob_list, rdesc_node) { if (blob->will_be_in_output_wim) { @@ -1296,17 +1340,6 @@ validate_blob_list(struct list_head *blob_list) } } -static inline bool -blob_is_in_file(const struct blob_descriptor *blob) -{ - return blob->blob_location == BLOB_IN_FILE_ON_DISK -#ifdef __WIN32__ - || blob->blob_location == BLOB_IN_WINNT_FILE_ON_DISK - || blob->blob_location == BLOB_WIN32_ENCRYPTED -#endif - ; -} - static void init_done_with_file_info(struct list_head *blob_list) { @@ -1420,9 +1453,9 @@ init_done_with_file_info(struct list_head *blob_list) * identical to another blob already being written or one that would be filtered * out of the output WIM using blob_filtered() with the context @filter_ctx. * Each such duplicate blob will be removed from @blob_list, its reference count - * transfered to the pre-existing duplicate blob, its memory freed, and will not - * be written. Alternatively, if a blob in @blob_list is a duplicate with any - * blob in @blob_table that has not been marked for writing or would not be + * transferred to the pre-existing duplicate blob, its memory freed, and will + * not be written. Alternatively, if a blob in @blob_list is a duplicate with + * any blob in @blob_table that has not been marked for writing or would not be * hard-filtered, it is freed and the pre-existing duplicate is written instead, * taking ownership of the reference count and slot in the @blob_table_list. 
* @@ -1490,7 +1523,9 @@ write_blob_list(struct list_head *blob_list, if (ret) return ret; - compute_blob_list_stats(blob_list, &ctx); + ret = compute_blob_list_stats(blob_list, &ctx); + if (ret) + return ret; if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID_SORT) { ret = sort_blob_list_for_solid_compression(blob_list); @@ -1505,16 +1540,12 @@ write_blob_list(struct list_head *blob_list, out_ctype, out_chunk_size, &raw_copy_blobs); - if (num_nonraw_bytes == 0) - goto out_write_raw_copy_resources; - - /* Unless uncompressed output was required, allocate a chunk_compressor - * to do compression. There are serial and parallel implementations of - * the chunk_compressor interface. We default to parallel using the + /* Unless no data needs to be compressed, allocate a chunk_compressor to + * do compression. There are serial and parallel implementations of the + * chunk_compressor interface. We default to parallel using the * specified number of threads, unless the upper bound on the number * bytes needing to be compressed is less than a heuristic value. */ - if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) { - + if (num_nonraw_bytes != 0 && out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) { #ifdef ENABLE_MULTITHREADED_COMPRESSION if (num_nonraw_bytes > max(2000000, out_chunk_size)) { ret = new_parallel_chunk_compressor(out_ctype, @@ -1542,9 +1573,6 @@ write_blob_list(struct list_head *blob_list, else ctx.progress_data.progress.write_streams.num_threads = 1; - INIT_LIST_HEAD(&ctx.blobs_being_compressed); - INIT_LIST_HEAD(&ctx.blobs_in_solid_resource); - ret = call_progress(ctx.progress_data.progfunc, WIMLIB_PROGRESS_MSG_WRITE_STREAMS, &ctx.progress_data.progress, @@ -1552,7 +1580,20 @@ write_blob_list(struct list_head *blob_list, if (ret) goto out_destroy_context; + /* Copy any compressed resources for which the raw data can be reused + * without decompression. */ + ret = write_raw_copy_resources(&raw_copy_blobs, ctx.out_fd, + &ctx.progress_data); + + if (ret || num_nonraw_bytes == 0) + goto out_destroy_context; + + INIT_LIST_HEAD(&ctx.blobs_being_compressed); + if (write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) { + + INIT_LIST_HEAD(&ctx.blobs_in_solid_resource); + ret = begin_write_resource(&ctx, num_nonraw_bytes); if (ret) goto out_destroy_context; @@ -1606,12 +1647,6 @@ write_blob_list(struct list_head *blob_list, wimlib_assert(offset_in_res == reshdr.uncompressed_size); } -out_write_raw_copy_resources: - /* Copy any compressed resources for which the raw data can be reused - * without decompression. */ - ret = write_raw_copy_resources(&raw_copy_blobs, ctx.out_fd, - &ctx.progress_data); - out_destroy_context: FREE(ctx.chunk_csizes); if (ctx.compressor) @@ -2134,16 +2169,28 @@ write_metadata_resources(WIMStruct *wim, int image, int write_flags) struct wim_image_metadata *imd; imd = wim->image_metadata[i - 1]; - /* Build a new metadata resource only if image was modified from - * the original (or was newly added). Otherwise just copy the - * existing one. */ - if (imd->modified) { + if (is_image_dirty(imd)) { + /* The image was modified from the original, or was + * newly added, so we have to build and write a new + * metadata resource. 
*/ ret = write_metadata_resource(wim, i, write_resource_flags); - } else if (write_flags & WIMLIB_WRITE_FLAG_APPEND) { - blob_set_out_reshdr_for_reuse(imd->metadata_blob); + } else if (is_image_unchanged_from_wim(imd, wim) && + (write_flags & (WIMLIB_WRITE_FLAG_UNSAFE_COMPACT | + WIMLIB_WRITE_FLAG_APPEND))) + { + /* The metadata resource is already in the WIM file. + * For appends, we don't need to write it at all. For + * compactions, we re-write existing metadata resources + * along with the existing file resources, not here. */ + if (write_flags & WIMLIB_WRITE_FLAG_APPEND) + blob_set_out_reshdr_for_reuse(imd->metadata_blob); ret = 0; } else { + /* The metadata resource is in a WIM file other than the + * one being written to. We need to rewrite it, + * possibly compressed differently; but rebuilding the + * metadata itself isn't necessary. */ ret = write_wim_resource(imd->metadata_blob, &wim->out_fd, wim->out_compression_type, @@ -2341,7 +2388,9 @@ finish_write(WIMStruct *wim, int image, int write_flags, } /* Write integrity table if needed. */ - if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) { + if ((write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) && + wim->out_hdr.blob_table_reshdr.offset_in_wim != 0) + { if (write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS) { /* The XML data we wrote may have overwritten part of * the old integrity table, so while calculating the new @@ -2385,6 +2434,14 @@ finish_write(WIMStruct *wim, int image, int write_flags, if (ret) return ret; + if (unlikely(write_flags & WIMLIB_WRITE_FLAG_UNSAFE_COMPACT)) { + /* Truncate any data the compaction freed up. */ + if (ftruncate(wim->out_fd.fd, wim->out_fd.offset)) { + ERROR_WITH_ERRNO("Failed to truncate the output WIM file"); + return WIMLIB_ERR_WRITE; + } + } + /* Possibly sync file data to disk before closing. On POSIX systems, it * is necessary to do this before using rename() to overwrite an * existing file with a new file. Otherwise, data loss would occur if @@ -2574,6 +2631,25 @@ should_default_to_solid_compression(WIMStruct *wim, int write_flags) wim_has_solid_resources(wim); } +/* Update the images' filecount/bytecount stats (in the XML info) to take into + * account any recent modifications. */ +static int +update_image_stats(WIMStruct *wim) +{ + if (!wim_has_metadata(wim)) + return 0; + for (int i = 0; i < wim->hdr.image_count; i++) { + struct wim_image_metadata *imd = wim->image_metadata[i]; + if (imd->stats_outdated) { + int ret = xml_update_image_info(wim, i + 1); + if (ret) + return ret; + imd->stats_outdated = false; + } + } + return 0; +} + /* Write a standalone WIM or split WIM (SWM) part to a new file or to a file * descriptor. */ int @@ -2620,6 +2696,10 @@ write_wim_part(WIMStruct *wim, WIMLIB_WRITE_FLAG_NOT_PIPABLE)) return WIMLIB_ERR_INVALID_PARAM; + /* Only wimlib_overwrite() accepts UNSAFE_COMPACT. */ + if (write_flags & WIMLIB_WRITE_FLAG_UNSAFE_COMPACT) + return WIMLIB_ERR_INVALID_PARAM; + /* Include an integrity table by default if no preference was given and * the WIM already had an integrity table. */ if (!(write_flags & (WIMLIB_WRITE_FLAG_CHECK_INTEGRITY | @@ -2717,6 +2797,11 @@ write_wim_part(WIMStruct *wim, wim->out_hdr.boot_idx = 1; } + /* Update image stats if needed. */ + ret = update_image_stats(wim); + if (ret) + return ret; + /* Set up the output file descriptor. */ if (write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR) { /* File descriptor was explicitly provided. 
*/ @@ -2819,11 +2904,16 @@ wimlib_write_to_fd(WIMStruct *wim, int fd, return write_standalone_wim(wim, &fd, image, write_flags, num_threads); } +/* Have there been any changes to images in the specified WIM, including updates + * as well as deletions and additions of entire images, but excluding changes to + * the XML document? */ static bool -any_images_modified(WIMStruct *wim) +any_images_changed(WIMStruct *wim) { + if (wim->image_deletion_occurred) + return true; for (int i = 0; i < wim->hdr.image_count; i++) - if (wim->image_metadata[i]->modified) + if (!is_image_unchanged_from_wim(wim->image_metadata[i], wim)) return true; return false; } @@ -2863,6 +2953,20 @@ check_resource_offsets(WIMStruct *wim, off_t end_offset) return 0; } +static int +free_blob_if_invalidated(struct blob_descriptor *blob, void *_wim) +{ + const WIMStruct *wim = _wim; + + if (!blob->will_be_in_output_wim && + blob->blob_location == BLOB_IN_WIM && blob->rdesc->wim == wim) + { + blob_table_unlink(wim->blob_table, blob); + free_blob_descriptor(blob); + } + return 0; +} + /* * Overwrite a WIM, possibly appending new resources to it. * @@ -2918,13 +3022,25 @@ check_resource_offsets(WIMStruct *wim, off_t end_offset) * is that a small hole is left in the WIM where the old blob table, xml data, * and integrity table were. (These usually only take up a small amount of * space compared to the blobs, however.) + * + * Finally, this function also supports "compaction" overwrites as an + * alternative to the normal "append" overwrites described above. In a + * compaction, data is written starting immediately from the end of the header. + * All existing resources are written first, in order by file offset. New + * resources are written afterwards, and at the end any extra data is truncated + * from the file. The advantage of this approach is that is that the WIM file + * ends up fully optimized, without any holes remaining. The main disadavantage + * is that this operation is fundamentally unsafe and cannot be interrupted + * without data corruption. Consequently, compactions are only ever done when + * explicitly requested by the library user with the flag + * WIMLIB_WRITE_FLAG_UNSAFE_COMPACT. (Another disadvantage is that a compaction + * can be much slower than an append.) */ static int overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads) { int ret; off_t old_wim_end; - u64 old_blob_table_end, old_xml_begin, old_xml_end; struct list_head blob_list; struct list_head blob_table_list; struct filter_context filter_ctx; @@ -2950,66 +3066,110 @@ overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads) if (should_default_to_solid_compression(wim, write_flags)) write_flags |= WIMLIB_WRITE_FLAG_SOLID; - /* Set additional flags for overwrite. */ - write_flags |= WIMLIB_WRITE_FLAG_APPEND | - WIMLIB_WRITE_FLAG_STREAMS_OK; - - /* Make sure there is no data after the XML data, except possibily an - * integrity table. If this were the case, then this data would be - * overwritten. 
*/ - old_xml_begin = wim->hdr.xml_data_reshdr.offset_in_wim; - old_xml_end = old_xml_begin + wim->hdr.xml_data_reshdr.size_in_wim; - old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim + - wim->hdr.blob_table_reshdr.size_in_wim; - if (wim_has_integrity_table(wim) && - wim->hdr.integrity_table_reshdr.offset_in_wim < old_xml_end) { - WARNING("Didn't expect the integrity table to be before the XML data"); - ret = WIMLIB_ERR_RESOURCE_ORDER; - goto out; - } + if (unlikely(write_flags & WIMLIB_WRITE_FLAG_UNSAFE_COMPACT)) { - if (old_blob_table_end > old_xml_begin) { - WARNING("Didn't expect the blob table to be after the XML data"); - ret = WIMLIB_ERR_RESOURCE_ORDER; - goto out; - } + /* In-place compaction */ - /* Set @old_wim_end, which indicates the point beyond which we don't - * allow any file and metadata resources to appear without returning - * WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we would otherwise - * overwrite these resources). */ - if (!wim->image_deletion_occurred && !any_images_modified(wim)) { - /* If no images have been modified and no images have been - * deleted, a new blob table does not need to be written. We - * shall write the new XML data and optional integrity table - * immediately after the blob table. Note that this may - * overwrite an existing integrity table. */ - old_wim_end = old_blob_table_end; - write_flags |= WIMLIB_WRITE_FLAG_NO_NEW_BLOBS; - } else if (wim_has_integrity_table(wim)) { - /* Old WIM has an integrity table; begin writing new blobs after - * it. */ - old_wim_end = wim->hdr.integrity_table_reshdr.offset_in_wim + - wim->hdr.integrity_table_reshdr.size_in_wim; + WARNING("The WIM file \"%"TS"\" is being compacted in place.\n" + " Do *not* interrupt the operation, or else " + "the WIM file will be\n" + " corrupted!", wim->filename); + wim->being_compacted = 1; + old_wim_end = WIM_HEADER_DISK_SIZE; + + ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES, + write_flags, &blob_list, + &blob_table_list, &filter_ctx); + if (ret) + goto out; + + /* Prevent new files from being deduplicated with existing blobs + * in the WIM that we haven't decided to write. Such blobs will + * be overwritten during the compaction. */ + for_blob_in_table(wim->blob_table, free_blob_if_invalidated, wim); + + if (wim_has_metadata(wim)) { + /* Add existing metadata resources to be compacted along + * with the file resources. */ + for (int i = 0; i < wim->hdr.image_count; i++) { + struct wim_image_metadata *imd = wim->image_metadata[i]; + if (is_image_unchanged_from_wim(imd, wim)) { + fully_reference_blob_for_write(imd->metadata_blob, + &blob_list); + } + } + } } else { - /* No existing integrity table; begin writing new blobs after - * the old XML data. */ - old_wim_end = old_xml_end; - } + u64 old_blob_table_end, old_xml_begin, old_xml_end; - ret = check_resource_offsets(wim, old_wim_end); - if (ret) - goto out; + /* Set additional flags for append. */ + write_flags |= WIMLIB_WRITE_FLAG_APPEND | + WIMLIB_WRITE_FLAG_STREAMS_OK; + + /* Make sure there is no data after the XML data, except + * possibily an integrity table. If this were the case, then + * this data would be overwritten. 
*/ + old_xml_begin = wim->hdr.xml_data_reshdr.offset_in_wim; + old_xml_end = old_xml_begin + wim->hdr.xml_data_reshdr.size_in_wim; + old_blob_table_end = wim->hdr.blob_table_reshdr.offset_in_wim + + wim->hdr.blob_table_reshdr.size_in_wim; + if (wim_has_integrity_table(wim) && + wim->hdr.integrity_table_reshdr.offset_in_wim < old_xml_end) { + WARNING("Didn't expect the integrity table to be " + "before the XML data"); + ret = WIMLIB_ERR_RESOURCE_ORDER; + goto out; + } + + if (old_blob_table_end > old_xml_begin) { + WARNING("Didn't expect the blob table to be after " + "the XML data"); + ret = WIMLIB_ERR_RESOURCE_ORDER; + goto out; + } + /* Set @old_wim_end, which indicates the point beyond which we + * don't allow any file and metadata resources to appear without + * returning WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we + * would otherwise overwrite these resources). */ + if (!any_images_changed(wim)) { + /* If no images have been modified, added, or deleted, + * then a new blob table does not need to be written. + * We shall write the new XML data and optional + * integrity table immediately after the blob table. + * Note that this may overwrite an existing integrity + * table. */ + old_wim_end = old_blob_table_end; + write_flags |= WIMLIB_WRITE_FLAG_NO_NEW_BLOBS; + } else if (wim_has_integrity_table(wim)) { + /* Old WIM has an integrity table; begin writing new + * blobs after it. */ + old_wim_end = wim->hdr.integrity_table_reshdr.offset_in_wim + + wim->hdr.integrity_table_reshdr.size_in_wim; + } else { + /* No existing integrity table; begin writing new blobs + * after the old XML data. */ + old_wim_end = old_xml_end; + } + + ret = check_resource_offsets(wim, old_wim_end); + if (ret) + goto out; + + ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES, + write_flags, &blob_list, + &blob_table_list, &filter_ctx); + if (ret) + goto out; - ret = prepare_blob_list_for_write(wim, WIMLIB_ALL_IMAGES, write_flags, - &blob_list, &blob_table_list, - &filter_ctx); + if (write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS) + wimlib_assert(list_empty(&blob_list)); + } + + /* Update image stats if needed. 
*/ + ret = update_image_stats(wim); if (ret) goto out; - if (write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS) - wimlib_assert(list_empty(&blob_list)); - ret = open_wim_writable(wim, wim->filename, O_RDWR); if (ret) goto out; @@ -3051,7 +3211,8 @@ overwrite_wim_inplace(WIMStruct *wim, int write_flags, unsigned num_threads) return 0; out_truncate: - if (!(write_flags & WIMLIB_WRITE_FLAG_NO_NEW_BLOBS)) { + if (!(write_flags & (WIMLIB_WRITE_FLAG_NO_NEW_BLOBS | + WIMLIB_WRITE_FLAG_UNSAFE_COMPACT))) { WARNING("Truncating \"%"TS"\" to its original size " "(%"PRIu64" bytes)", wim->filename, old_wim_end); /* Return value of ftruncate() is ignored because this is @@ -3065,6 +3226,7 @@ out_unlock_wim: out_close_wim: (void)close_wim_writable(wim, write_flags); out: + wim->being_compacted = 0; return ret; } @@ -3163,6 +3325,20 @@ wimlib_overwrite(WIMStruct *wim, int write_flags, unsigned num_threads) if (!wim->filename) return WIMLIB_ERR_NO_FILENAME; + if (unlikely(write_flags & WIMLIB_WRITE_FLAG_UNSAFE_COMPACT)) { + /* + * In UNSAFE_COMPACT mode: + * - RECOMPRESS is forbidden + * - REBUILD is ignored + * - SOFT_DELETE and NO_SOLID_SORT are implied + */ + if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS) + return WIMLIB_ERR_COMPACTION_NOT_POSSIBLE; + write_flags &= ~WIMLIB_WRITE_FLAG_REBUILD; + write_flags |= WIMLIB_WRITE_FLAG_SOFT_DELETE; + write_flags |= WIMLIB_WRITE_FLAG_NO_SOLID_SORT; + } + orig_hdr_flags = wim->hdr.flags; if (write_flags & WIMLIB_WRITE_FLAG_IGNORE_READONLY_FLAG) wim->hdr.flags &= ~WIM_HDR_FLAG_READONLY; @@ -3177,5 +3353,7 @@ wimlib_overwrite(WIMStruct *wim, int write_flags, unsigned num_threads) return ret; WARNING("Falling back to re-building entire WIM"); } + if (write_flags & WIMLIB_WRITE_FLAG_UNSAFE_COMPACT) + return WIMLIB_ERR_COMPACTION_NOT_POSSIBLE; return overwrite_wim_via_tmpfile(wim, write_flags, num_threads); }
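
Usage sketch (not part of the patch): a minimal example of how a library consumer might request the in-place compaction mode introduced by this change. Only WIMLIB_WRITE_FLAG_UNSAFE_COMPACT, WIMLIB_ERR_COMPACTION_NOT_POSSIBLE, and the wimlib_overwrite() signature are taken from this diff; wimlib_open_wim(), wimlib_free(), WIMLIB_OPEN_FLAG_WRITE_ACCESS, and the helper name compact_wim_file() are assumed from wimlib's public API and are not defined here.

#include <wimlib.h>

/* Compact the WIM at 'path' in place; fall back to a normal (safe)
 * overwrite if compaction turns out not to be possible. */
int
compact_wim_file(const wimlib_tchar *path)
{
	WIMStruct *wim = NULL;
	int ret;

	ret = wimlib_open_wim(path, WIMLIB_OPEN_FLAG_WRITE_ACCESS, &wim);
	if (ret)
		return ret;

	/* Request an in-place compaction.  Per this patch, RECOMPRESS is
	 * rejected with UNSAFE_COMPACT, REBUILD is ignored, and SOFT_DELETE
	 * and NO_SOLID_SORT are implied.  The operation must not be
	 * interrupted, or the WIM file will be corrupted. */
	ret = wimlib_overwrite(wim, WIMLIB_WRITE_FLAG_UNSAFE_COMPACT, 0);

	if (ret == WIMLIB_ERR_COMPACTION_NOT_POSSIBLE) {
		/* For example, overlapping resources were detected.  Retry as
		 * an ordinary overwrite without the compaction flag. */
		ret = wimlib_overwrite(wim, 0, 0);
	}

	wimlib_free(wim);
	return ret;
}

The explicit fallback matches the behavior added to wimlib_overwrite() above: when UNSAFE_COMPACT is requested, the library reports WIMLIB_ERR_COMPACTION_NOT_POSSIBLE instead of silently rebuilding via a temporary file, so the caller decides whether a non-compacting rewrite is acceptable.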