# include <sys/uio.h> /* for `struct iovec' */
#endif
+/* Return true if the specified resource is compressed and the compressed data
+ * can be reused with the specified output parameters --- i.e. the output
+ * compression type and chunk size both match the resource's existing ones and
+ * the resource is actually compressed.
+ *
+ * NOTE(review): @write_resource_flags is currently unused here; it appears to
+ * be accepted only for signature symmetry with must_compress_stream().
+ * NOTE(review): unlike the condition this replaces, there is no explicit
+ * lte->resource_location == RESOURCE_IN_WIM check; presumably
+ * wim_resource_compression_type() reports WIMLIB_COMPRESSION_TYPE_NONE for
+ * non-WIM-backed resources, so the last condition rejects them --- confirm. */
+static bool
+can_raw_copy(const struct wim_lookup_table_entry *lte,
+ int write_resource_flags, int out_ctype, u32 out_chunk_size)
+{
+ return (out_ctype == wim_resource_compression_type(lte)
+ && out_chunk_size == wim_resource_chunk_size(lte)
+ && out_ctype != WIMLIB_COMPRESSION_TYPE_NONE);
+}
+
+
+/* Return true if the specified resource must be recompressed when the specified
+ * output parameters are used: the output is compressed, and either
+ * recompression is explicitly forced via WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS
+ * or the existing compressed data cannot be reused as-is (see can_raw_copy()).
+ * Always false when the output compression type is NONE. */
+static bool
+must_compress_stream(const struct wim_lookup_table_entry *lte,
+ int write_resource_flags, int out_ctype, u32 out_chunk_size)
+{
+ return (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
+ && ((write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS)
+ || !can_raw_copy(lte, write_resource_flags,
+ out_ctype, out_chunk_size)));
+}
+
+/* Compress a chunk of data using the specified compression type.
+ *
+ * @uncompressed_data:  Buffer containing the data to compress.
+ * @uncompressed_len:   Size of the data to compress, in bytes.
+ * @compressed_data:    Output buffer for the compressed data.
+ * @out_ctype:          WIMLIB_COMPRESSION_TYPE_XPRESS or
+ *                      WIMLIB_COMPRESSION_TYPE_LZX; any other value is a
+ *                      programming error (asserted).
+ * @comp_ctx:           LZX compression context; consulted only when
+ *                      @out_ctype is WIMLIB_COMPRESSION_TYPE_LZX.
+ *
+ * Returns the number of compressed bytes written to @compressed_data, or 0
+ * if the underlying compressor declined to compress the data (e.g. it could
+ * not be made smaller than its uncompressed size). */
+static unsigned
+compress_chunk(const void *uncompressed_data,
+ unsigned uncompressed_len,
+ void *compressed_data,
+ int out_ctype,
+ struct wimlib_lzx_context *comp_ctx)
+{
+ switch (out_ctype) {
+ case WIMLIB_COMPRESSION_TYPE_XPRESS:
+ return wimlib_xpress_compress(uncompressed_data,
+ uncompressed_len,
+ compressed_data);
+ case WIMLIB_COMPRESSION_TYPE_LZX:
+ return wimlib_lzx_compress2(uncompressed_data,
+ uncompressed_len,
+ compressed_data,
+ comp_ctx);
+ default:
+ wimlib_assert(0);
+ return 0;
+ }
+}
+
/* Chunk table that's located at the beginning of each compressed resource in
* the WIM. (This is not the on-disk format; the on-disk format just has an
* array of offsets.) */
static int
begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte,
struct filedes *out_fd,
+ u32 out_chunk_size,
struct chunk_table **chunk_tab_ret,
int resource_flags)
{
int ret;
size = wim_resource_size(lte);
- num_chunks = wim_resource_chunks(lte);
+ num_chunks = DIV_ROUND_UP(size, out_chunk_size);
bytes_per_chunk_entry = (size > (1ULL << 32)) ? 8 : 4;
alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64);
chunk_tab = CALLOC(1, alloc_size);
}
}
-/*
- * compress_func_t- Pointer to a function to compresses a chunk
- * of a WIM resource. This may be either
- * wimlib_xpress_compress() (xpress-compress.c) or
- * wimlib_lzx_compress() (lzx-compress.c).
- *
- * @chunk: Uncompressed data of the chunk.
- * @chunk_size: Size of the uncompressed chunk, in bytes.
- * @out: Pointer to output buffer of size at least (@chunk_size - 1) bytes.
- *
- * Returns the size of the compressed data written to @out in bytes, or 0 if the
- * data could not be compressed to (@chunk_size - 1) bytes or fewer.
- *
- * As a special requirement, the compression code is optimized for the WIM
- * format and therefore requires (@chunk_size <= 32768).
- *
- * As another special requirement, the compression code will read up to 8 bytes
- * off the end of the @chunk array for performance reasons. The values of these
- * bytes will not affect the output of the compression, but the calling code
- * must make sure that the buffer holding the uncompressed chunk is actually at
- * least (@chunk_size + 8) bytes, or at least that these extra bytes are in
- * mapped memory that will not cause a memory access violation if accessed.
- */
-typedef unsigned (*compress_func_t)(const void *chunk, unsigned chunk_size,
- void *out);
-
-static compress_func_t
-get_compress_func(int out_ctype)
-{
- if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX)
- return wimlib_lzx_compress;
- else
- return wimlib_xpress_compress;
-}
-
/* Finishes a WIM chunk table and writes it to the output file at the correct
* offset. */
static int
}
struct write_resource_ctx {
- compress_func_t compress;
+ int out_ctype;
+ u32 out_chunk_size;
+ struct wimlib_lzx_context *comp_ctx;
struct chunk_table *chunk_tab;
struct filedes *out_fd;
SHA_CTX sha_ctx;
const void *out_chunk;
unsigned out_chunk_size;
int ret;
+ void *compressed_chunk = NULL;
+ unsigned compressed_size;
+ bool compressed_chunk_malloced = false;
+ size_t stack_max = 32768;
if (ctx->doing_sha)
sha1_update(&ctx->sha_ctx, chunk, chunk_size);
out_chunk = chunk;
out_chunk_size = chunk_size;
- if (ctx->compress) {
- void *compressed_chunk;
- unsigned compressed_size;
+ if (ctx->out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
/* Compress the chunk. */
- compressed_chunk = alloca(chunk_size);
- compressed_size = (*ctx->compress)(chunk, chunk_size,
- compressed_chunk);
+ if (chunk_size <= stack_max) {
+ compressed_chunk = alloca(chunk_size);
+ } else {
+ compressed_chunk = MALLOC(chunk_size);
+ if (compressed_chunk == NULL)
+ return WIMLIB_ERR_NOMEM;
+ compressed_chunk_malloced = true;
+ }
+ compressed_size = compress_chunk(chunk, chunk_size,
+ compressed_chunk,
+ ctx->out_ctype,
+ ctx->comp_ctx);
/* Use compressed data if compression to less than input size
* was successful. */
if (compressed_size) {
ret = full_write(ctx->out_fd, out_chunk, out_chunk_size);
if (ret)
goto error;
- return 0;
+
+out_free_memory:
+ if (compressed_chunk_malloced)
+ FREE(compressed_chunk);
+ return ret;
error:
ERROR_WITH_ERRNO("Failed to write WIM resource chunk");
- return ret;
+ goto out_free_memory;
}
/*
* On success, this is filled in with the offset, flags, compressed size,
* and uncompressed size of the resource in the output WIM.
*
- * @write_resource_flags:
+ * @resource_flags:
* * WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS to force data to be recompressed even
* if it could otherwise be copied directly from the input;
* * WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE if writing a resource for a pipable WIM
* (and the output file descriptor may be a pipe).
*
+ * @comp_ctx:
+ * Location of LZX compression context pointer, which will be allocated or
+ * updated if needed. (Initialize to NULL.)
+ *
* Additional notes: The SHA1 message digest of the uncompressed data is
* calculated (except when doing a raw copy --- see below). If the @unhashed
* flag is set on the lookup table entry, this message digest is simply copied
int
write_wim_resource(struct wim_lookup_table_entry *lte,
struct filedes *out_fd, int out_ctype,
+ u32 out_chunk_size,
struct resource_entry *out_res_entry,
- int resource_flags)
+ int resource_flags,
+ struct wimlib_lzx_context **comp_ctx)
{
struct write_resource_ctx write_ctx;
off_t res_start_offset;
+ u32 in_chunk_size;
u64 read_size;
int ret;
* desired other than no compression, we can simply copy the compressed
* data without recompressing it. This also means we must skip
* calculating the SHA1, as we never will see the uncompressed data. */
- if (lte->resource_location == RESOURCE_IN_WIM &&
- out_ctype == wim_resource_compression_type(lte) &&
- out_ctype != WIMLIB_COMPRESSION_TYPE_NONE &&
- !(resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS))
- {
+ if (can_raw_copy(lte, resource_flags, out_ctype, out_chunk_size)) {
/* Normally we can request a RAW_FULL read, but if we're reading
* from a pipable resource and writing a non-pipable resource or
* vice versa, then a RAW_CHUNKS read needs to be requested so
*/
if (lte->is_pipable == !!(resource_flags &
WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE))
+ {
resource_flags |= WIMLIB_READ_RESOURCE_FLAG_RAW_FULL;
- else
+ read_size = lte->resource_entry.size;
+ } else {
resource_flags |= WIMLIB_READ_RESOURCE_FLAG_RAW_CHUNKS;
+ read_size = lte->resource_entry.original_size;
+ }
write_ctx.doing_sha = false;
- read_size = lte->resource_entry.size;
} else {
write_ctx.doing_sha = true;
sha1_init(&write_ctx.sha_ctx);
* table and set the function to use for chunk compression. Exceptions:
* no compression function is needed if doing a raw copy; also, no chunk
* table is needed if doing a *full* (not per-chunk) raw copy. */
- write_ctx.compress = NULL;
+ write_ctx.out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
+ write_ctx.out_chunk_size = out_chunk_size;
write_ctx.chunk_tab = NULL;
if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE) {
- if (!(resource_flags & WIMLIB_READ_RESOURCE_FLAG_RAW))
- write_ctx.compress = get_compress_func(out_ctype);
+ wimlib_assert(out_chunk_size > 0);
+ if (!(resource_flags & WIMLIB_READ_RESOURCE_FLAG_RAW)) {
+ write_ctx.out_ctype = out_ctype;
+ if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX) {
+ ret = wimlib_lzx_alloc_context(out_chunk_size,
+ NULL, comp_ctx);
+ if (ret)
+ goto out;
+ }
+ write_ctx.comp_ctx = *comp_ctx;
+ }
if (!(resource_flags & WIMLIB_READ_RESOURCE_FLAG_RAW_FULL)) {
ret = begin_wim_resource_chunk_tab(lte, out_fd,
+ out_chunk_size,
&write_ctx.chunk_tab,
resource_flags);
if (ret)
write_ctx.out_fd = out_fd;
write_ctx.resource_flags = resource_flags;
try_write_again:
+ if (write_ctx.out_ctype == WIMLIB_COMPRESSION_TYPE_NONE)
+ in_chunk_size = wim_resource_chunk_size(lte);
+ else
+ in_chunk_size = out_chunk_size;
ret = read_resource_prefix(lte, read_size,
- write_resource_cb, &write_ctx, resource_flags);
+ write_resource_cb,
+ in_chunk_size, &write_ctx, resource_flags);
if (ret)
goto out_free_chunk_tab;
goto out_free_chunk_tab;
out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
FREE(write_ctx.chunk_tab);
- write_ctx.compress = NULL;
+ write_ctx.out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
write_ctx.chunk_tab = NULL;
write_ctx.doing_sha = false;
goto try_write_again;
write_wim_resource_from_buffer(const void *buf, size_t buf_size,
int reshdr_flags, struct filedes *out_fd,
int out_ctype,
+ u32 out_chunk_size,
struct resource_entry *out_res_entry,
- u8 *hash_ret, int write_resource_flags)
+ u8 *hash_ret, int write_resource_flags,
+ struct wimlib_lzx_context **comp_ctx)
{
/* Set up a temporary lookup table entry to provide to
* write_wim_resource(). */
lte.attached_buffer = (void*)buf;
lte.resource_entry.original_size = buf_size;
lte.resource_entry.flags = reshdr_flags;
+ lte.compression_type = WIMLIB_COMPRESSION_TYPE_NONE;
if (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_PIPABLE) {
sha1_buffer(buf, buf_size, lte.hash);
lte.unhashed = 1;
}
- ret = write_wim_resource(<e, out_fd, out_ctype, out_res_entry,
- write_resource_flags);
+ ret = write_wim_resource(<e, out_fd, out_ctype, out_chunk_size,
+ out_res_entry, write_resource_flags, comp_ctx);
if (ret)
return ret;
if (hash_ret)
struct compressor_thread_params {
struct shared_queue *res_to_compress_queue;
struct shared_queue *compressed_res_queue;
- compress_func_t compress;
+ int out_ctype;
+ struct wimlib_lzx_context *comp_ctx;
};
#define MAX_CHUNKS_PER_MSG 2
struct message {
struct wim_lookup_table_entry *lte;
+ u32 out_chunk_size;
u8 *uncompressed_chunks[MAX_CHUNKS_PER_MSG];
u8 *compressed_chunks[MAX_CHUNKS_PER_MSG];
unsigned uncompressed_chunk_sizes[MAX_CHUNKS_PER_MSG];
};
static void
-compress_chunks(struct message *msg, compress_func_t compress)
+compress_chunks(struct message *msg, int out_ctype,
+ struct wimlib_lzx_context *comp_ctx)
{
for (unsigned i = 0; i < msg->num_chunks; i++) {
- unsigned len = compress(msg->uncompressed_chunks[i],
- msg->uncompressed_chunk_sizes[i],
- msg->compressed_chunks[i]);
+ unsigned len;
+
+ len = compress_chunk(msg->uncompressed_chunks[i],
+ msg->uncompressed_chunk_sizes[i],
+ msg->compressed_chunks[i],
+ out_ctype,
+ comp_ctx);
+
void *out_chunk;
unsigned out_len;
if (len) {
struct compressor_thread_params *params = arg;
struct shared_queue *res_to_compress_queue = params->res_to_compress_queue;
struct shared_queue *compressed_res_queue = params->compressed_res_queue;
- compress_func_t compress = params->compress;
struct message *msg;
DEBUG("Compressor thread ready");
while ((msg = shared_queue_get(res_to_compress_queue)) != NULL) {
- compress_chunks(msg, compress);
+ compress_chunks(msg, params->out_ctype, params->comp_ctx);
shared_queue_put(compressed_res_queue, msg);
}
DEBUG("Compressor thread terminating");
struct serial_write_stream_ctx {
struct filedes *out_fd;
int out_ctype;
+ u32 out_chunk_size;
+ struct wimlib_lzx_context **comp_ctx;
int write_resource_flags;
};
{
struct serial_write_stream_ctx *ctx = _ctx;
return write_wim_resource(lte, ctx->out_fd,
- ctx->out_ctype, <e->output_resource_entry,
- ctx->write_resource_flags);
+ ctx->out_ctype,
+ ctx->out_chunk_size,
+ <e->output_resource_entry,
+ ctx->write_resource_flags,
+ ctx->comp_ctx);
}
struct wim_lookup_table *lookup_table,
struct filedes *out_fd,
int out_ctype,
+ u32 out_chunk_size,
+ struct wimlib_lzx_context **comp_ctx,
int write_resource_flags,
struct write_streams_progress_data *progress_data)
{
struct serial_write_stream_ctx ctx = {
.out_fd = out_fd,
.out_ctype = out_ctype,
+ .out_chunk_size = out_chunk_size,
.write_resource_flags = write_resource_flags,
+ .comp_ctx = comp_ctx,
};
return do_write_stream_list(stream_list,
lookup_table,
struct wim_lookup_table *lookup_table,
struct filedes *out_fd,
int out_ctype,
+ u32 out_chunk_size,
+ struct wimlib_lzx_context **comp_ctx,
int write_resource_flags,
struct write_streams_progress_data *progress_data)
{
lookup_table,
out_fd,
out_ctype,
+ out_chunk_size,
+ comp_ctx,
write_resource_flags,
progress_data);
}
struct filedes *out_fd;
off_t res_start_offset;
int out_ctype;
+ u32 out_chunk_size;
+ struct wimlib_lzx_context **comp_ctx;
int write_resource_flags;
struct shared_queue *res_to_compress_queue;
struct shared_queue *compressed_res_queue;
};
static int
-init_message(struct message *msg)
+init_message(struct message *msg, u32 out_chunk_size)
{
+ msg->out_chunk_size = out_chunk_size;
for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
- msg->compressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
- msg->uncompressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
+ msg->compressed_chunks[i] = MALLOC(out_chunk_size);
+ msg->uncompressed_chunks[i] = MALLOC(out_chunk_size);
if (msg->compressed_chunks[i] == NULL ||
msg->uncompressed_chunks[i] == NULL)
return WIMLIB_ERR_NOMEM;
}
static struct message *
-allocate_messages(size_t num_messages)
+allocate_messages(size_t num_messages, u32 out_chunk_size)
{
struct message *msgs;
if (!msgs)
return NULL;
for (size_t i = 0; i < num_messages; i++) {
- if (init_message(&msgs[i])) {
+ if (init_message(&msgs[i], out_chunk_size)) {
free_messages(msgs, num_messages);
return NULL;
}
{
/* Pre-allocate all the buffers that will be needed to do the chunk
* compression. */
- ctx->msgs = allocate_messages(ctx->num_messages);
+ ctx->msgs = allocate_messages(ctx->num_messages, ctx->out_chunk_size);
if (!ctx->msgs)
return WIMLIB_ERR_NOMEM;
* it if needed. */
ret = begin_wim_resource_chunk_tab(cur_lte,
ctx->out_fd,
+ ctx->out_chunk_size,
&ctx->cur_chunk_tab,
ctx->write_resource_flags);
if (ret)
ret = write_wim_resource(cur_lte,
ctx->out_fd,
WIMLIB_COMPRESSION_TYPE_NONE,
+ 0,
&cur_lte->output_resource_entry,
- ctx->write_resource_flags);
+ ctx->write_resource_flags,
+ ctx->comp_ctx);
if (ret)
return ret;
} else {
ctx->lookup_table,
ctx->out_fd,
ctx->out_ctype,
+ ctx->out_chunk_size,
+ ctx->comp_ctx,
ctx->write_resource_flags,
ctx->progress_data);
if (ret)
ctx->lookup_table,
ctx->out_fd,
ctx->out_ctype,
+ ctx->out_chunk_size,
+ ctx->comp_ctx,
ctx->write_resource_flags,
ctx->progress_data);
}
* when @lte is already hashed. */
sha1_init(&ctx->next_sha_ctx);
ctx->next_chunk = 0;
- ctx->next_num_chunks = wim_resource_chunks(lte);
+ ctx->next_num_chunks = DIV_ROUND_UP(wim_resource_size(lte),
+ ctx->out_chunk_size);
ctx->next_lte = lte;
INIT_LIST_HEAD(<e->msg_list);
list_add_tail(<e->being_compressed_list, &ctx->outstanding_streams);
ret = read_resource_prefix(lte, wim_resource_size(lte),
- main_writer_thread_cb, ctx, 0);
+ main_writer_thread_cb,
+ ctx->out_chunk_size, ctx, 0);
if (ret)
return ret;
wimlib_assert(ctx->next_chunk == ctx->next_num_chunks);
int ret;
if (wim_resource_size(lte) < 1000 ||
- ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
- (lte->resource_location == RESOURCE_IN_WIM &&
- !(ctx->write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS) &&
- lte->wim->compression_type == ctx->out_ctype))
+ !must_compress_stream(lte, ctx->write_resource_flags,
+ ctx->out_ctype, ctx->out_chunk_size))
{
/* Stream is too small or isn't being compressed. Process it by
* the main thread when we have a chance. We can't necessarily
* create the number of threads requested.
*
* High level description of the algorithm for writing compressed streams in
- * parallel: We perform compression on chunks of size WIM_CHUNK_SIZE bytes
- * rather than on full files. The currently executing thread becomes the main
- * thread and is entirely in charge of reading the data to compress (which may
- * be in any location understood by the resource code--- such as in an external
- * file being captured, or in another WIM file from which an image is being
- * exported) and actually writing the compressed data to the output file.
- * Additional threads are "compressor threads" and all execute the
- * compressor_thread_proc, where they repeatedly retrieve buffers of data from
- * the main thread, compress them, and hand them back to the main thread.
+ * parallel: We perform compression on chunks rather than on full files. The
+ * currently executing thread becomes the main thread and is entirely in charge
+ * of reading the data to compress (which may be in any location understood by
+ * the resource code--- such as in an external file being captured, or in
+ * another WIM file from which an image is being exported) and actually writing
+ * the compressed data to the output file. Additional threads are "compressor
+ * threads" and all execute the compressor_thread_proc, where they repeatedly
+ * retrieve buffers of data from the main thread, compress them, and hand them
+ * back to the main thread.
*
* Certain streams, such as streams that do not need to be compressed (e.g.
* input compression type same as output compression type) or streams of very
struct wim_lookup_table *lookup_table,
struct filedes *out_fd,
int out_ctype,
+ u32 out_chunk_size,
+ struct wimlib_lzx_context **comp_ctx,
int write_resource_flags,
struct write_streams_progress_data *progress_data,
unsigned num_threads)
if (ret)
goto out_destroy_res_to_compress_queue;
- struct compressor_thread_params params;
- params.res_to_compress_queue = &res_to_compress_queue;
- params.compressed_res_queue = &compressed_res_queue;
- params.compress = get_compress_func(out_ctype);
+ struct compressor_thread_params *params;
+
+ params = CALLOC(num_threads, sizeof(params[0]));
+ if (params == NULL) {
+ ret = WIMLIB_ERR_NOMEM;
+ goto out_destroy_compressed_res_queue;
+ }
+
+ for (unsigned i = 0; i < num_threads; i++) {
+ params[i].res_to_compress_queue = &res_to_compress_queue;
+ params[i].compressed_res_queue = &compressed_res_queue;
+ params[i].out_ctype = out_ctype;
+ if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX) {
+ ret = wimlib_lzx_alloc_context(out_chunk_size,
+ NULL, ¶ms[i].comp_ctx);
+ if (ret)
+ goto out_free_params;
+ }
+ }
compressor_threads = MALLOC(num_threads * sizeof(pthread_t));
if (!compressor_threads) {
ret = WIMLIB_ERR_NOMEM;
- goto out_destroy_compressed_res_queue;
+ goto out_free_params;
}
for (unsigned i = 0; i < num_threads; i++) {
DEBUG("pthread_create thread %u of %u", i + 1, num_threads);
ret = pthread_create(&compressor_threads[i], NULL,
- compressor_thread_proc, ¶ms);
+ compressor_thread_proc, ¶ms[i]);
if (ret != 0) {
ret = -1;
ERROR_WITH_ERRNO("Failed to create compressor "
ctx.lookup_table = lookup_table;
ctx.out_fd = out_fd;
ctx.out_ctype = out_ctype;
+ ctx.out_chunk_size = out_chunk_size;
+ ctx.comp_ctx = comp_ctx;
ctx.res_to_compress_queue = &res_to_compress_queue;
ctx.compressed_res_queue = &compressed_res_queue;
ctx.num_messages = queue_size;
}
}
FREE(compressor_threads);
+out_free_params:
+ for (unsigned i = 0; i < num_threads; i++)
+ wimlib_lzx_free_context(params[i].comp_ctx);
+ FREE(params);
out_destroy_compressed_res_queue:
shared_queue_destroy(&compressed_res_queue);
out_destroy_res_to_compress_queue:
lookup_table,
out_fd,
out_ctype,
+ out_chunk_size,
+ comp_ctx,
write_resource_flags,
progress_data);
static int
write_stream_list(struct list_head *stream_list,
struct wim_lookup_table *lookup_table,
- struct filedes *out_fd, int out_ctype, int write_flags,
+ struct filedes *out_fd, int out_ctype,
+ u32 out_chunk_size,
+ struct wimlib_lzx_context **comp_ctx,
+ int write_flags,
unsigned num_threads, wimlib_progress_func_t progress_func)
{
struct wim_lookup_table_entry *lte;
list_for_each_entry(lte, stream_list, write_streams_list) {
num_streams++;
total_bytes += wim_resource_size(lte);
- if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
- && (wim_resource_compression_type(lte) != out_ctype ||
- (write_resource_flags & WIMLIB_WRITE_RESOURCE_FLAG_RECOMPRESS)))
- {
+ if (must_compress_stream(lte, write_resource_flags,
+ out_ctype, out_chunk_size))
total_compression_bytes += wim_resource_size(lte);
- }
if (lte->resource_location == RESOURCE_IN_WIM) {
if (prev_wim_part != lte->wim) {
prev_wim_part = lte->wim;
lookup_table,
out_fd,
out_ctype,
+ out_chunk_size,
+ comp_ctx,
write_resource_flags,
&progress_data,
num_threads);
lookup_table,
out_fd,
out_ctype,
+ out_chunk_size,
+ comp_ctx,
write_resource_flags,
&progress_data);
if (ret == 0)
return write_stream_list(stream_list,
wim->lookup_table,
&wim->out_fd,
- wim->compression_type,
+ wim->out_compression_type,
+ wim->out_chunk_size,
+ &wim->lzx_context,
write_flags,
num_threads,
progress_func);
"metadata resource.", i);
ret = write_wim_resource(imd->metadata_lte,
&wim->out_fd,
- wim->compression_type,
+ wim->out_compression_type,
+ wim->out_chunk_size,
&imd->metadata_lte->output_resource_entry,
- write_resource_flags);
+ write_resource_flags,
+ &wim->lzx_context);
}
if (ret)
return ret;
{
int ret = 0;
- if (!(write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR))
+ if (!(write_flags & WIMLIB_WRITE_FLAG_FILE_DESCRIPTOR)) {
+ DEBUG("Closing WIM file.");
if (filedes_valid(&wim->out_fd))
if (filedes_close(&wim->out_fd))
ret = WIMLIB_ERR_WRITE;
+ }
filedes_invalidate(&wim->out_fd);
return ret;
}
off_t new_lookup_table_end;
u64 xml_totalbytes;
+ DEBUG("image=%d, write_flags=%08x", image, write_flags);
+
write_resource_flags = write_flags_to_resource_flags(write_flags);
/* In the WIM header, there is room for the resource entry for a
hdr_offset = 0;
if (write_flags & WIMLIB_WRITE_FLAG_HEADER_AT_END)
hdr_offset = wim->out_fd.offset;
+ DEBUG("Writing new header @ %"PRIu64".", hdr_offset);
ret = write_wim_header_at_offset(&wim->hdr, &wim->out_fd, hdr_offset);
if (ret)
return ret;
* operation has been written to disk, but the new file data has not.
*/
if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
+ DEBUG("Syncing WIM file.");
if (fsync(wim->out_fd.fd)) {
ERROR_WITH_ERRNO("Error syncing data to WIM file");
return WIMLIB_ERR_WRITE;
wim->hdr.part_number = part_number;
wim->hdr.total_parts = total_parts;
+ /* Set compression type if different. */
+ if (wim->compression_type != wim->out_compression_type)
+ wim->hdr.flags = get_wim_hdr_cflags(wim->out_compression_type);
+
+ /* Set chunk size if different. */
+ wim->hdr.chunk_size = wim->out_chunk_size;
+
/* Use GUID if specified; otherwise generate a new one. */
if (guid)
memcpy(wim->hdr.guid, guid, WIMLIB_GUID_LEN);
out_restore_hdr:
memcpy(&wim->hdr, &hdr_save, sizeof(struct wim_header));
(void)close_wim_writable(wim, write_flags);
+ DEBUG("ret=%d", ret);
return ret;
}
wim->lookup_table,
&wim->out_fd,
wim->compression_type,
+ wim->chunk_size,
+ &wim->lzx_context,
write_flags,
num_threads,
progress_func);
ret = wimlib_write(wim, tmpfile, WIMLIB_ALL_IMAGES,
write_flags | WIMLIB_WRITE_FLAG_FSYNC,
num_threads, progress_func);
- if (ret)
- goto out_unlink;
+ if (ret) {
+ tunlink(tmpfile);
+ return ret;
+ }
close_wim(wim);
+ /* Rename the new WIM file to the original WIM file. Note: on Windows
+ * this actually calls win32_rename_replacement(), not _wrename(), so
+ * that removing the existing destination file can be handled. */
DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, wim->filename);
- /* Rename the new file to the old file .*/
- if (trename(tmpfile, wim->filename) != 0) {
+ ret = trename(tmpfile, wim->filename);
+ if (ret) {
ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'",
tmpfile, wim->filename);
- ret = WIMLIB_ERR_RENAME;
- goto out_unlink;
+ #ifdef __WIN32__
+ if (ret < 0)
+ #endif
+ {
+ tunlink(tmpfile);
+ }
+ return WIMLIB_ERR_RENAME;
}
if (progress_func) {
progress_func(WIMLIB_PROGRESS_MSG_RENAME, &progress);
}
return 0;
-
-out_unlink:
- /* Remove temporary file. */
- tunlink(tmpfile);
- return ret;
}
/* API function documented in wimlib.h */
if ((!wim->deletion_occurred || (write_flags & WIMLIB_WRITE_FLAG_SOFT_DELETE))
&& !(write_flags & (WIMLIB_WRITE_FLAG_REBUILD |
WIMLIB_WRITE_FLAG_PIPABLE))
- && !(wim_is_pipable(wim)))
+ && !(wim_is_pipable(wim))
+ && wim->compression_type == wim->out_compression_type
+ && wim->chunk_size == wim->out_chunk_size)
{
ret = overwrite_wim_inplace(wim, write_flags, num_threads,
progress_func);