X-Git-Url: https://wimlib.net/git/?p=wimlib;a=blobdiff_plain;f=src%2Fwrite.c;h=8b09449036b2ac52ad016278fe6f0f4180a91700;hp=43e81b677ea57848759cbd2a86b11127f69e98dd;hb=16aab5992bfe8c33e2815b4ce5c9ebd7ec130c21;hpb=34a91e36924e10b924117d91acd116ade58df0b4 diff --git a/src/write.c b/src/write.c index 43e81b67..8b094490 100644 --- a/src/write.c +++ b/src/write.c @@ -65,10 +65,6 @@ #include -#if defined(__WIN32__) && !defined(INVALID_HANDLE_VALUE) -# define INVALID_HANDLE_VALUE ((HANDLE)(-1)) -#endif - /* Chunk table that's located at the beginning of each compressed resource in * the WIM. (This is not the on-disk format; the on-disk format just has an * array of offsets.) */ @@ -97,13 +93,13 @@ begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte, u64 num_chunks = (size + WIM_CHUNK_SIZE - 1) / WIM_CHUNK_SIZE; size_t alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64); struct chunk_table *chunk_tab = CALLOC(1, alloc_size); - int ret; + + DEBUG("Begin chunk table for stream with size %"PRIu64, size); if (!chunk_tab) { ERROR("Failed to allocate chunk table for %"PRIu64" byte " "resource", size); - ret = WIMLIB_ERR_NOMEM; - goto out; + return WIMLIB_ERR_NOMEM; } chunk_tab->file_offset = file_offset; chunk_tab->num_chunks = num_chunks; @@ -119,14 +115,10 @@ begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte, ERROR_WITH_ERRNO("Failed to write chunk table in compressed " "file resource"); FREE(chunk_tab); - ret = WIMLIB_ERR_WRITE; - goto out; + return WIMLIB_ERR_WRITE; } - - ret = 0; *chunk_tab_ret = chunk_tab; -out: - return ret; + return 0; } /* @@ -155,7 +147,7 @@ out: typedef unsigned (*compress_func_t)(const void *chunk, unsigned chunk_size, void *out); -compress_func_t +static compress_func_t get_compress_func(int out_ctype) { if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX) @@ -178,16 +170,18 @@ get_compress_func(int out_ctype) * Returns 0 on success; nonzero on failure. */ static int -write_wim_resource_chunk(const void *chunk, unsigned chunk_size, - FILE *out_fp, compress_func_t compress, - struct chunk_table *chunk_tab) +write_wim_resource_chunk(const void * restrict chunk, + unsigned chunk_size, + FILE * restrict out_fp, + compress_func_t compress, + struct chunk_table * restrict chunk_tab) { - const u8 *out_chunk; + const void *out_chunk; unsigned out_chunk_size; if (compress) { - u8 *compressed_chunk = alloca(chunk_size); + void *compressed_chunk = alloca(chunk_size); - out_chunk_size = compress(chunk, chunk_size, compressed_chunk); + out_chunk_size = (*compress)(chunk, chunk_size, compressed_chunk); if (out_chunk_size) { /* Write compressed */ out_chunk = compressed_chunk; @@ -218,8 +212,9 @@ write_wim_resource_chunk(const void *chunk, unsigned chunk_size, * @compressed_size_p. 
*/ static int -finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab, - FILE *out_fp, u64 *compressed_size_p) +finish_wim_resource_chunk_tab(struct chunk_table * restrict chunk_tab, + FILE * restrict out_fp, + u64 * restrict compressed_size_p) { size_t bytes_written; if (fseeko(out_fp, chunk_tab->file_offset, SEEK_SET) != 0) { @@ -252,7 +247,23 @@ finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab, } static int -finalize_and_check_sha1(SHA_CTX *sha_ctx, struct wim_lookup_table_entry *lte) +fflush_and_ftruncate(FILE *out_fp, off_t offset) +{ + if (fseeko(out_fp, offset, SEEK_SET) || + fflush(out_fp) || + ftruncate(fileno(out_fp), offset)) + { + ERROR_WITH_ERRNO("Failed to flush and/or truncate " + "output WIM file"); + return WIMLIB_ERR_WRITE; + } else { + return 0; + } +} + +static int +finalize_and_check_sha1(SHA_CTX * restrict sha_ctx, + struct wim_lookup_table_entry * restrict lte) { u8 md[SHA1_HASH_SIZE]; sha1_final(md, sha_ctx); @@ -280,25 +291,16 @@ struct write_resource_ctx { }; static int -write_resource_cb(const void *chunk, size_t chunk_size, void *_ctx) +write_resource_cb(const void *restrict chunk, size_t chunk_size, + void *restrict _ctx) { struct write_resource_ctx *ctx = _ctx; if (ctx->doing_sha) sha1_update(&ctx->sha_ctx, chunk, chunk_size); - - if (ctx->compress) { - return write_wim_resource_chunk(chunk, chunk_size, - ctx->out_fp, ctx->compress, - ctx->chunk_tab); - } else { - if (fwrite(chunk, 1, chunk_size, ctx->out_fp) != chunk_size) { - ERROR_WITH_ERRNO("Error writing to output WIM"); - return WIMLIB_ERR_WRITE; - } else { - return 0; - } - } + return write_wim_resource_chunk(chunk, chunk_size, + ctx->out_fp, ctx->compress, + ctx->chunk_tab); } /* @@ -339,12 +341,6 @@ write_wim_resource(struct wim_lookup_table_entry *lte, flags &= ~WIMLIB_RESOURCE_FLAG_RECOMPRESS; - if (wim_resource_size(lte) == 0) { - /* Empty resource; nothing needs to be done, so just return - * success. */ - return 0; - } - /* Get current position in output WIM */ offset = ftello(out_fp); if (offset == -1) { @@ -426,19 +422,12 @@ try_write_again: if (new_size >= wim_resource_size(lte)) { /* Oops! We compressed the resource to larger than the original * size. Write the resource uncompressed instead. 
*/ - if (fseeko(out_fp, offset, SEEK_SET) || - fflush(out_fp) || - ftruncate(fileno(out_fp), - offset + wim_resource_size(lte))) - { - ERROR_WITH_ERRNO("Failed to flush and/or truncate " - "output WIM file"); - ret = WIMLIB_ERR_WRITE; - goto out_free_chunk_tab; - } DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; " "writing uncompressed instead", wim_resource_size(lte), new_size); + ret = fflush_and_ftruncate(out_fp, offset); + if (ret) + goto out_free_chunk_tab; write_ctx.compress = NULL; write_ctx.doing_sha = false; out_ctype = WIMLIB_COMPRESSION_TYPE_NONE; @@ -473,15 +462,30 @@ shared_queue_init(struct shared_queue *q, unsigned size) wimlib_assert(size != 0); q->array = CALLOC(sizeof(q->array[0]), size); if (!q->array) - return WIMLIB_ERR_NOMEM; + goto err; q->filled_slots = 0; q->front = 0; q->back = size - 1; q->size = size; - pthread_mutex_init(&q->lock, NULL); - pthread_cond_init(&q->msg_avail_cond, NULL); - pthread_cond_init(&q->space_avail_cond, NULL); + if (pthread_mutex_init(&q->lock, NULL)) { + ERROR_WITH_ERRNO("Failed to initialize mutex"); + goto err; + } + if (pthread_cond_init(&q->msg_avail_cond, NULL)) { + ERROR_WITH_ERRNO("Failed to initialize condition variable"); + goto err_destroy_lock; + } + if (pthread_cond_init(&q->space_avail_cond, NULL)) { + ERROR_WITH_ERRNO("Failed to initialize condition variable"); + goto err_destroy_msg_avail_cond; + } return 0; +err_destroy_msg_avail_cond: + pthread_cond_destroy(&q->msg_avail_cond); +err_destroy_lock: + pthread_mutex_destroy(&q->lock); +err: + return WIMLIB_ERR_NOMEM; } static void @@ -552,7 +556,6 @@ static void compress_chunks(struct message *msg, compress_func_t compress) { for (unsigned i = 0; i < msg->num_chunks; i++) { - DEBUG2("compress chunk %u of %u", i, msg->num_chunks); unsigned len = compress(msg->uncompressed_chunks[i], msg->uncompressed_chunk_sizes[i], msg->compressed_chunks[i]); @@ -616,16 +619,34 @@ do_write_streams_progress(union wimlib_progress_info *progress, } } +struct serial_write_stream_ctx { + FILE *out_fp; + int out_ctype; + int write_resource_flags; +}; + +static int +serial_write_stream(struct wim_lookup_table_entry *lte, void *_ctx) +{ + struct serial_write_stream_ctx *ctx = _ctx; + return write_wim_resource(lte, ctx->out_fp, + ctx->out_ctype, <e->output_resource_entry, + ctx->write_resource_flags); +} + +/* Write a list of streams, taking into account that some streams may be + * duplicates that are checksummed and discarded on the fly, and also delegating + * the actual writing of a stream to a function @write_stream_cb, which is + * passed the context @write_stream_ctx. */ static int do_write_stream_list(struct list_head *stream_list, struct wim_lookup_table *lookup_table, - FILE *out_fp, - int out_ctype, + int (*write_stream_cb)(struct wim_lookup_table_entry *, void *), + void *write_stream_ctx, wimlib_progress_func_t progress_func, - union wimlib_progress_info *progress, - int write_resource_flags) + union wimlib_progress_info *progress) { - int ret; + int ret = 0; struct wim_lookup_table_entry *lte; /* For each stream in @stream_list ... */ @@ -641,11 +662,9 @@ do_write_stream_list(struct list_head *stream_list, struct wim_lookup_table_entry *tmp; u32 orig_refcnt = lte->out_refcnt; - ret = hash_unhashed_stream(lte, - lookup_table, - &tmp); + ret = hash_unhashed_stream(lte, lookup_table, &tmp); if (ret) - return ret; + break; if (tmp != lte) { lte = tmp; /* We found a duplicate stream. */ @@ -655,6 +674,7 @@ do_write_stream_list(struct list_head *stream_list, * just skip to the next stream. 
*/ DEBUG("Discarding duplicate stream of length %"PRIu64, wim_resource_size(lte)); + lte->no_progress = 0; goto skip_to_progress; } } @@ -668,24 +688,60 @@ do_write_stream_list(struct list_head *stream_list, * while in the latter case this is done because we do not have * the SHA1 message digest yet. */ wimlib_assert(lte->out_refcnt != 0); - ret = write_wim_resource(lte, - out_fp, - out_ctype, - <e->output_resource_entry, - write_resource_flags); + lte->deferred = 0; + lte->no_progress = 0; + ret = (*write_stream_cb)(lte, write_stream_ctx); if (ret) - return ret; + break; + /* In parallel mode, some streams are deferred for later, + * serialized processing; ignore them here. */ + if (lte->deferred) + continue; if (lte->unhashed) { list_del(<e->unhashed_list); lookup_table_insert(lookup_table, lte); lte->unhashed = 0; } skip_to_progress: - do_write_streams_progress(progress, - progress_func, - wim_resource_size(lte)); + if (!lte->no_progress) { + do_write_streams_progress(progress, + progress_func, + wim_resource_size(lte)); + } } - return 0; + return ret; +} + +static int +do_write_stream_list_serial(struct list_head *stream_list, + struct wim_lookup_table *lookup_table, + FILE *out_fp, + int out_ctype, + int write_resource_flags, + wimlib_progress_func_t progress_func, + union wimlib_progress_info *progress) +{ + struct serial_write_stream_ctx ctx = { + .out_fp = out_fp, + .out_ctype = out_ctype, + .write_resource_flags = write_resource_flags, + }; + return do_write_stream_list(stream_list, + lookup_table, + serial_write_stream, + &ctx, + progress_func, + progress); +} + +static inline int +write_flags_to_resource_flags(int write_flags) +{ + int resource_flags = 0; + + if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS) + resource_flags |= WIMLIB_RESOURCE_FLAG_RECOMPRESS; + return resource_flags; } static int @@ -693,22 +749,21 @@ write_stream_list_serial(struct list_head *stream_list, struct wim_lookup_table *lookup_table, FILE *out_fp, int out_ctype, - int write_flags, + int write_resource_flags, wimlib_progress_func_t progress_func, union wimlib_progress_info *progress) { - int write_resource_flags = 0; - if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS) - write_resource_flags |= WIMLIB_RESOURCE_FLAG_RECOMPRESS; - + DEBUG("Writing stream list (serial version)"); progress->write_streams.num_threads = 1; if (progress_func) progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress); - return do_write_stream_list(stream_list, - lookup_table, - out_fp, - out_ctype, progress_func, - progress, write_resource_flags); + return do_write_stream_list_serial(stream_list, + lookup_table, + out_fp, + out_ctype, + write_resource_flags, + progress_func, + progress); } #ifdef ENABLE_MULTITHREADED_COMPRESSION @@ -719,9 +774,6 @@ write_wim_chunks(struct message *msg, FILE *out_fp, for (unsigned i = 0; i < msg->num_chunks; i++) { unsigned chunk_csize = msg->compressed_chunk_sizes[i]; - DEBUG2("Write wim chunk %u of %u (csize = %u)", - i, msg->num_chunks, chunk_csize); - if (fwrite(msg->out_compressed_chunks[i], 1, chunk_csize, out_fp) != chunk_csize) { @@ -740,26 +792,26 @@ struct main_writer_thread_ctx { struct wim_lookup_table *lookup_table; FILE *out_fp; int out_ctype; + int write_resource_flags; struct shared_queue *res_to_compress_queue; struct shared_queue *compressed_res_queue; size_t num_messages; - int write_flags; wimlib_progress_func_t progress_func; union wimlib_progress_info *progress; struct list_head available_msgs; struct list_head outstanding_streams; struct list_head serial_streams; + size_t 
num_outstanding_messages; + + SHA_CTX next_sha_ctx; u64 next_chunk; u64 next_num_chunks; + struct wim_lookup_table_entry *next_lte; + struct message *msgs; struct message *next_msg; - size_t next_chunk_in_msg; - struct wim_lookup_table_entry *cur_lte; struct chunk_table *cur_chunk_tab; - struct wim_lookup_table_entry *next_lte; - SHA_CTX sha_ctx; - u8 next_hash[20]; }; static int @@ -814,11 +866,12 @@ allocate_messages(size_t num_messages) static void main_writer_thread_destroy_ctx(struct main_writer_thread_ctx *ctx) { + while (ctx->num_outstanding_messages--) + shared_queue_get(ctx->compressed_res_queue); free_messages(ctx->msgs, ctx->num_messages); FREE(ctx->cur_chunk_tab); } - static int main_writer_thread_init_ctx(struct main_writer_thread_ctx *ctx) { @@ -837,17 +890,22 @@ main_writer_thread_init_ctx(struct main_writer_thread_ctx *ctx) * chunks sent off for compression. * * The first stream in outstanding_streams is the stream that is - * currently being written (cur_lte). + * currently being written. * * The last stream in outstanding_streams is the stream that is - * currently being read and chunks fed to the compressor threads. */ + * currently being read and having chunks fed to the compressor threads. + * */ INIT_LIST_HEAD(&ctx->outstanding_streams); + ctx->num_outstanding_messages = 0; + + ctx->next_msg = NULL; /* Resources that don't need any chunks compressed are added to this * list and written directly by the main thread. */ INIT_LIST_HEAD(&ctx->serial_streams); - ctx->cur_lte = NULL; + ctx->cur_chunk_tab = NULL; + return 0; } @@ -859,13 +917,18 @@ receive_compressed_chunks(struct main_writer_thread_ctx *ctx) int ret; wimlib_assert(!list_empty(&ctx->outstanding_streams)); + wimlib_assert(ctx->num_outstanding_messages != 0); + + cur_lte = container_of(ctx->outstanding_streams.next, + struct wim_lookup_table_entry, + being_compressed_list); /* Get the next message from the queue and process it. * The message will contain 1 or more data chunks that have been * compressed. */ msg = shared_queue_get(ctx->compressed_res_queue); msg->complete = true; - cur_lte = ctx->cur_lte; + --ctx->num_outstanding_messages; /* Is this the next chunk in the current resource? If it's not * (i.e., an earlier chunk in a same or different resource @@ -874,67 +937,72 @@ receive_compressed_chunks(struct main_writer_thread_ctx *ctx) * * Otherwise, write all the chunks we can. */ while (cur_lte != NULL && - !list_empty(&cur_lte->msg_list) && - (msg = container_of(cur_lte->msg_list.next, - struct message, - list))->complete) + !list_empty(&cur_lte->msg_list) + && (msg = container_of(cur_lte->msg_list.next, + struct message, + list))->complete) { + list_move(&msg->list, &ctx->available_msgs); if (msg->begin_chunk == 0) { - /* This is the first set of chunks. Leave space * for the chunk table in the output file. */ off_t cur_offset = ftello(ctx->out_fp); - if (cur_offset == -1) { - ret = WIMLIB_ERR_WRITE; - goto out; - } + if (cur_offset == -1) + return WIMLIB_ERR_WRITE; ret = begin_wim_resource_chunk_tab(cur_lte, ctx->out_fp, cur_offset, &ctx->cur_chunk_tab); if (ret) - goto out; + return ret; } /* Write the compressed chunks from the message. */ ret = write_wim_chunks(msg, ctx->out_fp, ctx->cur_chunk_tab); if (ret) - goto out; - - list_del(&msg->list); - - /* This message is available to use for different chunks - * now. */ - list_add(&msg->list, &ctx->available_msgs); + return ret; /* Was this the last chunk of the stream? If so, finish * it. 
*/ if (list_empty(&cur_lte->msg_list) && msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks) { - DEBUG2("Finish wim chunk tab"); u64 res_csize; + off_t offset; + ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab, ctx->out_fp, &res_csize); if (ret) - goto out; + return ret; + + list_del(&cur_lte->being_compressed_list); + + /* Grab the offset of this stream in the output file + * from the chunk table before we free it. */ + offset = ctx->cur_chunk_tab->file_offset; + + FREE(ctx->cur_chunk_tab); + ctx->cur_chunk_tab = NULL; -#if 0 if (res_csize >= wim_resource_size(cur_lte)) { /* Oops! We compressed the resource to * larger than the original size. Write * the resource uncompressed instead. */ - ret = write_uncompressed_resource_and_truncate( - cur_lte, - ctx->out_fp, - ctx->cur_chunk_tab->file_offset, - &cur_lte->output_resource_entry); + DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; " + "writing uncompressed instead", + wim_resource_size(cur_lte), res_csize); + ret = fflush_and_ftruncate(ctx->out_fp, offset); if (ret) - goto out; - } else -#endif - { + return ret; + ret = write_wim_resource(cur_lte, + ctx->out_fp, + WIMLIB_COMPRESSION_TYPE_NONE, + &cur_lte->output_resource_entry, + ctx->write_resource_flags); + if (ret) + return ret; + } else { cur_lte->output_resource_entry.size = res_csize; @@ -942,26 +1010,16 @@ receive_compressed_chunks(struct main_writer_thread_ctx *ctx) cur_lte->resource_entry.original_size; cur_lte->output_resource_entry.offset = - ctx->cur_chunk_tab->file_offset; + offset; cur_lte->output_resource_entry.flags = cur_lte->resource_entry.flags | WIM_RESHDR_FLAG_COMPRESSED; } - do_write_streams_progress(ctx->progress, ctx->progress_func, - wim_resource_size(cur_lte)); - FREE(ctx->cur_chunk_tab); - ctx->cur_chunk_tab = NULL; - struct list_head *next = cur_lte->write_streams_list.next; - list_del(&cur_lte->write_streams_list); - - if (next == &ctx->outstanding_streams) - cur_lte = NULL; - else - cur_lte = container_of(cur_lte->write_streams_list.next, - struct wim_lookup_table_entry, - write_streams_list); + do_write_streams_progress(ctx->progress, + ctx->progress_func, + wim_resource_size(cur_lte)); /* Since we just finished writing a stream, write any * streams that have been added to the serial_streams @@ -969,162 +1027,163 @@ receive_compressed_chunks(struct main_writer_thread_ctx *ctx) * resources that don't need to be compressed because * the desired compression type is the same as the * previous compression type). */ - ret = do_write_stream_list(&ctx->serial_streams, - ctx->lookup_table, - ctx->out_fp, - ctx->out_ctype, - ctx->progress_func, - ctx->progress, - 0); - if (ret) - goto out; + if (!list_empty(&ctx->serial_streams)) { + ret = do_write_stream_list_serial(&ctx->serial_streams, + ctx->lookup_table, + ctx->out_fp, + ctx->out_ctype, + ctx->write_resource_flags, + ctx->progress_func, + ctx->progress); + if (ret) + return ret; + } + + /* Advance to the next stream to write. */ + if (list_empty(&ctx->outstanding_streams)) { + cur_lte = NULL; + } else { + cur_lte = container_of(ctx->outstanding_streams.next, + struct wim_lookup_table_entry, + being_compressed_list); + } } } -out: - ctx->cur_lte = cur_lte; - return ret; + return 0; } +/* Called when the main thread has read a new chunk of data. 
*/ static int main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx) { struct main_writer_thread_ctx *ctx = _ctx; int ret; struct message *next_msg; + u64 next_chunk_in_msg; - next_msg = ctx->next_msg; - - sha1_update(&ctx->sha_ctx, chunk, chunk_size); + /* Update SHA1 message digest for the stream currently being read by the + * main thread. */ + sha1_update(&ctx->next_sha_ctx, chunk, chunk_size); + /* We send chunks of data to the compressor chunks in batches which we + * refer to as "messages". @next_msg is the message that is currently + * being prepared to send off. If it is NULL, that indicates that we + * need to start a new message. */ + next_msg = ctx->next_msg; if (!next_msg) { - if (list_empty(&ctx->available_msgs)) { + /* We need to start a new message. First check to see if there + * is a message available in the list of available messages. If + * so, we can just take one. If not, all the messages (there is + * a fixed number of them, proportional to the number of + * threads) have been sent off to the compressor threads, so we + * receive messages from the compressor threads containing + * compressed chunks of data. + * + * We may need to receive multiple messages before one is + * actually available to use because messages received that are + * *not* for the very next set of chunks to compress must be + * buffered until it's time to write those chunks. */ + while (list_empty(&ctx->available_msgs)) { ret = receive_compressed_chunks(ctx); if (ret) return ret; } - wimlib_assert(!list_empty(&ctx->available_msgs)); - next_msg = container_of(ctx->available_msgs.next, - struct message, - list); + struct message, list); list_del(&next_msg->list); next_msg->complete = false; next_msg->begin_chunk = ctx->next_chunk; next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG, ctx->next_num_chunks - ctx->next_chunk); - ctx->next_chunk_in_msg = 0; + ctx->next_msg = next_msg; } - wimlib_assert(next_msg != NULL); - wimlib_assert(ctx->next_chunk_in_msg < next_msg->num_chunks); + /* Fill in the next chunk to compress */ + next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk; - next_msg->uncompressed_chunk_sizes[ctx->next_chunk_in_msg] = chunk_size; - memcpy(next_msg->uncompressed_chunks[ctx->next_chunk_in_msg], + next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size; + memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg], chunk, chunk_size); - - if (++ctx->next_chunk_in_msg == next_msg->num_chunks) { - shared_queue_put(ctx->res_to_compress_queue, - next_msg); + ctx->next_chunk++; + if (++next_chunk_in_msg == next_msg->num_chunks) { + /* Send off an array of chunks to compress */ + list_add_tail(&next_msg->list, &ctx->next_lte->msg_list); + shared_queue_put(ctx->res_to_compress_queue, next_msg); + ++ctx->num_outstanding_messages; ctx->next_msg = NULL; } return 0; } +static int +main_writer_thread_finish(void *_ctx) +{ + struct main_writer_thread_ctx *ctx = _ctx; + int ret; + while (ctx->num_outstanding_messages != 0) { + ret = receive_compressed_chunks(ctx); + if (ret) + return ret; + } + wimlib_assert(list_empty(&ctx->outstanding_streams)); + return do_write_stream_list_serial(&ctx->serial_streams, + ctx->lookup_table, + ctx->out_fp, + ctx->out_ctype, + ctx->write_resource_flags, + ctx->progress_func, + ctx->progress); +} + static int submit_stream_for_compression(struct wim_lookup_table_entry *lte, struct main_writer_thread_ctx *ctx) { int ret; - sha1_init(&ctx->sha_ctx); + /* Read the entire stream @lte, feeding its data chunks to the + * compressor threads. 
Also SHA1-sum the stream; this is required in + * the case that @lte is unhashed, and a nice additional verification + * when @lte is already hashed. */ + sha1_init(&ctx->next_sha_ctx); + ctx->next_chunk = 0; ctx->next_num_chunks = wim_resource_chunks(lte); + ctx->next_lte = lte; + INIT_LIST_HEAD(<e->msg_list); + list_add_tail(<e->being_compressed_list, &ctx->outstanding_streams); ret = read_resource_prefix(lte, wim_resource_size(lte), main_writer_thread_cb, ctx, 0); - if (ret) - return ret; - ret = finalize_and_check_sha1(&ctx->sha_ctx, lte); - if (ret) - return ret; + if (ret == 0) { + wimlib_assert(ctx->next_chunk == ctx->next_num_chunks); + ret = finalize_and_check_sha1(&ctx->next_sha_ctx, lte); + } + return ret; } -/* - * This function is executed by the main thread when the resources are being - * compressed in parallel. The main thread is in change of all reading of the - * uncompressed data and writing of the compressed data. The compressor threads - * *only* do compression from/to in-memory buffers. - * - * Each unit of work given to a compressor thread is up to MAX_CHUNKS_PER_MSG - * chunks of compressed data to compress, represented in a `struct message'. - * Each message is passed from the main thread to a worker thread through the - * res_to_compress_queue, and it is passed back through the - * compressed_res_queue. - */ static int -main_writer_thread_proc(struct main_writer_thread_ctx *ctx) +main_thread_process_next_stream(struct wim_lookup_table_entry *lte, void *_ctx) { + struct main_writer_thread_ctx *ctx = _ctx; int ret; - struct list_head *stream_list; - struct wim_lookup_table_entry *lte; - - ret = main_writer_thread_init_ctx(ctx); - if (ret) - goto out_destroy_ctx; - - stream_list = ctx->stream_list; - while (!list_empty(stream_list)) { - lte = container_of(stream_list->next, - struct wim_lookup_table_entry, - write_streams_list); - list_del(<e->write_streams_list); - if (lte->unhashed && !lte->unique_size) { - struct wim_lookup_table_entry *tmp; - u32 orig_refcnt = lte->out_refcnt; - ret = hash_unhashed_stream(lte, ctx->lookup_table, &tmp); - if (ret) - goto out_destroy_ctx; - if (tmp != lte) { - lte = tmp; - if (orig_refcnt != tmp->out_refcnt) { - DEBUG("Discarding duplicate stream of length %"PRIu64, - wim_resource_size(lte)); - goto skip_to_progress; - } - } - } - - if (wim_resource_size(lte) < 1000 || - ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE || - (lte->resource_location == RESOURCE_IN_WIM && - wimlib_get_compression_type(lte->wim) == ctx->out_ctype)) - { - list_add(<e->write_streams_list, - &ctx->serial_streams); - } else { - ret = submit_stream_for_compression(lte, ctx); - if (ret) - goto out_destroy_ctx; - if (lte->unhashed) { - list_del(<e->unhashed_list); - lookup_table_insert(ctx->lookup_table, lte); - lte->unhashed = 0; - } - } - skip_to_progress: - do_write_streams_progress(ctx->progress, - ctx->progress_func, - wim_resource_size(lte)); - } - - while (!list_empty(&ctx->outstanding_streams)) { - ret = receive_compressed_chunks(ctx); - if (ret) - goto out_destroy_ctx; + if (wim_resource_size(lte) < 1000 || + ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE || + (lte->resource_location == RESOURCE_IN_WIM && + !(ctx->write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) && + wimlib_get_compression_type(lte->wim) == ctx->out_ctype)) + { + /* Stream is too small or isn't being compressed. Process it by + * the main thread when we have a chance. 
We can't necessarily + * process it right here, as the main thread could be in the + * middle of writing a different stream. */ + list_add_tail(<e->write_streams_list, &ctx->serial_streams); + lte->deferred = 1; + ret = 0; + } else { + ret = submit_stream_for_compression(lte, ctx); } - ret = 0; -out_destroy_ctx: - main_writer_thread_destroy_ctx(ctx); + lte->no_progress = 1; return ret; } @@ -1138,15 +1197,41 @@ get_default_num_threads() #endif } +/* Equivalent to write_stream_list_serial(), except this takes a @num_threads + * parameter and will perform compression using that many threads. Falls + * back to write_stream_list_serial() on certain errors, such as a failure to + * create the number of threads requested. + * + * High level description of the algorithm for writing compressed streams in + * parallel: We perform compression on chunks of size WIM_CHUNK_SIZE bytes + * rather than on full files. The currently executing thread becomes the main + * thread and is entirely in charge of reading the data to compress (which may + * be in any location understood by the resource code--- such as in an external + * file being captured, or in another WIM file from which an image is being + * exported) and actually writing the compressed data to the output file. + * Additional threads are "compressor threads" and all execute the + * compressor_thread_proc, where they repeatedly retrieve buffers of data from + * the main thread, compress them, and hand them back to the main thread. + * + * Certain streams, such as streams that do not need to be compressed (e.g. + * input compression type same as output compression type) or streams of very + * small size are placed in a list (main_writer_thread_ctx.serial_list) and + * handled entirely by the main thread at an appropriate time. + * + * At any given point in time, multiple streams may be having chunks compressed + * concurrently. The stream that the main thread is currently *reading* may be + * later in the list that the stream that the main thread is currently + * *writing*. + */ static int write_stream_list_parallel(struct list_head *stream_list, struct wim_lookup_table *lookup_table, FILE *out_fp, int out_ctype, - int write_flags, - unsigned num_threads, + int write_resource_flags, wimlib_progress_func_t progress_func, - union wimlib_progress_info *progress) + union wimlib_progress_info *progress, + unsigned num_threads) { int ret; struct shared_queue res_to_compress_queue; @@ -1158,24 +1243,29 @@ write_stream_list_parallel(struct list_head *stream_list, if (nthreads < 1 || nthreads > UINT_MAX) { WARNING("Could not determine number of processors! 
Assuming 1"); goto out_serial; + } else if (nthreads == 1) { + goto out_serial_quiet; } else { num_threads = nthreads; } } + DEBUG("Writing stream list (parallel version, num_threads=%u)", + num_threads); + progress->write_streams.num_threads = num_threads; - static const double MESSAGES_PER_THREAD = 2.0; + static const size_t MESSAGES_PER_THREAD = 2; size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD); DEBUG("Initializing shared queues (queue_size=%zu)", queue_size); ret = shared_queue_init(&res_to_compress_queue, queue_size); - if (ret != 0) + if (ret) goto out_serial; ret = shared_queue_init(&compressed_res_queue, queue_size); - if (ret != 0) + if (ret) goto out_destroy_res_to_compress_queue; struct compressor_thread_params params; @@ -1190,13 +1280,14 @@ write_stream_list_parallel(struct list_head *stream_list, } for (unsigned i = 0; i < num_threads; i++) { - DEBUG("pthread_create thread %u", i); + DEBUG("pthread_create thread %u of %u", i + 1, num_threads); ret = pthread_create(&compressor_threads[i], NULL, compressor_thread_proc, ¶ms); if (ret != 0) { ret = -1; ERROR_WITH_ERRNO("Failed to create compressor " - "thread %u", i); + "thread %u of %u", + i + 1, num_threads); num_threads = i; goto out_join; } @@ -1206,7 +1297,6 @@ write_stream_list_parallel(struct list_head *stream_list, progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress); struct main_writer_thread_ctx ctx; - memset(&ctx, 0, sizeof(ctx)); ctx.stream_list = stream_list; ctx.lookup_table = lookup_table; ctx.out_fp = out_fp; @@ -1214,10 +1304,27 @@ write_stream_list_parallel(struct list_head *stream_list, ctx.res_to_compress_queue = &res_to_compress_queue; ctx.compressed_res_queue = &compressed_res_queue; ctx.num_messages = queue_size; - ctx.write_flags = write_flags; + ctx.write_resource_flags = write_resource_flags | WIMLIB_RESOURCE_FLAG_THREADSAFE_READ; ctx.progress_func = progress_func; ctx.progress = progress; - ret = main_writer_thread_proc(&ctx); + ret = main_writer_thread_init_ctx(&ctx); + if (ret) + goto out_join; + ret = do_write_stream_list(stream_list, lookup_table, + main_thread_process_next_stream, + &ctx, progress_func, progress); + if (ret) + goto out_destroy_ctx; + + /* The main thread has finished reading all streams that are going to be + * compressed in parallel, and it now needs to wait for all remaining + * chunks to be compressed so that the remaining streams can actually be + * written to the output file. Furthermore, any remaining streams that + * had processing deferred to the main thread need to be handled. These + * tasks are done by the main_writer_thread_finish() function. 
*/ + ret = main_writer_thread_finish(&ctx); +out_destroy_ctx: + main_writer_thread_destroy_ctx(&ctx); out_join: for (unsigned i = 0; i < num_threads; i++) shared_queue_put(&res_to_compress_queue, NULL); @@ -1225,7 +1332,8 @@ out_join: for (unsigned i = 0; i < num_threads; i++) { if (pthread_join(compressor_threads[i], NULL)) { WARNING_WITH_ERRNO("Failed to join compressor " - "thread %u", i); + "thread %u of %u", + i + 1, num_threads); } } FREE(compressor_threads); @@ -1237,11 +1345,12 @@ out_destroy_res_to_compress_queue: return ret; out_serial: WARNING("Falling back to single-threaded compression"); +out_serial_quiet: return write_stream_list_serial(stream_list, lookup_table, out_fp, out_ctype, - write_flags, + write_resource_flags, progress_func, progress); @@ -1264,16 +1373,23 @@ write_stream_list(struct list_head *stream_list, u64 total_compression_bytes = 0; union wimlib_progress_info progress; int ret; + int write_resource_flags; if (list_empty(stream_list)) return 0; + write_resource_flags = write_flags_to_resource_flags(write_flags); + + /* Calculate the total size of the streams to be written. Note: this + * will be the uncompressed size, as we may not know the compressed size + * yet, and also this will assume that every unhashed stream will be + * written (which will not necessarily be the case). */ list_for_each_entry(lte, stream_list, write_streams_list) { num_streams++; total_bytes += wim_resource_size(lte); if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE && (wim_resource_compression_type(lte) != out_ctype || - (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS))) + (write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS))) { total_compression_bytes += wim_resource_size(lte); } @@ -1292,17 +1408,17 @@ write_stream_list(struct list_head *stream_list, lookup_table, out_fp, out_ctype, - write_flags, - num_threads, + write_resource_flags, progress_func, - &progress); + &progress, + num_threads); else #endif ret = write_stream_list_serial(stream_list, lookup_table, out_fp, out_ctype, - write_flags, + write_resource_flags, progress_func, &progress); return ret; @@ -1336,15 +1452,15 @@ stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab) { struct stream_size_table *tab = _tab; size_t pos; - struct wim_lookup_table_entry *hashed_lte; + struct wim_lookup_table_entry *same_size_lte; struct hlist_node *tmp; pos = hash_u64(wim_resource_size(lte)) % tab->capacity; lte->unique_size = 1; - hlist_for_each_entry(hashed_lte, tmp, &tab->array[pos], hash_list_2) { - if (wim_resource_size(hashed_lte) == wim_resource_size(lte)) { + hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) { + if (wim_resource_size(same_size_lte) == wim_resource_size(lte)) { lte->unique_size = 0; - hashed_lte->unique_size = 0; + same_size_lte->unique_size = 0; break; } } @@ -1362,14 +1478,34 @@ struct lte_overwrite_prepare_args { struct stream_size_table stream_size_tab; }; +/* First phase of preparing streams for an in-place overwrite. This is called + * on all streams, both hashed and unhashed, except the metadata resources. 
*/ static int -lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *arg) +lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *_args) { - struct lte_overwrite_prepare_args *args = arg; + struct lte_overwrite_prepare_args *args = _args; - if (lte->resource_location == RESOURCE_IN_WIM && - lte->wim == args->wim) - { + wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA)); + if (lte->resource_location != RESOURCE_IN_WIM || lte->wim != args->wim) + list_add_tail(<e->write_streams_list, &args->stream_list); + lte->out_refcnt = lte->refcnt; + stream_size_table_insert(lte, &args->stream_size_tab); + return 0; +} + +/* Second phase of preparing streams for an in-place overwrite. This is called + * on existing metadata resources and hashed streams, but not unhashed streams. + * + * NOTE: lte->output_resource_entry is in union with lte->hash_list_2, so + * lte_overwrite_prepare_2() must be called after lte_overwrite_prepare(), as + * the latter uses lte->hash_list_2, while the former expects to set + * lte->output_resource_entry. */ +static int +lte_overwrite_prepare_2(struct wim_lookup_table_entry *lte, void *_args) +{ + struct lte_overwrite_prepare_args *args = _args; + + if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == args->wim) { /* We can't do an in place overwrite on the WIM if there are * streams after the XML data. */ if (lte->resource_entry.offset + @@ -1381,19 +1517,6 @@ lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *arg) #endif return WIMLIB_ERR_RESOURCE_ORDER; } - } else { - wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA)); - list_add_tail(<e->write_streams_list, &args->stream_list); - } - lte->out_refcnt = lte->refcnt; - stream_size_table_insert(lte, &args->stream_size_tab); - return 0; -} - -static int -lte_set_output_res_entry(struct wim_lookup_table_entry *lte, void *_wim) -{ - if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == _wim) { copy_resource_entry(<e->output_resource_entry, <e->resource_entry); } @@ -1419,6 +1542,7 @@ prepare_streams_for_overwrite(WIMStruct *wim, off_t end_offset, { int ret; struct lte_overwrite_prepare_args args; + unsigned i; args.wim = wim; args.end_offset = end_offset; @@ -1428,28 +1552,25 @@ prepare_streams_for_overwrite(WIMStruct *wim, off_t end_offset, return ret; INIT_LIST_HEAD(&args.stream_list); - for (int i = 0; i < wim->hdr.image_count; i++) { + for (i = 0; i < wim->hdr.image_count; i++) { struct wim_image_metadata *imd; struct wim_lookup_table_entry *lte; imd = wim->image_metadata[i]; - image_for_each_unhashed_stream(lte, imd) { - ret = lte_overwrite_prepare(lte, &args); - if (ret) - goto out_destroy_stream_size_table; - } + image_for_each_unhashed_stream(lte, imd) + lte_overwrite_prepare(lte, &args); + } + for_lookup_table_entry(wim->lookup_table, lte_overwrite_prepare, &args); + list_transfer(&args.stream_list, stream_list); + + for (i = 0; i < wim->hdr.image_count; i++) { + ret = lte_overwrite_prepare_2(wim->image_metadata[i]->metadata_lte, + &args); + if (ret) + goto out_destroy_stream_size_table; } ret = for_lookup_table_entry(wim->lookup_table, - lte_overwrite_prepare, &args); - if (ret) - goto out_destroy_stream_size_table; - - for (int i = 0; i < wim->hdr.image_count; i++) - lte_set_output_res_entry(wim->image_metadata[i]->metadata_lte, - wim); - for_lookup_table_entry(wim->lookup_table, lte_set_output_res_entry, wim); - INIT_LIST_HEAD(stream_list); - list_splice(&args.stream_list, stream_list); + lte_overwrite_prepare_2, &args); 
out_destroy_stream_size_table: destroy_stream_size_table(&args.stream_size_tab); return ret; @@ -1484,8 +1605,8 @@ inode_find_streams_to_write(struct wim_inode *inode, static int image_find_streams_to_write(WIMStruct *w) { - struct wim_image_metadata *imd; struct find_streams_ctx *ctx; + struct wim_image_metadata *imd; struct wim_inode *inode; struct wim_lookup_table_entry *lte; @@ -1535,10 +1656,8 @@ prepare_stream_list(WIMStruct *wim, int image, struct list_head *stream_list) wim->private = &ctx; ret = for_image(wim, image, image_find_streams_to_write); destroy_stream_size_table(&ctx.stream_size_tab); - if (ret == 0) { - INIT_LIST_HEAD(stream_list); - list_splice(&ctx.stream_list, stream_list); - } + if (ret == 0) + list_transfer(&ctx.stream_list, stream_list); return ret; } @@ -1622,13 +1741,11 @@ finish_write(WIMStruct *w, int image, int write_flags, * it should be a copy of the resource entry for the image that is * marked as bootable. This is not well documented... */ if (hdr.boot_idx == 0) { - memset(&hdr.boot_metadata_res_entry, 0, - sizeof(struct resource_entry)); + zero_resource_entry(&hdr.boot_metadata_res_entry); } else { - memcpy(&hdr.boot_metadata_res_entry, - &w->image_metadata[ - hdr.boot_idx - 1]->metadata_lte->output_resource_entry, - sizeof(struct resource_entry)); + copy_resource_entry(&hdr.boot_metadata_res_entry, + &w->image_metadata[ hdr.boot_idx- 1 + ]->metadata_lte->output_resource_entry); } if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) { @@ -1648,7 +1765,7 @@ finish_write(WIMStruct *w, int image, int write_flags, if (write_flags & WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) { struct wim_header checkpoint_hdr; memcpy(&checkpoint_hdr, &hdr, sizeof(struct wim_header)); - memset(&checkpoint_hdr.integrity, 0, sizeof(struct resource_entry)); + zero_resource_entry(&checkpoint_hdr.integrity); if (fseeko(out, 0, SEEK_SET)) { ERROR_WITH_ERRNO("Failed to seek to beginning " "of WIM being written"); @@ -1692,7 +1809,7 @@ finish_write(WIMStruct *w, int image, int write_flags, if (ret) goto out_close_wim; } else { - memset(&hdr.integrity, 0, sizeof(struct resource_entry)); + zero_resource_entry(&hdr.integrity); } if (fseeko(out, 0, SEEK_SET) != 0) { @@ -1923,6 +2040,7 @@ overwrite_wim_inplace(WIMStruct *w, int write_flags, int ret; struct list_head stream_list; off_t old_wim_end; + u64 old_lookup_table_end, old_xml_begin, old_xml_end; DEBUG("Overwriting `%"TS"' in-place", w->filename); @@ -1930,33 +2048,45 @@ overwrite_wim_inplace(WIMStruct *w, int write_flags, * data, and that there are no stream resources, metadata resources, or * lookup tables after the XML data. Otherwise, these data would be * overwritten. 
*/ - if (w->hdr.integrity.offset != 0 && - w->hdr.integrity.offset < w->hdr.xml_res_entry.offset) { + old_xml_begin = w->hdr.xml_res_entry.offset; + old_xml_end = old_xml_begin + w->hdr.xml_res_entry.size; + old_lookup_table_end = w->hdr.lookup_table_res_entry.offset + + w->hdr.lookup_table_res_entry.size; + if (w->hdr.integrity.offset != 0 && w->hdr.integrity.offset < old_xml_end) { ERROR("Didn't expect the integrity table to be before the XML data"); return WIMLIB_ERR_RESOURCE_ORDER; } - if (w->hdr.lookup_table_res_entry.offset > w->hdr.xml_res_entry.offset) { + if (old_lookup_table_end > old_xml_begin) { ERROR("Didn't expect the lookup table to be after the XML data"); return WIMLIB_ERR_RESOURCE_ORDER; } - - if (w->hdr.integrity.offset) - old_wim_end = w->hdr.integrity.offset + w->hdr.integrity.size; - else - old_wim_end = w->hdr.xml_res_entry.offset + w->hdr.xml_res_entry.size; - + /* Set @old_wim_end, which indicates the point beyond which we don't + * allow any file and metadata resources to appear without returning + * WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we would otherwise + * overwrite these resources). */ if (!w->deletion_occurred && !any_images_modified(w)) { /* If no images have been modified and no images have been - * deleted, a new lookup table does not need to be written. */ + * deleted, a new lookup table does not need to be written. We + * shall write the new XML data and optional integrity table + * immediately after the lookup table. Note that this may + * overwrite an existing integrity table. */ DEBUG("Skipping writing lookup table " "(no images modified or deleted)"); - old_wim_end = w->hdr.lookup_table_res_entry.offset + - w->hdr.lookup_table_res_entry.size; + old_wim_end = old_lookup_table_end; write_flags |= WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE | WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML; + } else if (w->hdr.integrity.offset) { + /* Old WIM has an integrity table; begin writing new streams + * after it. */ + old_wim_end = w->hdr.integrity.offset + w->hdr.integrity.size; + } else { + /* No existing integrity table; begin writing new streams after + * the old XML data. 
*/ + old_wim_end = old_xml_end; } + ret = prepare_streams_for_overwrite(w, old_wim_end, &stream_list); if (ret) return ret; @@ -1968,15 +2098,13 @@ overwrite_wim_inplace(WIMStruct *w, int write_flags, ret = lock_wim(w, w->out_fp); if (ret) { - fclose(w->out_fp); - w->out_fp = NULL; + close_wim_writable(w); return ret; } if (fseeko(w->out_fp, old_wim_end, SEEK_SET) != 0) { ERROR_WITH_ERRNO("Can't seek to end of WIM"); - fclose(w->out_fp); - w->out_fp = NULL; + close_wim_writable(w); w->wim_locked = 0; return WIMLIB_ERR_WRITE; } @@ -1991,20 +2119,20 @@ overwrite_wim_inplace(WIMStruct *w, int write_flags, num_threads, progress_func); if (ret) - goto out_ftruncate; + goto out_truncate; for (int i = 0; i < w->hdr.image_count; i++) { if (w->image_metadata[i]->modified) { select_wim_image(w, i + 1); ret = write_metadata_resource(w); if (ret) - goto out_ftruncate; + goto out_truncate; } } write_flags |= WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE; ret = finish_write(w, WIMLIB_ALL_IMAGES, write_flags, progress_func); -out_ftruncate: +out_truncate: close_wim_writable(w); if (ret != 0 && !(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) { WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)", @@ -2038,9 +2166,9 @@ overwrite_wim_via_tmpfile(WIMStruct *w, int write_flags, ret = wimlib_write(w, tmpfile, WIMLIB_ALL_IMAGES, write_flags | WIMLIB_WRITE_FLAG_FSYNC, num_threads, progress_func); - if (ret != 0) { + if (ret) { ERROR("Failed to write the WIM file `%"TS"'", tmpfile); - goto err; + goto out_unlink; } DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, w->filename); @@ -2061,7 +2189,7 @@ overwrite_wim_via_tmpfile(WIMStruct *w, int write_flags, ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'", tmpfile, w->filename); ret = WIMLIB_ERR_RENAME; - goto err; + goto out_unlink; } if (progress_func) { @@ -2086,11 +2214,12 @@ overwrite_wim_via_tmpfile(WIMStruct *w, int write_flags, FREE(w->filename); w->filename = NULL; } - return ret; -err: + goto out; +out_unlink: /* Remove temporary file. */ if (tunlink(tmpfile) != 0) WARNING_WITH_ERRNO("Failed to remove `%"TS"'", tmpfile); +out: return ret; }
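
For reference, the multithreaded writer in this patch hands batches of chunks ("messages") between the main thread and the compressor threads through shared_queue, a fixed-size producer/consumer queue built from one mutex and two condition variables. Below is a minimal, self-contained sketch of that pattern under simplified, hypothetical names (bounded_queue, bq_init, bq_put, bq_get); it is not the wimlib implementation itself, only an illustration of the technique the patched shared_queue_init() error handling operates on.

/* Bounded producer/consumer queue: one mutex, two condition variables,
 * and a fixed-size ring of void * slots.  Hypothetical, simplified
 * stand-in for wimlib's struct shared_queue. */
#include <pthread.h>
#include <stdlib.h>

struct bounded_queue {
	void **slots;
	unsigned size;               /* capacity of the ring                */
	unsigned filled;             /* number of occupied slots            */
	unsigned front;              /* index of the next slot to take      */
	pthread_mutex_t lock;
	pthread_cond_t msg_avail;    /* signaled when a message is queued   */
	pthread_cond_t space_avail;  /* signaled when a slot is freed       */
};

static int
bq_init(struct bounded_queue *q, unsigned size)
{
	q->slots = calloc(size, sizeof(q->slots[0]));
	if (!q->slots)
		return -1;
	q->size = size;
	q->filled = 0;
	q->front = 0;
	if (pthread_mutex_init(&q->lock, NULL))
		goto err_free;
	if (pthread_cond_init(&q->msg_avail, NULL))
		goto err_destroy_lock;
	if (pthread_cond_init(&q->space_avail, NULL))
		goto err_destroy_msg_avail;
	return 0;
err_destroy_msg_avail:
	pthread_cond_destroy(&q->msg_avail);
err_destroy_lock:
	pthread_mutex_destroy(&q->lock);
err_free:
	free(q->slots);
	return -1;
}

/* Producer: block until a slot is free, then append @msg at the back. */
static void
bq_put(struct bounded_queue *q, void *msg)
{
	pthread_mutex_lock(&q->lock);
	while (q->filled == q->size)
		pthread_cond_wait(&q->space_avail, &q->lock);
	q->slots[(q->front + q->filled) % q->size] = msg;
	q->filled++;
	pthread_cond_signal(&q->msg_avail);
	pthread_mutex_unlock(&q->lock);
}

/* Consumer: block until a message is available, then take it from the front. */
static void *
bq_get(struct bounded_queue *q)
{
	void *msg;

	pthread_mutex_lock(&q->lock);
	while (q->filled == 0)
		pthread_cond_wait(&q->msg_avail, &q->lock);
	msg = q->slots[q->front];
	q->front = (q->front + 1) % q->size;
	q->filled--;
	pthread_cond_signal(&q->space_avail);
	pthread_mutex_unlock(&q->lock);
	return msg;
}

As in the patched shared_queue_init() above, every pthread_*_init() call is checked and anything already initialized is torn down on failure before reporting an error to the caller.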
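
Similarly, write_wim_resource_chunk() in this file compresses each WIM_CHUNK_SIZE chunk individually, stores the chunk raw whenever the compressor cannot shrink it, and records the running offset of every chunk so that begin_wim_resource_chunk_tab()/finish_wim_resource_chunk_tab() can fill in the chunk table that precedes the compressed data. The following rough sketch of that per-chunk decision uses hypothetical helper names (chunk_compress_fn, struct offset_table, write_chunk) standing in for wimlib's compress_func_t and struct chunk_table; it is an illustration under those assumptions, not the code from this patch.

#include <stdio.h>
#include <stdlib.h>

/* Returns the compressed size, or 0 if the data could not be shrunk.
 * (Hypothetical equivalent of wimlib's compress_func_t.) */
typedef unsigned (*chunk_compress_fn)(const void *in, unsigned in_size, void *out);

struct offset_table {
	unsigned long long cur_offset;   /* bytes of chunk data written so far   */
	unsigned long long *offsets;     /* caller-allocated, one slot per chunk */
	unsigned num_offsets;
};

static int
write_chunk(const void *chunk, unsigned chunk_size, FILE *out,
	    chunk_compress_fn compress, struct offset_table *tab)
{
	const void *out_data = chunk;
	unsigned out_size = chunk_size;
	void *cbuf = NULL;
	int ret;

	if (compress) {
		cbuf = malloc(chunk_size);
		if (!cbuf)
			return -1;
		unsigned csize = compress(chunk, chunk_size, cbuf);
		if (csize != 0 && csize < chunk_size) {
			/* Compression helped; write the compressed bytes. */
			out_data = cbuf;
			out_size = csize;
		}
		/* Otherwise fall through and store the chunk uncompressed. */
	}

	/* Remember where this chunk starts, then advance the running offset
	 * so the chunk table can be written out once all chunks are done. */
	tab->offsets[tab->num_offsets++] = tab->cur_offset;
	tab->cur_offset += out_size;

	ret = (fwrite(out_data, 1, out_size, out) == out_size) ? 0 : -1;
	free(cbuf);
	return ret;
}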