X-Git-Url: https://wimlib.net/git/?p=wimlib;a=blobdiff_plain;f=src%2Fwrite.c;h=37e652a21bcb5dfc943f2bfc07aaac4f1e7ae6ae;hp=03b6c1eb910e0f2ff215b0bc0e2479fab746813a;hb=308dcf5163c29a65f9ec48d6abd96bb0f99ff942;hpb=3f9b53a4a214a254bb27ed30994faf2a0fd12375

diff --git a/src/write.c b/src/write.c
index 03b6c1eb..37e652a2 100644
--- a/src/write.c
+++ b/src/write.c
@@ -97,13 +97,11 @@ begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte,
 	u64 num_chunks = (size + WIM_CHUNK_SIZE - 1) / WIM_CHUNK_SIZE;
 	size_t alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64);
 	struct chunk_table *chunk_tab = CALLOC(1, alloc_size);
-	int ret;
 
 	if (!chunk_tab) {
 		ERROR("Failed to allocate chunk table for %"PRIu64" byte "
 		      "resource", size);
-		ret = WIMLIB_ERR_NOMEM;
-		goto out;
+		return WIMLIB_ERR_NOMEM;
 	}
 	chunk_tab->file_offset = file_offset;
 	chunk_tab->num_chunks = num_chunks;
@@ -119,14 +117,10 @@ begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte,
 		ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
 				 "file resource");
 		FREE(chunk_tab);
-		ret = WIMLIB_ERR_WRITE;
-		goto out;
+		return WIMLIB_ERR_WRITE;
 	}
-
-	ret = 0;
 	*chunk_tab_ret = chunk_tab;
-out:
-	return ret;
+	return 0;
 }
 
 /*
@@ -155,7 +149,7 @@ out:
 typedef unsigned (*compress_func_t)(const void *chunk, unsigned chunk_size,
 				    void *out);
 
-compress_func_t
+static compress_func_t
 get_compress_func(int out_ctype)
 {
 	if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX)
@@ -339,12 +333,6 @@ write_wim_resource(struct wim_lookup_table_entry *lte,
 
 	flags &= ~WIMLIB_RESOURCE_FLAG_RECOMPRESS;
 
-	if (wim_resource_size(lte) == 0) {
-		/* Empty resource; nothing needs to be done, so just return
-		 * success. */
-		return 0;
-	}
-
 	/* Get current position in output WIM */
 	offset = ftello(out_fp);
 	if (offset == -1) {
@@ -473,15 +461,30 @@ shared_queue_init(struct shared_queue *q, unsigned size)
 	wimlib_assert(size != 0);
 	q->array = CALLOC(sizeof(q->array[0]), size);
 	if (!q->array)
-		return WIMLIB_ERR_NOMEM;
+		goto err;
 	q->filled_slots = 0;
 	q->front = 0;
 	q->back = size - 1;
 	q->size = size;
-	pthread_mutex_init(&q->lock, NULL);
-	pthread_cond_init(&q->msg_avail_cond, NULL);
-	pthread_cond_init(&q->space_avail_cond, NULL);
+	if (pthread_mutex_init(&q->lock, NULL)) {
+		ERROR_WITH_ERRNO("Failed to initialize mutex");
+		goto err;
+	}
+	if (pthread_cond_init(&q->msg_avail_cond, NULL)) {
+		ERROR_WITH_ERRNO("Failed to initialize condition variable");
+		goto err_destroy_lock;
+	}
+	if (pthread_cond_init(&q->space_avail_cond, NULL)) {
+		ERROR_WITH_ERRNO("Failed to initialize condition variable");
+		goto err_destroy_msg_avail_cond;
+	}
 	return 0;
+err_destroy_msg_avail_cond:
+	pthread_cond_destroy(&q->msg_avail_cond);
+err_destroy_lock:
+	pthread_mutex_destroy(&q->lock);
+err:
+	return WIMLIB_ERR_NOMEM;
 }
 
 static void
@@ -552,7 +555,6 @@ static void
 compress_chunks(struct message *msg, compress_func_t compress)
 {
 	for (unsigned i = 0; i < msg->num_chunks; i++) {
-		DEBUG2("compress chunk %u of %u", i, msg->num_chunks);
 		unsigned len = compress(msg->uncompressed_chunks[i],
 					msg->uncompressed_chunk_sizes[i],
 					msg->compressed_chunks[i]);
@@ -616,12 +618,30 @@ do_write_streams_progress(union wimlib_progress_info *progress,
 	}
 }
 
+struct serial_write_stream_ctx {
+	FILE *out_fp;
+	int out_ctype;
+	int write_resource_flags;
+};
+
+static int
+serial_write_stream(struct wim_lookup_table_entry *lte, void *_ctx)
+{
+	struct serial_write_stream_ctx *ctx = _ctx;
+	return write_wim_resource(lte, ctx->out_fp,
+				  ctx->out_ctype, &lte->output_resource_entry,
+				  ctx->write_resource_flags);
+}
+
+/* Write a list of streams, taking into account that some streams may be
+ * duplicates that are checksummed and discarded on the fly, and also delegating
+ * the actual writing of a stream to a function @write_stream_cb, which is
+ * passed the context @write_stream_ctx. */
 static int
 do_write_stream_list(struct list_head *stream_list,
 		     struct wim_lookup_table *lookup_table,
-		     FILE *out_fp,
-		     int out_ctype,
-		     int write_resource_flags,
+		     int (*write_stream_cb)(struct wim_lookup_table_entry *, void *),
+		     void *write_stream_ctx,
 		     wimlib_progress_func_t progress_func,
 		     union wimlib_progress_info *progress)
 {
@@ -641,9 +661,7 @@ do_write_stream_list(struct list_head *stream_list,
 			struct wim_lookup_table_entry *tmp;
 			u32 orig_refcnt = lte->out_refcnt;
 
-			ret = hash_unhashed_stream(lte,
-						   lookup_table,
-						   &tmp);
+			ret = hash_unhashed_stream(lte, lookup_table, &tmp);
 			if (ret)
 				break;
 			if (tmp != lte) {
@@ -668,49 +686,77 @@ do_write_stream_list(struct list_head *stream_list,
 		 * while in the latter case this is done because we do not have
 		 * the SHA1 message digest yet. */
 		wimlib_assert(lte->out_refcnt != 0);
-		ret = write_wim_resource(lte,
-					 out_fp,
-					 out_ctype,
-					 &lte->output_resource_entry,
-					 write_resource_flags);
+		lte->deferred = 0;
+		ret = (*write_stream_cb)(lte, write_stream_ctx);
 		if (ret)
 			break;
+		/* In parallel mode, some streams are deferred for later,
+		 * serialized processing; ignore them here. */
+		if (lte->deferred)
+			continue;
 		if (lte->unhashed) {
 			list_del(&lte->unhashed_list);
 			lookup_table_insert(lookup_table, lte);
 			lte->unhashed = 0;
 		}
 	skip_to_progress:
-		do_write_streams_progress(progress,
-					  progress_func,
-					  wim_resource_size(lte));
+		if (progress_func) {
+			do_write_streams_progress(progress,
+						  progress_func,
+						  wim_resource_size(lte));
+		}
 	}
 	return ret;
 }
 
+static int
+do_write_stream_list_serial(struct list_head *stream_list,
+			    struct wim_lookup_table *lookup_table,
+			    FILE *out_fp,
+			    int out_ctype,
+			    int write_resource_flags,
+			    wimlib_progress_func_t progress_func,
+			    union wimlib_progress_info *progress)
+{
+	struct serial_write_stream_ctx ctx = {
+		.out_fp = out_fp,
+		.out_ctype = out_ctype,
+		.write_resource_flags = write_resource_flags,
+	};
+	return do_write_stream_list(stream_list,
+				    lookup_table,
+				    serial_write_stream,
+				    &ctx,
+				    progress_func,
+				    progress);
+}
+
+static inline int
+write_flags_to_resource_flags(int write_flags)
+{
+	return (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS) ?
+			WIMLIB_RESOURCE_FLAG_RECOMPRESS : 0;
+}
+
 static int
 write_stream_list_serial(struct list_head *stream_list,
 			 struct wim_lookup_table *lookup_table,
 			 FILE *out_fp,
 			 int out_ctype,
-			 int write_flags,
+			 int write_resource_flags,
 			 wimlib_progress_func_t progress_func,
 			 union wimlib_progress_info *progress)
 {
-	int write_resource_flags = 0;
-	if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
-		write_resource_flags |= WIMLIB_RESOURCE_FLAG_RECOMPRESS;
-
 	progress->write_streams.num_threads = 1;
 	if (progress_func)
 		progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
-	return do_write_stream_list(stream_list,
-				    lookup_table,
-				    out_fp,
-				    out_ctype,
-				    write_resource_flags,
-				    progress_func,
-				    progress);
+	return do_write_stream_list_serial(stream_list,
+					   lookup_table,
+					   out_fp,
+					   out_ctype,
+					   write_resource_flags,
+					   progress_func,
+					   progress);
 }
 
 #ifdef ENABLE_MULTITHREADED_COMPRESSION
@@ -721,9 +767,6 @@ write_wim_chunks(struct message *msg, FILE *out_fp,
 	for (unsigned i = 0; i < msg->num_chunks; i++) {
 		unsigned chunk_csize = msg->compressed_chunk_sizes[i];
 
-		DEBUG2("Write wim chunk %u of %u (csize = %u)",
-		       i, msg->num_chunks, chunk_csize);
-
 		if (fwrite(msg->out_compressed_chunks[i], 1, chunk_csize,
 			   out_fp) != chunk_csize)
 		{
@@ -742,26 +785,26 @@ struct main_writer_thread_ctx {
 	struct wim_lookup_table *lookup_table;
 	FILE *out_fp;
 	int out_ctype;
+	int write_resource_flags;
 	struct shared_queue *res_to_compress_queue;
 	struct shared_queue *compressed_res_queue;
 	size_t num_messages;
-	int write_flags;
 	wimlib_progress_func_t progress_func;
 	union wimlib_progress_info *progress;
 
 	struct list_head available_msgs;
 	struct list_head outstanding_streams;
 	struct list_head serial_streams;
+	size_t num_outstanding_messages;
+
+	SHA_CTX next_sha_ctx;
 	u64 next_chunk;
 	u64 next_num_chunks;
+	struct wim_lookup_table_entry *next_lte;
+
 	struct message *msgs;
 	struct message *next_msg;
-	size_t next_chunk_in_msg;
 
-	struct wim_lookup_table_entry *cur_lte;
 	struct chunk_table *cur_chunk_tab;
-	struct wim_lookup_table_entry *next_lte;
-	SHA_CTX sha_ctx;
-	u8 next_hash[20];
 };
 
 static int
@@ -816,11 +859,12 @@ allocate_messages(size_t num_messages)
 static void
 main_writer_thread_destroy_ctx(struct main_writer_thread_ctx *ctx)
 {
+	while (ctx->num_outstanding_messages--)
+		shared_queue_get(ctx->compressed_res_queue);
 	free_messages(ctx->msgs, ctx->num_messages);
 	FREE(ctx->cur_chunk_tab);
 }
 
-
 static int
 main_writer_thread_init_ctx(struct main_writer_thread_ctx *ctx)
 {
@@ -839,17 +883,22 @@ main_writer_thread_init_ctx(struct main_writer_thread_ctx *ctx)
 	 * chunks sent off for compression.
 	 *
 	 * The first stream in outstanding_streams is the stream that is
-	 * currently being written (cur_lte).
+	 * currently being written.
 	 *
 	 * The last stream in outstanding_streams is the stream that is
-	 * currently being read and chunks fed to the compressor threads. */
+	 * currently being read and having chunks fed to the compressor threads.
+	 * */
 	INIT_LIST_HEAD(&ctx->outstanding_streams);
+	ctx->num_outstanding_messages = 0;
+
+	ctx->next_msg = NULL;
 
 	/* Resources that don't need any chunks compressed are added to this
 	 * list and written directly by the main thread.  */
 	INIT_LIST_HEAD(&ctx->serial_streams);
 
-	ctx->cur_lte = NULL;
+	ctx->cur_chunk_tab = NULL;
+
 	return 0;
 }
 
@@ -861,13 +910,18 @@ receive_compressed_chunks(struct main_writer_thread_ctx *ctx)
 	int ret;
 
 	wimlib_assert(!list_empty(&ctx->outstanding_streams));
+	wimlib_assert(ctx->num_outstanding_messages != 0);
+
+	cur_lte = container_of(ctx->outstanding_streams.next,
+			       struct wim_lookup_table_entry,
+			       being_compressed_list);
 
 	/* Get the next message from the queue and process it.
 	 * The message will contain 1 or more data chunks that have been
 	 * compressed. */
 	msg = shared_queue_get(ctx->compressed_res_queue);
 	msg->complete = true;
-	cur_lte = ctx->cur_lte;
+	--ctx->num_outstanding_messages;
 
 	/* Is this the next chunk in the current resource? If it's not
 	 * (i.e., an earlier chunk in a same or different resource
@@ -876,52 +930,44 @@ receive_compressed_chunks(struct main_writer_thread_ctx *ctx)
 	 *
 	 * Otherwise, write all the chunks we can. */
 	while (cur_lte != NULL &&
-	       !list_empty(&cur_lte->msg_list) &&
-	       (msg = container_of(cur_lte->msg_list.next,
-				   struct message,
-				   list))->complete)
+	       !list_empty(&cur_lte->msg_list)
+	       && (msg = container_of(cur_lte->msg_list.next,
+				      struct message,
+				      list))->complete)
 	{
+		list_move(&msg->list, &ctx->available_msgs);
 		if (msg->begin_chunk == 0) {
-
 			/* This is the first set of chunks. Leave space
 			 * for the chunk table in the output file. */
 			off_t cur_offset = ftello(ctx->out_fp);
-			if (cur_offset == -1) {
-				ret = WIMLIB_ERR_WRITE;
-				goto out;
-			}
+			if (cur_offset == -1)
+				return WIMLIB_ERR_WRITE;
 			ret = begin_wim_resource_chunk_tab(cur_lte,
 							   ctx->out_fp,
 							   cur_offset,
 							   &ctx->cur_chunk_tab);
 			if (ret)
-				goto out;
+				return ret;
 		}
 
 		/* Write the compressed chunks from the message. */
 		ret = write_wim_chunks(msg, ctx->out_fp, ctx->cur_chunk_tab);
 		if (ret)
-			goto out;
-
-		list_del(&msg->list);
-
-		/* This message is available to use for different chunks
-		 * now. */
-		list_add(&msg->list, &ctx->available_msgs);
+			return ret;
 
 		/* Was this the last chunk of the stream? If so, finish
 		 * it. */
 		if (list_empty(&cur_lte->msg_list) &&
 		    msg->begin_chunk + msg->num_chunks ==
				ctx->cur_chunk_tab->num_chunks)
 		{
-			DEBUG2("Finish wim chunk tab");
 			u64 res_csize;
 			ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab,
 							    ctx->out_fp,
 							    &res_csize);
 			if (ret)
-				goto out;
+				return ret;
+
+			list_del(&cur_lte->being_compressed_list);
 #if 0
 			if (res_csize >= wim_resource_size(cur_lte)) {
 				/* Oops! We compressed the resource to
@@ -955,178 +1001,166 @@ receive_compressed_chunks(struct main_writer_thread_ctx *ctx)
 			FREE(ctx->cur_chunk_tab);
 			ctx->cur_chunk_tab = NULL;
 
-			struct list_head *next = cur_lte->write_streams_list.next;
-			list_del(&cur_lte->write_streams_list);
-
-			if (next == &ctx->outstanding_streams)
-				cur_lte = NULL;
-			else
-				cur_lte = container_of(cur_lte->write_streams_list.next,
						       struct wim_lookup_table_entry,
						       write_streams_list);
-
 			/* Since we just finished writing a stream, write any
 			 * streams that have been added to the serial_streams
 			 * list for direct writing by the main thread (e.g.
 			 * resources that don't need to be compressed because
 			 * the desired compression type is the same as the
 			 * previous compression type).  */
-			ret = do_write_stream_list(&ctx->serial_streams,
-						   ctx->lookup_table,
-						   ctx->out_fp,
-						   ctx->out_ctype,
-						   ctx->progress_func,
-						   ctx->progress,
-						   0);
+			ret = do_write_stream_list_serial(&ctx->serial_streams,
							  ctx->lookup_table,
							  ctx->out_fp,
							  ctx->out_ctype,
							  ctx->write_resource_flags,
							  ctx->progress_func,
							  ctx->progress);
 			if (ret)
-				goto out;
+				return ret;
+
+			/* Advance to the next stream to write. */
+			if (list_empty(&ctx->outstanding_streams)) {
+				cur_lte = NULL;
+			} else {
+				cur_lte = container_of(ctx->outstanding_streams.next,
						       struct wim_lookup_table_entry,
						       being_compressed_list);
+			}
 		}
 	}
-out:
-	ctx->cur_lte = cur_lte;
-	return ret;
+	return 0;
 }
 
+/* Called when the main thread has read a new chunk of data. */
 static int
 main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx)
 {
 	struct main_writer_thread_ctx *ctx = _ctx;
 	int ret;
 	struct message *next_msg;
+	u64 next_chunk_in_msg;
 
-	next_msg = ctx->next_msg;
-
-	sha1_update(&ctx->sha_ctx, chunk, chunk_size);
+	/* Update SHA1 message digest for the stream currently being read by the
+	 * main thread. */
+	sha1_update(&ctx->next_sha_ctx, chunk, chunk_size);
 
+	/* We send chunks of data to the compressor threads in batches which we
+	 * refer to as "messages". @next_msg is the message that is currently
+	 * being prepared to send off. If it is NULL, that indicates that we
+	 * need to start a new message. */
+	next_msg = ctx->next_msg;
 	if (!next_msg) {
-		if (list_empty(&ctx->available_msgs)) {
+		/* We need to start a new message. First check to see if there
+		 * is a message available in the list of available messages. If
+		 * so, we can just take one. If not, all the messages (there is
+		 * a fixed number of them, proportional to the number of
+		 * threads) have been sent off to the compressor threads, so we
+		 * receive messages from the compressor threads containing
+		 * compressed chunks of data.
+		 *
+		 * We may need to receive multiple messages before one is
+		 * actually available to use because messages received that are
+		 * *not* for the very next set of chunks to compress must be
+		 * buffered until it's time to write those chunks.  */
+		while (list_empty(&ctx->available_msgs)) {
 			ret = receive_compressed_chunks(ctx);
 			if (ret)
 				return ret;
 		}
-		wimlib_assert(!list_empty(&ctx->available_msgs));
-
 		next_msg = container_of(ctx->available_msgs.next,
-					struct message,
-					list);
+					struct message, list);
 		list_del(&next_msg->list);
 		next_msg->complete = false;
 		next_msg->begin_chunk = ctx->next_chunk;
 		next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG,
 					   ctx->next_num_chunks - ctx->next_chunk);
-		ctx->next_chunk_in_msg = 0;
+		ctx->next_msg = next_msg;
 	}
 
-	wimlib_assert(next_msg != NULL);
-	wimlib_assert(ctx->next_chunk_in_msg < next_msg->num_chunks);
+	/* Fill in the next chunk to compress */
+	next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk;
 
-	next_msg->uncompressed_chunk_sizes[ctx->next_chunk_in_msg] = chunk_size;
-	memcpy(next_msg->uncompressed_chunks[ctx->next_chunk_in_msg],
+	next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size;
+	memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg],
 	       chunk, chunk_size);
-
-	if (++ctx->next_chunk_in_msg == next_msg->num_chunks) {
-		shared_queue_put(ctx->res_to_compress_queue,
-				 next_msg);
+	ctx->next_chunk++;
+	if (++next_chunk_in_msg == next_msg->num_chunks) {
+		/* Send off an array of chunks to compress */
+		list_add_tail(&next_msg->list, &ctx->next_lte->msg_list);
+		shared_queue_put(ctx->res_to_compress_queue, next_msg);
+		++ctx->num_outstanding_messages;
 		ctx->next_msg = NULL;
 	}
 	return 0;
 }
 
+static int
+main_writer_thread_finish(void *_ctx)
+{
+	struct main_writer_thread_ctx *ctx = _ctx;
+	int ret;
+	while (ctx->num_outstanding_messages != 0) {
+		ret = receive_compressed_chunks(ctx);
+		if (ret)
+			return ret;
+	}
+	wimlib_assert(list_empty(&ctx->outstanding_streams));
+	return do_write_stream_list_serial(&ctx->serial_streams,
					   ctx->lookup_table,
					   ctx->out_fp,
					   ctx->out_ctype,
					   ctx->write_resource_flags,
					   ctx->progress_func,
					   ctx->progress);
+}
+
 static int
 submit_stream_for_compression(struct wim_lookup_table_entry *lte,
 			      struct main_writer_thread_ctx *ctx)
 {
 	int ret;
 
-	sha1_init(&ctx->sha_ctx);
+	/* Read the entire stream @lte, feeding its data chunks to the
+	 * compressor threads. Also SHA1-sum the stream; this is required in
+	 * the case that @lte is unhashed, and a nice additional verification
+	 * when @lte is already hashed. */
+	sha1_init(&ctx->next_sha_ctx);
+	ctx->next_chunk = 0;
 	ctx->next_num_chunks = wim_resource_chunks(lte);
+	ctx->next_lte = lte;
+	INIT_LIST_HEAD(&lte->msg_list);
+	list_add_tail(&lte->being_compressed_list, &ctx->outstanding_streams);
 	ret = read_resource_prefix(lte, wim_resource_size(lte),
 				   main_writer_thread_cb, ctx, 0);
-	if (ret)
-		return ret;
-	ret = finalize_and_check_sha1(&ctx->sha_ctx, lte);
-	if (ret)
-		return ret;
+	if (ret == 0) {
+		wimlib_assert(ctx->next_chunk == ctx->next_num_chunks);
+		ret = finalize_and_check_sha1(&ctx->next_sha_ctx, lte);
+	}
 	return ret;
 }
 
-/*
- * This function is executed by the main thread when the resources are being
- * compressed in parallel.  The main thread is in change of all reading of the
- * uncompressed data and writing of the compressed data.  The compressor threads
- * *only* do compression from/to in-memory buffers.
- *
- * Each unit of work given to a compressor thread is up to MAX_CHUNKS_PER_MSG
- * chunks of compressed data to compress, represented in a `struct message'.
- * Each message is passed from the main thread to a worker thread through the
- * res_to_compress_queue, and it is passed back through the
- * compressed_res_queue.
- */
 static int
-main_writer_thread_proc(struct main_writer_thread_ctx *ctx)
+main_thread_process_next_stream(struct wim_lookup_table_entry *lte, void *_ctx)
 {
+	struct main_writer_thread_ctx *ctx = _ctx;
 	int ret;
-	struct list_head *stream_list;
-	struct wim_lookup_table_entry *lte;
-
-	ret = main_writer_thread_init_ctx(ctx);
-	if (ret)
-		goto out_destroy_ctx;
-
-	stream_list = ctx->stream_list;
-	while (!list_empty(stream_list)) {
-		lte = container_of(stream_list->next,
-				   struct wim_lookup_table_entry,
-				   write_streams_list);
-		list_del(&lte->write_streams_list);
-		if (lte->unhashed && !lte->unique_size) {
-			struct wim_lookup_table_entry *tmp;
-			u32 orig_refcnt = lte->out_refcnt;
-
-			ret = hash_unhashed_stream(lte, ctx->lookup_table, &tmp);
-			if (ret)
-				goto out_destroy_ctx;
-			if (tmp != lte) {
-				lte = tmp;
-				if (orig_refcnt != tmp->out_refcnt) {
-					DEBUG("Discarding duplicate stream of length %"PRIu64,
-					      wim_resource_size(lte));
-					goto skip_to_progress;
-				}
-			}
-		}
-
-		if (wim_resource_size(lte) < 1000 ||
-		    ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
-		    (lte->resource_location == RESOURCE_IN_WIM &&
-		     wimlib_get_compression_type(lte->wim) == ctx->out_ctype))
-		{
-			list_add(&lte->write_streams_list,
-				 &ctx->serial_streams);
-		} else {
-			ret = submit_stream_for_compression(lte, ctx);
-			if (ret)
-				goto out_destroy_ctx;
-			if (lte->unhashed) {
-				list_del(&lte->unhashed_list);
-				lookup_table_insert(ctx->lookup_table, lte);
-				lte->unhashed = 0;
-			}
-		}
-	skip_to_progress:
-		do_write_streams_progress(ctx->progress,
-					  ctx->progress_func,
-					  wim_resource_size(lte));
-	}
-
-	while (!list_empty(&ctx->outstanding_streams)) {
-		ret = receive_compressed_chunks(ctx);
-		if (ret)
-			goto out_destroy_ctx;
+	if (wim_resource_size(lte) < 1000 ||
+	    ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
+	    (lte->resource_location == RESOURCE_IN_WIM &&
+	     !(ctx->write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) &&
+	     wimlib_get_compression_type(lte->wim) == ctx->out_ctype))
+	{
+		/* Stream is too small or isn't being compressed. Process it by
+		 * the main thread when we have a chance. We can't necessarily
+		 * process it right here, as the main thread could be in the
+		 * middle of writing a different stream. */
+		list_add_tail(&lte->write_streams_list, &ctx->serial_streams);
+		lte->deferred = 1;
+		ret = 0;
+	} else {
+		ret = submit_stream_for_compression(lte, ctx);
 	}
-	ret = 0;
-out_destroy_ctx:
-	main_writer_thread_destroy_ctx(ctx);
 	return ret;
 }
 
@@ -1140,15 +1174,41 @@ get_default_num_threads()
 #endif
 }
 
+/* Equivalent to write_stream_list_serial(), except this takes a @num_threads
+ * parameter and will perform compression using that many threads.  Falls
+ * back to write_stream_list_serial() on certain errors, such as a failure to
+ * create the number of threads requested.
+ *
+ * High level description of the algorithm for writing compressed streams in
+ * parallel:  We perform compression on chunks of size WIM_CHUNK_SIZE bytes
+ * rather than on full files.  The currently executing thread becomes the main
+ * thread and is entirely in charge of reading the data to compress (which may
+ * be in any location understood by the resource code--- such as in an external
+ * file being captured, or in another WIM file from which an image is being
+ * exported) and actually writing the compressed data to the output file.
+ * Additional threads are "compressor threads" and all execute the
+ * compressor_thread_proc, where they repeatedly retrieve buffers of data from
+ * the main thread, compress them, and hand them back to the main thread.
+ *
+ * Certain streams, such as streams that do not need to be compressed (e.g.
+ * input compression type same as output compression type) or streams of very
+ * small size are placed in a list (main_writer_thread_ctx.serial_list) and
+ * handled entirely by the main thread at an appropriate time.
+ *
+ * At any given point in time, multiple streams may be having chunks compressed
+ * concurrently.  The stream that the main thread is currently *reading* may be
+ * later in the list than the stream that the main thread is currently
+ * *writing*.
+ */
 static int
 write_stream_list_parallel(struct list_head *stream_list,
 			   struct wim_lookup_table *lookup_table,
 			   FILE *out_fp,
 			   int out_ctype,
-			   int write_flags,
-			   unsigned num_threads,
+			   int write_resource_flags,
 			   wimlib_progress_func_t progress_func,
-			   union wimlib_progress_info *progress)
+			   union wimlib_progress_info *progress,
+			   unsigned num_threads)
 {
 	int ret;
 	struct shared_queue res_to_compress_queue;
@@ -1167,17 +1227,17 @@ write_stream_list_parallel(struct list_head *stream_list,
 
 	progress->write_streams.num_threads = num_threads;
 
-	static const double MESSAGES_PER_THREAD = 2.0;
+	static const size_t MESSAGES_PER_THREAD = 2;
 	size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD);
 
 	DEBUG("Initializing shared queues (queue_size=%zu)", queue_size);
 
 	ret = shared_queue_init(&res_to_compress_queue, queue_size);
-	if (ret != 0)
+	if (ret)
 		goto out_serial;
 
 	ret = shared_queue_init(&compressed_res_queue, queue_size);
-	if (ret != 0)
+	if (ret)
 		goto out_destroy_res_to_compress_queue;
 
 	struct compressor_thread_params params;
@@ -1192,13 +1252,14 @@ write_stream_list_parallel(struct list_head *stream_list,
 	}
 
 	for (unsigned i = 0; i < num_threads; i++) {
-		DEBUG("pthread_create thread %u", i);
+		DEBUG("pthread_create thread %u of %u", i + 1, num_threads);
 		ret = pthread_create(&compressor_threads[i], NULL,
 				     compressor_thread_proc, &params);
 		if (ret != 0) {
 			ret = -1;
 			ERROR_WITH_ERRNO("Failed to create compressor "
-					 "thread %u", i);
+					 "thread %u of %u",
+					 i + 1, num_threads);
 			num_threads = i;
 			goto out_join;
 		}
@@ -1208,7 +1269,6 @@ write_stream_list_parallel(struct list_head *stream_list,
 		progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
 
 	struct main_writer_thread_ctx ctx;
-	memset(&ctx, 0, sizeof(ctx));
 	ctx.stream_list = stream_list;
 	ctx.lookup_table = lookup_table;
 	ctx.out_fp = out_fp;
@@ -1216,10 +1276,27 @@ write_stream_list_parallel(struct list_head *stream_list,
 	ctx.res_to_compress_queue = &res_to_compress_queue;
 	ctx.compressed_res_queue = &compressed_res_queue;
 	ctx.num_messages = queue_size;
-	ctx.write_flags = write_flags;
+	ctx.write_resource_flags = write_resource_flags | WIMLIB_RESOURCE_FLAG_THREADSAFE_READ;
 	ctx.progress_func = progress_func;
 	ctx.progress = progress;
-	ret = main_writer_thread_proc(&ctx);
+	ret = main_writer_thread_init_ctx(&ctx);
+	if (ret)
+		goto out_join;
+	ret = do_write_stream_list(stream_list, lookup_table,
+				   main_thread_process_next_stream,
+				   &ctx, NULL, NULL);
+	if (ret)
+		goto out_destroy_ctx;
+
+	/* The main thread has finished reading all streams that are going to be
+	 * compressed in parallel, and it now needs to wait for all remaining
+	 * chunks to be compressed so that the remaining streams can actually be
+	 * written to the output file.  Furthermore, any remaining streams that
+	 * had processing deferred to the main thread need to be handled.  These
+	 * tasks are done by the main_writer_thread_finish() function. */
+	ret = main_writer_thread_finish(&ctx);
+out_destroy_ctx:
+	main_writer_thread_destroy_ctx(&ctx);
 out_join:
 	for (unsigned i = 0; i < num_threads; i++)
 		shared_queue_put(&res_to_compress_queue, NULL);
@@ -1227,7 +1304,8 @@ out_join:
 	for (unsigned i = 0; i < num_threads; i++) {
 		if (pthread_join(compressor_threads[i], NULL)) {
 			WARNING_WITH_ERRNO("Failed to join compressor "
-					   "thread %u", i);
+					   "thread %u of %u",
+					   i + 1, num_threads);
 		}
 	}
 	FREE(compressor_threads);
@@ -1243,7 +1321,7 @@ out_serial:
 				       lookup_table,
 				       out_fp,
 				       out_ctype,
-				       write_flags,
+				       write_resource_flags,
 				       progress_func,
 				       progress);
 
@@ -1266,10 +1344,13 @@ write_stream_list(struct list_head *stream_list,
 	u64 total_compression_bytes = 0;
 	union wimlib_progress_info progress;
 	int ret;
+	int write_resource_flags;
 
 	if (list_empty(stream_list))
 		return 0;
 
+	write_resource_flags = write_flags_to_resource_flags(write_flags);
+
 	/* Calculate the total size of the streams to be written. Note: this
 	 * will be the uncompressed size, as we may not know the compressed size
 	 * yet, and also this will assume that every unhashed stream will be
@@ -1279,7 +1360,7 @@ write_stream_list(struct list_head *stream_list,
 		total_bytes += wim_resource_size(lte);
 		if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
 		    && (wim_resource_compression_type(lte) != out_ctype ||
-			(write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)))
+			(write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS)))
 		{
 			total_compression_bytes += wim_resource_size(lte);
 		}
@@ -1298,17 +1379,17 @@ write_stream_list(struct list_head *stream_list,
 					       lookup_table,
 					       out_fp,
 					       out_ctype,
-					       write_flags,
-					       num_threads,
+					       write_resource_flags,
 					       progress_func,
-					       &progress);
+					       &progress,
+					       num_threads);
 	else
 #endif
 		ret = write_stream_list_serial(stream_list,
 					       lookup_table,
 					       out_fp,
 					       out_ctype,
-					       write_flags,
+					       write_resource_flags,
 					       progress_func,
 					       &progress);
 	return ret;
@@ -1342,15 +1423,15 @@ stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab)
 {
 	struct stream_size_table *tab = _tab;
 	size_t pos;
-	struct wim_lookup_table_entry *hashed_lte;
+	struct wim_lookup_table_entry *same_size_lte;
 	struct hlist_node *tmp;
 
 	pos = hash_u64(wim_resource_size(lte)) % tab->capacity;
 	lte->unique_size = 1;
-	hlist_for_each_entry(hashed_lte, tmp, &tab->array[pos], hash_list_2) {
-		if (wim_resource_size(hashed_lte) == wim_resource_size(lte)) {
+	hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) {
+		if (wim_resource_size(same_size_lte) == wim_resource_size(lte)) {
 			lte->unique_size = 0;
-			hashed_lte->unique_size = 0;
+			same_size_lte->unique_size = 0;
 			break;
 		}
 	}
@@ -1368,14 +1449,34 @@ struct lte_overwrite_prepare_args {
 	struct stream_size_table stream_size_tab;
 };
 
+/* First phase of preparing streams for an in-place overwrite.  This is called
+ * on all streams, both hashed and unhashed, except the metadata resources.  */
 static int
-lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *arg)
+lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *_args)
 {
-	struct lte_overwrite_prepare_args *args = arg;
+	struct lte_overwrite_prepare_args *args = _args;
 
-	if (lte->resource_location == RESOURCE_IN_WIM &&
-	    lte->wim == args->wim)
-	{
+	wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA));
+	if (lte->resource_location != RESOURCE_IN_WIM || lte->wim != args->wim)
+		list_add_tail(&lte->write_streams_list, &args->stream_list);
+	lte->out_refcnt = lte->refcnt;
+	stream_size_table_insert(lte, &args->stream_size_tab);
+	return 0;
+}
+
+/* Second phase of preparing streams for an in-place overwrite.  This is called
+ * on existing metadata resources and hashed streams, but not unhashed streams.
+ *
+ * NOTE: lte->output_resource_entry is in union with lte->hash_list_2, so
+ * lte_overwrite_prepare_2() must be called after lte_overwrite_prepare(), as
+ * the latter uses lte->hash_list_2, while the former expects to set
+ * lte->output_resource_entry. */
+static int
+lte_overwrite_prepare_2(struct wim_lookup_table_entry *lte, void *_args)
+{
+	struct lte_overwrite_prepare_args *args = _args;
+
+	if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == args->wim) {
 		/* We can't do an in place overwrite on the WIM if there are
 		 * streams after the XML data. */
 		if (lte->resource_entry.offset +
@@ -1387,19 +1488,6 @@ lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *arg)
 #endif
 			return WIMLIB_ERR_RESOURCE_ORDER;
 		}
-	} else {
-		wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA));
-		list_add_tail(&lte->write_streams_list, &args->stream_list);
-	}
-	lte->out_refcnt = lte->refcnt;
-	stream_size_table_insert(lte, &args->stream_size_tab);
-	return 0;
-}
-
-static int
-lte_set_output_res_entry(struct wim_lookup_table_entry *lte, void *_wim)
-{
-	if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == _wim) {
 		copy_resource_entry(&lte->output_resource_entry,
 				    &lte->resource_entry);
 	}
@@ -1425,6 +1513,7 @@ prepare_streams_for_overwrite(WIMStruct *wim, off_t end_offset,
 {
 	int ret;
 	struct lte_overwrite_prepare_args args;
+	unsigned i;
 
 	args.wim = wim;
 	args.end_offset = end_offset;
@@ -1434,27 +1523,25 @@ prepare_streams_for_overwrite(WIMStruct *wim, off_t end_offset,
 		return ret;
 
 	INIT_LIST_HEAD(&args.stream_list);
-	for (int i = 0; i < wim->hdr.image_count; i++) {
+	for (i = 0; i < wim->hdr.image_count; i++) {
 		struct wim_image_metadata *imd;
 		struct wim_lookup_table_entry *lte;
 
 		imd = wim->image_metadata[i];
-		image_for_each_unhashed_stream(lte, imd) {
-			ret = lte_overwrite_prepare(lte, &args);
-			if (ret)
-				goto out_destroy_stream_size_table;
-		}
+		image_for_each_unhashed_stream(lte, imd)
+			lte_overwrite_prepare(lte, &args);
 	}
-	ret = for_lookup_table_entry(wim->lookup_table,
-				     lte_overwrite_prepare, &args);
-	if (ret)
-		goto out_destroy_stream_size_table;
-
-	for (int i = 0; i < wim->hdr.image_count; i++)
-		lte_set_output_res_entry(wim->image_metadata[i]->metadata_lte,
-					 wim);
-	for_lookup_table_entry(wim->lookup_table, lte_set_output_res_entry, wim);
+	for_lookup_table_entry(wim->lookup_table, lte_overwrite_prepare, &args);
 	list_transfer(&args.stream_list, stream_list);
+
+	for (i = 0; i < wim->hdr.image_count; i++) {
+		ret = lte_overwrite_prepare_2(wim->image_metadata[i]->metadata_lte,
+					      &args);
+		if (ret)
+			goto out_destroy_stream_size_table;
+	}
+	ret = for_lookup_table_entry(wim->lookup_table,
+				     lte_overwrite_prepare_2, &args);
 out_destroy_stream_size_table:
 	destroy_stream_size_table(&args.stream_size_tab);
 	return ret;
@@ -1924,6 +2011,7 @@ overwrite_wim_inplace(WIMStruct *w, int write_flags,
 	int ret;
 	struct list_head stream_list;
 	off_t old_wim_end;
+	u64 old_lookup_table_end, old_xml_begin, old_xml_end;
 
 	DEBUG("Overwriting `%"TS"' in-place", w->filename);
 
@@ -1931,33 +2019,45 @@ overwrite_wim_inplace(WIMStruct *w, int write_flags,
 	 * data, and that there are no stream resources, metadata resources, or
 	 * lookup tables after the XML data.  Otherwise, these data would be
 	 * overwritten. */
-	if (w->hdr.integrity.offset != 0 &&
-	    w->hdr.integrity.offset < w->hdr.xml_res_entry.offset) {
+	old_xml_begin = w->hdr.xml_res_entry.offset;
+	old_xml_end = old_xml_begin + w->hdr.xml_res_entry.size;
+	old_lookup_table_end = w->hdr.lookup_table_res_entry.offset +
+			       w->hdr.lookup_table_res_entry.size;
+	if (w->hdr.integrity.offset != 0 && w->hdr.integrity.offset < old_xml_end) {
 		ERROR("Didn't expect the integrity table to be before the XML data");
 		return WIMLIB_ERR_RESOURCE_ORDER;
 	}
 
-	if (w->hdr.lookup_table_res_entry.offset > w->hdr.xml_res_entry.offset) {
+	if (old_lookup_table_end > old_xml_begin) {
 		ERROR("Didn't expect the lookup table to be after the XML data");
 		return WIMLIB_ERR_RESOURCE_ORDER;
 	}
-
-	if (w->hdr.integrity.offset)
-		old_wim_end = w->hdr.integrity.offset + w->hdr.integrity.size;
-	else
-		old_wim_end = w->hdr.xml_res_entry.offset + w->hdr.xml_res_entry.size;
-
+	/* Set @old_wim_end, which indicates the point beyond which we don't
+	 * allow any file and metadata resources to appear without returning
+	 * WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we would otherwise
+	 * overwrite these resources). */
 	if (!w->deletion_occurred && !any_images_modified(w)) {
 		/* If no images have been modified and no images have been
-		 * deleted, a new lookup table does not need to be written. */
+		 * deleted, a new lookup table does not need to be written.  We
+		 * shall write the new XML data and optional integrity table
+		 * immediately after the lookup table.  Note that this may
+		 * overwrite an existing integrity table. */
 		DEBUG("Skipping writing lookup table "
 		      "(no images modified or deleted)");
-		old_wim_end = w->hdr.lookup_table_res_entry.offset +
-			      w->hdr.lookup_table_res_entry.size;
+		old_wim_end = old_lookup_table_end;
 		write_flags |= WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE |
 			       WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML;
+	} else if (w->hdr.integrity.offset) {
+		/* Old WIM has an integrity table; begin writing new streams
+		 * after it. */
+		old_wim_end = w->hdr.integrity.offset + w->hdr.integrity.size;
+	} else {
+		/* No existing integrity table; begin writing new streams after
+		 * the old XML data.  */
+		old_wim_end = old_xml_end;
 	}
+
 	ret = prepare_streams_for_overwrite(w, old_wim_end, &stream_list);
 	if (ret)
 		return ret;
@@ -1969,15 +2069,13 @@ overwrite_wim_inplace(WIMStruct *w, int write_flags,
 
 	ret = lock_wim(w, w->out_fp);
 	if (ret) {
-		fclose(w->out_fp);
-		w->out_fp = NULL;
+		close_wim_writable(w);
 		return ret;
 	}
 
 	if (fseeko(w->out_fp, old_wim_end, SEEK_SET) != 0) {
 		ERROR_WITH_ERRNO("Can't seek to end of WIM");
-		fclose(w->out_fp);
-		w->out_fp = NULL;
+		close_wim_writable(w);
 		w->wim_locked = 0;
 		return WIMLIB_ERR_WRITE;
 	}
@@ -1992,20 +2090,20 @@ overwrite_wim_inplace(WIMStruct *w, int write_flags,
 				num_threads,
 				progress_func);
 	if (ret)
-		goto out_ftruncate;
+		goto out_truncate;
 
 	for (int i = 0; i < w->hdr.image_count; i++) {
 		if (w->image_metadata[i]->modified) {
 			select_wim_image(w, i + 1);
 			ret = write_metadata_resource(w);
 			if (ret)
-				goto out_ftruncate;
+				goto out_truncate;
 		}
 	}
 	write_flags |= WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE;
 	ret = finish_write(w, WIMLIB_ALL_IMAGES, write_flags, progress_func);
-out_ftruncate:
+out_truncate:
 	close_wim_writable(w);
 	if (ret != 0 && !(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
 		WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)",