X-Git-Url: https://wimlib.net/git/?a=blobdiff_plain;f=src%2Fwrite.c;h=b4d0f0d7d891b372f0f383a82a9466990a5d69ee;hb=fd763f8533b72067aa38a08849e57f23aa300060;hp=0beba03f4264f20fa8286a35fc36cc70b36077c9;hpb=2af95e7dc313e9abaf571034c3e86f32503bf232;p=wimlib diff --git a/src/write.c b/src/write.c index 0beba03f..b4d0f0d7 100644 --- a/src/write.c +++ b/src/write.c @@ -245,6 +245,21 @@ finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab, return 0; } +static int +fflush_and_ftruncate(FILE *out_fp, off_t offset) +{ + if (fseeko(out_fp, offset, SEEK_SET) || + fflush(out_fp) || + ftruncate(fileno(out_fp), offset)) + { + ERROR_WITH_ERRNO("Failed to flush and/or truncate " + "output WIM file"); + return WIMLIB_ERR_WRITE; + } else { + return 0; + } +} + static int finalize_and_check_sha1(SHA_CTX *sha_ctx, struct wim_lookup_table_entry *lte) { @@ -331,8 +346,6 @@ write_wim_resource(struct wim_lookup_table_entry *lte, off_t offset; int ret; - DEBUG2("wim_resource_size(lte)=%"PRIu64, wim_resource_size(lte)); - flags &= ~WIMLIB_RESOURCE_FLAG_RECOMPRESS; /* Get current position in output WIM */ @@ -416,19 +429,12 @@ try_write_again: if (new_size >= wim_resource_size(lte)) { /* Oops! We compressed the resource to larger than the original * size. Write the resource uncompressed instead. */ - if (fseeko(out_fp, offset, SEEK_SET) || - fflush(out_fp) || - ftruncate(fileno(out_fp), - offset + wim_resource_size(lte))) - { - ERROR_WITH_ERRNO("Failed to flush and/or truncate " - "output WIM file"); - ret = WIMLIB_ERR_WRITE; - goto out_free_chunk_tab; - } DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; " "writing uncompressed instead", wim_resource_size(lte), new_size); + ret = fflush_and_ftruncate(out_fp, offset); + if (ret) + goto out_free_chunk_tab; write_ctx.compress = NULL; write_ctx.doing_sha = false; out_ctype = WIMLIB_COMPRESSION_TYPE_NONE; @@ -557,7 +563,6 @@ static void compress_chunks(struct message *msg, compress_func_t compress) { for (unsigned i = 0; i < msg->num_chunks; i++) { - DEBUG2("compress chunk %u of %u", i, msg->num_chunks); unsigned len = compress(msg->uncompressed_chunks[i], msg->uncompressed_chunk_sizes[i], msg->compressed_chunks[i]); @@ -636,6 +641,10 @@ serial_write_stream(struct wim_lookup_table_entry *lte, void *_ctx) ctx->write_resource_flags); } +/* Write a list of streams, taking into account that some streams may be + * duplicates that are checksummed and discarded on the fly, and also delegating + * the actual writing of a stream to a function @write_stream_cb, which is + * passed the context @write_stream_ctx. */ static int do_write_stream_list(struct list_head *stream_list, struct wim_lookup_table *lookup_table, @@ -672,6 +681,7 @@ do_write_stream_list(struct list_head *stream_list, * just skip to the next stream. */ DEBUG("Discarding duplicate stream of length %"PRIu64, wim_resource_size(lte)); + lte->no_progress = 0; goto skip_to_progress; } } @@ -686,6 +696,7 @@ do_write_stream_list(struct list_head *stream_list, * the SHA1 message digest yet. 
*/ wimlib_assert(lte->out_refcnt != 0); lte->deferred = 0; + lte->no_progress = 0; ret = (*write_stream_cb)(lte, write_stream_ctx); if (ret) break; @@ -699,7 +710,7 @@ do_write_stream_list(struct list_head *stream_list, lte->unhashed = 0; } skip_to_progress: - if (progress_func) { + if (!lte->no_progress) { do_write_streams_progress(progress, progress_func, wim_resource_size(lte)); @@ -746,6 +757,7 @@ write_stream_list_serial(struct list_head *stream_list, wimlib_progress_func_t progress_func, union wimlib_progress_info *progress) { + DEBUG("Writing stream list (serial version)"); progress->write_streams.num_threads = 1; if (progress_func) progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress); @@ -766,9 +778,6 @@ write_wim_chunks(struct message *msg, FILE *out_fp, for (unsigned i = 0; i < msg->num_chunks; i++) { unsigned chunk_csize = msg->compressed_chunk_sizes[i]; - DEBUG2("Write wim chunk %u of %u (csize = %u)", - i, msg->num_chunks, chunk_csize); - if (fwrite(msg->out_compressed_chunks[i], 1, chunk_csize, out_fp) != chunk_csize) { @@ -914,7 +923,6 @@ receive_compressed_chunks(struct main_writer_thread_ctx *ctx) wimlib_assert(!list_empty(&ctx->outstanding_streams)); wimlib_assert(ctx->num_outstanding_messages != 0); - DEBUG2("Receiving more compressed chunks"); cur_lte = container_of(ctx->outstanding_streams.next, struct wim_lookup_table_entry, being_compressed_list); @@ -926,8 +934,6 @@ receive_compressed_chunks(struct main_writer_thread_ctx *ctx) msg->complete = true; --ctx->num_outstanding_messages; - DEBUG2("recved msg %p", msg); - /* Is this the next chunk in the current resource? If it's not * (i.e., an earlier chunk in a same or different resource * hasn't been compressed yet), do nothing, and keep this @@ -965,8 +971,9 @@ receive_compressed_chunks(struct main_writer_thread_ctx *ctx) if (list_empty(&cur_lte->msg_list) && msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks) { - DEBUG2("Finish wim chunk tab"); u64 res_csize; + off_t offset; + ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab, ctx->out_fp, &res_csize); @@ -974,21 +981,32 @@ receive_compressed_chunks(struct main_writer_thread_ctx *ctx) return ret; list_del(&cur_lte->being_compressed_list); -#if 0 + + /* Grab the offset of this stream in the output file + * from the chunk table before we free it. */ + offset = ctx->cur_chunk_tab->file_offset; + + FREE(ctx->cur_chunk_tab); + ctx->cur_chunk_tab = NULL; + if (res_csize >= wim_resource_size(cur_lte)) { /* Oops! We compressed the resource to * larger than the original size. Write * the resource uncompressed instead. 
*/ - ret = write_uncompressed_resource_and_truncate( - cur_lte, - ctx->out_fp, - ctx->cur_chunk_tab->file_offset, - &cur_lte->output_resource_entry); + DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; " + "writing uncompressed instead", + wim_resource_size(cur_lte), res_csize); + ret = fflush_and_ftruncate(ctx->out_fp, offset); if (ret) - goto out; - } else -#endif - { + return ret; + ret = write_wim_resource(cur_lte, + ctx->out_fp, + WIMLIB_COMPRESSION_TYPE_NONE, + &cur_lte->output_resource_entry, + ctx->write_resource_flags); + if (ret) + return ret; + } else { cur_lte->output_resource_entry.size = res_csize; @@ -996,16 +1014,16 @@ receive_compressed_chunks(struct main_writer_thread_ctx *ctx) cur_lte->resource_entry.original_size; cur_lte->output_resource_entry.offset = - ctx->cur_chunk_tab->file_offset; + offset; cur_lte->output_resource_entry.flags = cur_lte->resource_entry.flags | WIM_RESHDR_FLAG_COMPRESSED; } - do_write_streams_progress(ctx->progress, ctx->progress_func, + + do_write_streams_progress(ctx->progress, + ctx->progress_func, wim_resource_size(cur_lte)); - FREE(ctx->cur_chunk_tab); - ctx->cur_chunk_tab = NULL; /* Since we just finished writing a stream, write any * streams that have been added to the serial_streams @@ -1013,31 +1031,32 @@ receive_compressed_chunks(struct main_writer_thread_ctx *ctx) * resources that don't need to be compressed because * the desired compression type is the same as the * previous compression type). */ - ret = do_write_stream_list_serial(&ctx->serial_streams, - ctx->lookup_table, - ctx->out_fp, - ctx->out_ctype, - ctx->write_resource_flags, - ctx->progress_func, - ctx->progress); - if (ret) - return ret; + if (!list_empty(&ctx->serial_streams)) { + ret = do_write_stream_list_serial(&ctx->serial_streams, + ctx->lookup_table, + ctx->out_fp, + ctx->out_ctype, + ctx->write_resource_flags, + ctx->progress_func, + ctx->progress); + if (ret) + return ret; + } + + /* Advance to the next stream to write. */ if (list_empty(&ctx->outstanding_streams)) { cur_lte = NULL; } else { cur_lte = container_of(ctx->outstanding_streams.next, struct wim_lookup_table_entry, being_compressed_list); - #ifdef ENABLE_MORE_DEBUG - DEBUG2("Advance to stream:"); - print_lookup_table_entry(cur_lte, stderr); - #endif } } } return 0; } +/* Called when the main thread has read a new chunk of data. */ static int main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx) { @@ -1046,20 +1065,29 @@ main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx) struct message *next_msg; u64 next_chunk_in_msg; - DEBUG2("chunk_size=%zu, wim_resource_size(next_lte)=%"PRIu64, - chunk_size, wim_resource_size(ctx->next_lte)); - + /* Update SHA1 message digest for the stream currently being read by the + * main thread. */ sha1_update(&ctx->next_sha_ctx, chunk, chunk_size); + + /* We send chunks of data to the compressor chunks in batches which we + * refer to as "messages". @next_msg is the message that is currently + * being prepared to send off. If it is NULL, that indicates that we + * need to start a new message. */ next_msg = ctx->next_msg; if (!next_msg) { - /* Start filling in a new message */ - - DEBUG2("Start new msg"); - + /* We need to start a new message. First check to see if there + * is a message available in the list of available messages. If + * so, we can just take one. 
If not, all the messages (there is + * a fixed number of them, proportional to the number of + * threads) have been sent off to the compressor threads, so we + * receive messages from the compressor threads containing + * compressed chunks of data. + * + * We may need to receive multiple messages before one is + * actually available to use because messages received that are + * *not* for the very next set of chunks to compress must be + * buffered until it's time to write those chunks. */ while (list_empty(&ctx->available_msgs)) { - /* No message available; receive messages, writing - * compressed data. */ - DEBUG2("No msgs available!"); ret = receive_compressed_chunks(ctx); if (ret) return ret; @@ -1072,20 +1100,17 @@ main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx) next_msg->begin_chunk = ctx->next_chunk; next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG, ctx->next_num_chunks - ctx->next_chunk); - DEBUG2("next_msg {begin_chunk=%"PRIu64", num_chunks=%"PRIu64"}", - next_msg->begin_chunk, next_msg->num_chunks); ctx->next_msg = next_msg; } + /* Fill in the next chunk to compress */ next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk; - /* Fill in the next chunk to compress */ next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size; memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg], chunk, chunk_size); ctx->next_chunk++; if (++next_chunk_in_msg == next_msg->num_chunks) { - DEBUG2("Sending message %p", next_msg); /* Send off an array of chunks to compress */ list_add_tail(&next_msg->list, &ctx->next_lte->msg_list); shared_queue_put(ctx->res_to_compress_queue, next_msg); @@ -1100,7 +1125,6 @@ main_writer_thread_finish(void *_ctx) { struct main_writer_thread_ctx *ctx = _ctx; int ret; - DEBUG2("finishing"); while (ctx->num_outstanding_messages != 0) { ret = receive_compressed_chunks(ctx); if (ret) @@ -1122,11 +1146,10 @@ submit_stream_for_compression(struct wim_lookup_table_entry *lte, { int ret; -#ifdef ENABLE_MORE_DEBUG - DEBUG2("Submit for compression:"); - print_lookup_table_entry(lte, stderr); -#endif - + /* Read the entire stream @lte, feeding its data chunks to the + * compressor threads. Also SHA1-sum the stream; this is required in + * the case that @lte is unhashed, and a nice additional verification + * when @lte is already hashed. */ sha1_init(&ctx->next_sha_ctx); ctx->next_chunk = 0; ctx->next_num_chunks = wim_resource_chunks(lte); @@ -1154,12 +1177,17 @@ main_thread_process_next_stream(struct wim_lookup_table_entry *lte, void *_ctx) !(ctx->write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) && wimlib_get_compression_type(lte->wim) == ctx->out_ctype)) { + /* Stream is too small or isn't being compressed. Process it by + * the main thread when we have a chance. We can't necessarily + * process it right here, as the main thread could be in the + * middle of writing a different stream. */ list_add_tail(<e->write_streams_list, &ctx->serial_streams); lte->deferred = 1; ret = 0; } else { ret = submit_stream_for_compression(lte, ctx); } + lte->no_progress = 1; return ret; } @@ -1173,6 +1201,32 @@ get_default_num_threads() #endif } +/* Equivalent to write_stream_list_serial(), except this takes a @num_threads + * parameter and will perform compression using that many threads. Falls + * back to write_stream_list_serial() on certain errors, such as a failure to + * create the number of threads requested. 
+ * + * High level description of the algorithm for writing compressed streams in + * parallel: We perform compression on chunks of size WIM_CHUNK_SIZE bytes + * rather than on full files. The currently executing thread becomes the main + * thread and is entirely in charge of reading the data to compress (which may + * be in any location understood by the resource code--- such as in an external + * file being captured, or in another WIM file from which an image is being + * exported) and actually writing the compressed data to the output file. + * Additional threads are "compressor threads" and all execute the + * compressor_thread_proc, where they repeatedly retrieve buffers of data from + * the main thread, compress them, and hand them back to the main thread. + * + * Certain streams, such as streams that do not need to be compressed (e.g. + * input compression type same as output compression type) or streams of very + * small size are placed in a list (main_writer_thread_ctx.serial_list) and + * handled entirely by the main thread at an appropriate time. + * + * At any given point in time, multiple streams may be having chunks compressed + * concurrently. The stream that the main thread is currently *reading* may be + * later in the list that the stream that the main thread is currently + * *writing*. + */ static int write_stream_list_parallel(struct list_head *stream_list, struct wim_lookup_table *lookup_table, @@ -1193,11 +1247,16 @@ write_stream_list_parallel(struct list_head *stream_list, if (nthreads < 1 || nthreads > UINT_MAX) { WARNING("Could not determine number of processors! Assuming 1"); goto out_serial; + } else if (nthreads == 1) { + goto out_serial_quiet; } else { num_threads = nthreads; } } + DEBUG("Writing stream list (parallel version, num_threads=%u)", + num_threads); + progress->write_streams.num_threads = num_threads; static const size_t MESSAGES_PER_THREAD = 2; @@ -1257,9 +1316,16 @@ write_stream_list_parallel(struct list_head *stream_list, goto out_join; ret = do_write_stream_list(stream_list, lookup_table, main_thread_process_next_stream, - &ctx, NULL, NULL); + &ctx, progress_func, progress); if (ret) goto out_destroy_ctx; + + /* The main thread has finished reading all streams that are going to be + * compressed in parallel, and it now needs to wait for all remaining + * chunks to be compressed so that the remaining streams can actually be + * written to the output file. Furthermore, any remaining streams that + * had processing deferred to the main thread need to be handled. These + * tasks are done by the main_writer_thread_finish() function. */ ret = main_writer_thread_finish(&ctx); out_destroy_ctx: main_writer_thread_destroy_ctx(&ctx); @@ -1283,6 +1349,7 @@ out_destroy_res_to_compress_queue: return ret; out_serial: WARNING("Falling back to single-threaded compression"); +out_serial_quiet: return write_stream_list_serial(stream_list, lookup_table, out_fp,
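
For reference, the parallel write path described in the new comment block above reduces to a bounded producer/consumer exchange: the main thread pushes messages of uncompressed chunks onto one shared queue, and the compressor threads push the compressed results back through another. The following is a minimal, self-contained sketch of that pattern using POSIX threads; it is an illustration only, not wimlib code. The queue and message types are simplified stand-ins for wimlib's shared_queue and struct message, and the "compressor" is a memcpy placeholder rather than a real compressor.

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    #define QUEUE_CAP  4
    #define CHUNK_SIZE 32768

    /* Hypothetical stand-in for wimlib's `struct message'. */
    struct msg {
            unsigned char in[CHUNK_SIZE], out[CHUNK_SIZE];
            size_t in_size, out_size;
    };

    /* Hypothetical stand-in for wimlib's shared_queue: a fixed-size ring
     * buffer protected by a mutex, with blocking put/get. */
    struct queue {
            void *slots[QUEUE_CAP];
            unsigned head, len;
            pthread_mutex_t lock;
            pthread_cond_t nonempty, nonfull;
    };

    #define QUEUE_INIT { .lock = PTHREAD_MUTEX_INITIALIZER,        \
                         .nonempty = PTHREAD_COND_INITIALIZER,     \
                         .nonfull = PTHREAD_COND_INITIALIZER }

    static void queue_put(struct queue *q, void *item)
    {
            pthread_mutex_lock(&q->lock);
            while (q->len == QUEUE_CAP)
                    pthread_cond_wait(&q->nonfull, &q->lock);
            q->slots[(q->head + q->len++) % QUEUE_CAP] = item;
            pthread_cond_signal(&q->nonempty);
            pthread_mutex_unlock(&q->lock);
    }

    static void *queue_get(struct queue *q)
    {
            pthread_mutex_lock(&q->lock);
            while (q->len == 0)
                    pthread_cond_wait(&q->nonempty, &q->lock);
            void *item = q->slots[q->head];
            q->head = (q->head + 1) % QUEUE_CAP;
            q->len--;
            pthread_cond_signal(&q->nonfull);
            pthread_mutex_unlock(&q->lock);
            return item;
    }

    /* The two queues connecting the main thread and the compressor threads,
     * analogous to res_to_compress_queue and compressed_res_queue. */
    static struct queue to_compress = QUEUE_INIT;
    static struct queue compressed  = QUEUE_INIT;

    static void *compressor_thread_proc(void *arg)
    {
            struct msg *m;

            /* Repeatedly take a message, "compress" its chunk, and hand it
             * back.  A NULL message tells the thread to exit. */
            while ((m = queue_get(&to_compress)) != NULL) {
                    memcpy(m->out, m->in, m->in_size);  /* placeholder */
                    m->out_size = m->in_size;
                    queue_put(&compressed, m);
            }
            return NULL;
    }

    int main(void)
    {
            static struct msg msgs[QUEUE_CAP];
            pthread_t thr;

            pthread_create(&thr, NULL, compressor_thread_proc, NULL);

            /* Main thread: feed uncompressed chunks to the compressor... */
            for (unsigned i = 0; i < QUEUE_CAP; i++) {
                    msgs[i].in_size = CHUNK_SIZE;
                    queue_put(&to_compress, &msgs[i]);
            }
            /* ...then collect the compressed results and "write" them. */
            for (unsigned i = 0; i < QUEUE_CAP; i++) {
                    struct msg *m = queue_get(&compressed);
                    printf("chunk: %zu => %zu bytes\n", m->in_size, m->out_size);
            }

            queue_put(&to_compress, NULL);  /* shut down the compressor thread */
            pthread_join(thr, NULL);
            return 0;
    }

Unlike this single-consumer sketch, the real code runs several compressor threads, so messages can come back out of order; that is why receive_compressed_chunks() in the patch above buffers any message that is not for the next expected chunks of the stream currently being written.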