return 0;
}
+/*
+ * Seek the output stream @out_fp to @offset, flush its buffered output,
+ * and truncate the underlying file to @offset, so that any data
+ * previously written at or beyond @offset is discarded.
+ *
+ * Returns 0 on success, or WIMLIB_ERR_WRITE if seeking, flushing, or
+ * truncating fails (errno is reported via ERROR_WITH_ERRNO).
+ */
+static int
+fflush_and_ftruncate(FILE *out_fp, off_t offset)
+{
+ if (fseeko(out_fp, offset, SEEK_SET) ||
+ fflush(out_fp) ||
+ ftruncate(fileno(out_fp), offset))
+ {
+ ERROR_WITH_ERRNO("Failed to flush and/or truncate "
+ "output WIM file");
+ return WIMLIB_ERR_WRITE;
+ } else {
+ return 0;
+ }
+}
+
static int
finalize_and_check_sha1(SHA_CTX *sha_ctx, struct wim_lookup_table_entry *lte)
{
off_t offset;
int ret;
- DEBUG2("wim_resource_size(lte)=%"PRIu64, wim_resource_size(lte));
-
flags &= ~WIMLIB_RESOURCE_FLAG_RECOMPRESS;
/* Get current position in output WIM */
if (new_size >= wim_resource_size(lte)) {
/* Oops! We compressed the resource to larger than the original
* size. Write the resource uncompressed instead. */
- if (fseeko(out_fp, offset, SEEK_SET) ||
- fflush(out_fp) ||
- ftruncate(fileno(out_fp),
- offset + wim_resource_size(lte)))
- {
- ERROR_WITH_ERRNO("Failed to flush and/or truncate "
- "output WIM file");
- ret = WIMLIB_ERR_WRITE;
- goto out_free_chunk_tab;
- }
DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
"writing uncompressed instead",
wim_resource_size(lte), new_size);
+ ret = fflush_and_ftruncate(out_fp, offset);
+ if (ret)
+ goto out_free_chunk_tab;
write_ctx.compress = NULL;
write_ctx.doing_sha = false;
out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
compress_chunks(struct message *msg, compress_func_t compress)
{
for (unsigned i = 0; i < msg->num_chunks; i++) {
- DEBUG2("compress chunk %u of %u", i, msg->num_chunks);
unsigned len = compress(msg->uncompressed_chunks[i],
msg->uncompressed_chunk_sizes[i],
msg->compressed_chunks[i]);
ctx->write_resource_flags);
}
+/* Write a list of streams, taking into account that some streams may be
+ * duplicates that are checksummed and discarded on the fly, and also delegating
+ * the actual writing of a stream to a function @write_stream_cb, which is
+ * passed the context @write_stream_ctx. */
static int
do_write_stream_list(struct list_head *stream_list,
struct wim_lookup_table *lookup_table,
* just skip to the next stream. */
DEBUG("Discarding duplicate stream of length %"PRIu64,
wim_resource_size(lte));
+ lte->no_progress = 0;
goto skip_to_progress;
}
}
* the SHA1 message digest yet. */
wimlib_assert(lte->out_refcnt != 0);
lte->deferred = 0;
+ lte->no_progress = 0;
ret = (*write_stream_cb)(lte, write_stream_ctx);
if (ret)
break;
lte->unhashed = 0;
}
skip_to_progress:
- if (progress_func) {
+ if (!lte->no_progress) {
do_write_streams_progress(progress,
progress_func,
wim_resource_size(lte));
wimlib_progress_func_t progress_func,
union wimlib_progress_info *progress)
{
+ DEBUG("Writing stream list (serial version)");
progress->write_streams.num_threads = 1;
if (progress_func)
progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
for (unsigned i = 0; i < msg->num_chunks; i++) {
unsigned chunk_csize = msg->compressed_chunk_sizes[i];
- DEBUG2("Write wim chunk %u of %u (csize = %u)",
- i, msg->num_chunks, chunk_csize);
-
if (fwrite(msg->out_compressed_chunks[i], 1, chunk_csize, out_fp)
!= chunk_csize)
{
wimlib_assert(!list_empty(&ctx->outstanding_streams));
wimlib_assert(ctx->num_outstanding_messages != 0);
- DEBUG2("Receiving more compressed chunks");
cur_lte = container_of(ctx->outstanding_streams.next,
struct wim_lookup_table_entry,
being_compressed_list);
msg->complete = true;
--ctx->num_outstanding_messages;
- DEBUG2("recved msg %p", msg);
-
/* Is this the next chunk in the current resource? If it's not
* (i.e., an earlier chunk in a same or different resource
* hasn't been compressed yet), do nothing, and keep this
if (list_empty(&cur_lte->msg_list) &&
msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks)
{
- DEBUG2("Finish wim chunk tab");
u64 res_csize;
+ off_t offset;
+
ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab,
ctx->out_fp,
&res_csize);
return ret;
list_del(&cur_lte->being_compressed_list);
-#if 0
+
+ /* Grab the offset of this stream in the output file
+ * from the chunk table before we free it. */
+ offset = ctx->cur_chunk_tab->file_offset;
+
+ FREE(ctx->cur_chunk_tab);
+ ctx->cur_chunk_tab = NULL;
+
if (res_csize >= wim_resource_size(cur_lte)) {
/* Oops! We compressed the resource to
* larger than the original size. Write
* the resource uncompressed instead. */
- ret = write_uncompressed_resource_and_truncate(
- cur_lte,
- ctx->out_fp,
- ctx->cur_chunk_tab->file_offset,
- &cur_lte->output_resource_entry);
+ DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
+ "writing uncompressed instead",
+ wim_resource_size(cur_lte), res_csize);
+ ret = fflush_and_ftruncate(ctx->out_fp, offset);
if (ret)
- goto out;
- } else
-#endif
- {
+ return ret;
+ ret = write_wim_resource(cur_lte,
+ ctx->out_fp,
+ WIMLIB_COMPRESSION_TYPE_NONE,
+ &cur_lte->output_resource_entry,
+ ctx->write_resource_flags);
+ if (ret)
+ return ret;
+ } else {
cur_lte->output_resource_entry.size =
res_csize;
cur_lte->resource_entry.original_size;
cur_lte->output_resource_entry.offset =
- ctx->cur_chunk_tab->file_offset;
+ offset;
cur_lte->output_resource_entry.flags =
cur_lte->resource_entry.flags |
WIM_RESHDR_FLAG_COMPRESSED;
}
- do_write_streams_progress(ctx->progress, ctx->progress_func,
+
+ do_write_streams_progress(ctx->progress,
+ ctx->progress_func,
wim_resource_size(cur_lte));
- FREE(ctx->cur_chunk_tab);
- ctx->cur_chunk_tab = NULL;
/* Since we just finished writing a stream, write any
* streams that have been added to the serial_streams
* resources that don't need to be compressed because
* the desired compression type is the same as the
* previous compression type). */
- ret = do_write_stream_list_serial(&ctx->serial_streams,
- ctx->lookup_table,
- ctx->out_fp,
- ctx->out_ctype,
- ctx->write_resource_flags,
- ctx->progress_func,
- ctx->progress);
- if (ret)
- return ret;
+ if (!list_empty(&ctx->serial_streams)) {
+ ret = do_write_stream_list_serial(&ctx->serial_streams,
+ ctx->lookup_table,
+ ctx->out_fp,
+ ctx->out_ctype,
+ ctx->write_resource_flags,
+ ctx->progress_func,
+ ctx->progress);
+ if (ret)
+ return ret;
+ }
+
+ /* Advance to the next stream to write. */
if (list_empty(&ctx->outstanding_streams)) {
cur_lte = NULL;
} else {
cur_lte = container_of(ctx->outstanding_streams.next,
struct wim_lookup_table_entry,
being_compressed_list);
- #ifdef ENABLE_MORE_DEBUG
- DEBUG2("Advance to stream:");
- print_lookup_table_entry(cur_lte, stderr);
- #endif
}
}
}
return 0;
}
+/* Called when the main thread has read a new chunk of data. */
static int
main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx)
{
struct message *next_msg;
u64 next_chunk_in_msg;
- DEBUG2("chunk_size=%zu, wim_resource_size(next_lte)=%"PRIu64,
- chunk_size, wim_resource_size(ctx->next_lte));
-
+ /* Update SHA1 message digest for the stream currently being read by the
+ * main thread. */
sha1_update(&ctx->next_sha_ctx, chunk, chunk_size);
+
+ /* We send chunks of data to the compressor threads in batches which we
+ * refer to as "messages". @next_msg is the message that is currently
+ * being prepared to send off. If it is NULL, that indicates that we
+ * need to start a new message. */
next_msg = ctx->next_msg;
if (!next_msg) {
- /* Start filling in a new message */
-
- DEBUG2("Start new msg");
-
+ /* We need to start a new message. First check to see if there
+ * is a message available in the list of available messages. If
+ * so, we can just take one. If not, all the messages (there is
+ * a fixed number of them, proportional to the number of
+ * threads) have been sent off to the compressor threads, so we
+ * receive messages from the compressor threads containing
+ * compressed chunks of data.
+ *
+ * We may need to receive multiple messages before one is
+ * actually available to use because messages received that are
+ * *not* for the very next set of chunks to compress must be
+ * buffered until it's time to write those chunks. */
while (list_empty(&ctx->available_msgs)) {
- /* No message available; receive messages, writing
- * compressed data. */
- DEBUG2("No msgs available!");
ret = receive_compressed_chunks(ctx);
if (ret)
return ret;
next_msg->begin_chunk = ctx->next_chunk;
next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG,
ctx->next_num_chunks - ctx->next_chunk);
- DEBUG2("next_msg {begin_chunk=%"PRIu64", num_chunks=%"PRIu64"}",
- next_msg->begin_chunk, next_msg->num_chunks);
ctx->next_msg = next_msg;
}
+ /* Fill in the next chunk to compress */
next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk;
- /* Fill in the next chunk to compress */
next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size;
memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg],
chunk, chunk_size);
ctx->next_chunk++;
if (++next_chunk_in_msg == next_msg->num_chunks) {
- DEBUG2("Sending message %p", next_msg);
/* Send off an array of chunks to compress */
list_add_tail(&next_msg->list, &ctx->next_lte->msg_list);
shared_queue_put(ctx->res_to_compress_queue, next_msg);
{
struct main_writer_thread_ctx *ctx = _ctx;
int ret;
- DEBUG2("finishing");
while (ctx->num_outstanding_messages != 0) {
ret = receive_compressed_chunks(ctx);
if (ret)
{
int ret;
-#ifdef ENABLE_MORE_DEBUG
- DEBUG2("Submit for compression:");
- print_lookup_table_entry(lte, stderr);
-#endif
-
+ /* Read the entire stream @lte, feeding its data chunks to the
+ * compressor threads. Also SHA1-sum the stream; this is required in
+ * the case that @lte is unhashed, and a nice additional verification
+ * when @lte is already hashed. */
sha1_init(&ctx->next_sha_ctx);
ctx->next_chunk = 0;
ctx->next_num_chunks = wim_resource_chunks(lte);
!(ctx->write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) &&
wimlib_get_compression_type(lte->wim) == ctx->out_ctype))
{
+ /* Stream is too small or isn't being compressed. Process it by
+ * the main thread when we have a chance. We can't necessarily
+ * process it right here, as the main thread could be in the
+ * middle of writing a different stream. */
list_add_tail(<e->write_streams_list, &ctx->serial_streams);
lte->deferred = 1;
ret = 0;
} else {
ret = submit_stream_for_compression(lte, ctx);
}
+ lte->no_progress = 1;
return ret;
}
#endif
}
+/* Equivalent to write_stream_list_serial(), except this takes a @num_threads
+ * parameter and will perform compression using that many threads. Falls
+ * back to write_stream_list_serial() on certain errors, such as a failure to
+ * create the number of threads requested.
+ *
+ * High level description of the algorithm for writing compressed streams in
+ * parallel: We perform compression on chunks of size WIM_CHUNK_SIZE bytes
+ * rather than on full files. The currently executing thread becomes the main
+ * thread and is entirely in charge of reading the data to compress (which may
+ * be in any location understood by the resource code--- such as in an external
+ * file being captured, or in another WIM file from which an image is being
+ * exported) and actually writing the compressed data to the output file.
+ * Additional threads are "compressor threads" and all execute the
+ * compressor_thread_proc, where they repeatedly retrieve buffers of data from
+ * the main thread, compress them, and hand them back to the main thread.
+ *
+ * Certain streams, such as streams that do not need to be compressed (e.g.
+ * input compression type same as output compression type) or streams of very
+ * small size are placed in a list (main_writer_thread_ctx.serial_streams) and
+ * handled entirely by the main thread at an appropriate time.
+ *
+ * At any given point in time, multiple streams may be having chunks compressed
+ * concurrently. The stream that the main thread is currently *reading* may be
+ * later in the list than the stream that the main thread is currently
+ * *writing*.
+ */
static int
write_stream_list_parallel(struct list_head *stream_list,
struct wim_lookup_table *lookup_table,
if (nthreads < 1 || nthreads > UINT_MAX) {
WARNING("Could not determine number of processors! Assuming 1");
goto out_serial;
+ } else if (nthreads == 1) {
+ goto out_serial_quiet;
} else {
num_threads = nthreads;
}
}
+ DEBUG("Writing stream list (parallel version, num_threads=%u)",
+ num_threads);
+
progress->write_streams.num_threads = num_threads;
static const size_t MESSAGES_PER_THREAD = 2;
goto out_join;
ret = do_write_stream_list(stream_list, lookup_table,
main_thread_process_next_stream,
- &ctx, NULL, NULL);
+ &ctx, progress_func, progress);
if (ret)
goto out_destroy_ctx;
+
+ /* The main thread has finished reading all streams that are going to be
+ * compressed in parallel, and it now needs to wait for all remaining
+ * chunks to be compressed so that the remaining streams can actually be
+ * written to the output file. Furthermore, any remaining streams that
+ * had processing deferred to the main thread need to be handled. These
+ * tasks are done by the main_writer_thread_finish() function. */
ret = main_writer_thread_finish(&ctx);
out_destroy_ctx:
main_writer_thread_destroy_ctx(&ctx);
return ret;
out_serial:
WARNING("Falling back to single-threaded compression");
+out_serial_quiet:
return write_stream_list_serial(stream_list,
lookup_table,
out_fp,