#include <limits.h>
-#if defined(__WIN32__) && !defined(INVALID_HANDLE_VALUE)
-# define INVALID_HANDLE_VALUE ((HANDLE)(-1))
-#endif
-
/* Chunk table that's located at the beginning of each compressed resource in
* the WIM. (This is not the on-disk format; the on-disk format just has an
* array of offsets.) */
u64 num_chunks = (size + WIM_CHUNK_SIZE - 1) / WIM_CHUNK_SIZE;
size_t alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64);
struct chunk_table *chunk_tab = CALLOC(1, alloc_size);
- int ret;
if (!chunk_tab) {
ERROR("Failed to allocate chunk table for %"PRIu64" byte "
"resource", size);
- ret = WIMLIB_ERR_NOMEM;
- goto out;
+ return WIMLIB_ERR_NOMEM;
}
chunk_tab->file_offset = file_offset;
chunk_tab->num_chunks = num_chunks;
ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
"file resource");
FREE(chunk_tab);
- ret = WIMLIB_ERR_WRITE;
- goto out;
+ return WIMLIB_ERR_WRITE;
}
-
- ret = 0;
*chunk_tab_ret = chunk_tab;
-out:
- return ret;
+ return 0;
}
/*
typedef unsigned (*compress_func_t)(const void *chunk, unsigned chunk_size,
void *out);
-compress_func_t
+static compress_func_t
get_compress_func(int out_ctype)
{
if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX)
* Returns 0 on success; nonzero on failure.
*/
static int
-write_wim_resource_chunk(const void *chunk, unsigned chunk_size,
- FILE *out_fp, compress_func_t compress,
- struct chunk_table *chunk_tab)
+write_wim_resource_chunk(const void * restrict chunk,
+ unsigned chunk_size,
+ FILE * restrict out_fp,
+ compress_func_t compress,
+ struct chunk_table * restrict chunk_tab)
{
- const u8 *out_chunk;
+ const void *out_chunk;
unsigned out_chunk_size;
if (compress) {
- u8 *compressed_chunk = alloca(chunk_size);
+ void *compressed_chunk = alloca(chunk_size);
- out_chunk_size = compress(chunk, chunk_size, compressed_chunk);
+ out_chunk_size = (*compress)(chunk, chunk_size, compressed_chunk);
if (out_chunk_size) {
/* Write compressed */
out_chunk = compressed_chunk;
* @compressed_size_p.
*/
static int
-finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab,
- FILE *out_fp, u64 *compressed_size_p)
+finish_wim_resource_chunk_tab(struct chunk_table * restrict chunk_tab,
+ FILE * restrict out_fp,
+ u64 * restrict compressed_size_p)
{
size_t bytes_written;
if (fseeko(out_fp, chunk_tab->file_offset, SEEK_SET) != 0) {
}
static int
-finalize_and_check_sha1(SHA_CTX *sha_ctx, struct wim_lookup_table_entry *lte)
+fflush_and_ftruncate(FILE *out_fp, off_t offset)
+{
+ if (fseeko(out_fp, offset, SEEK_SET) ||
+ fflush(out_fp) ||
+ ftruncate(fileno(out_fp), offset))
+ {
+ ERROR_WITH_ERRNO("Failed to flush and/or truncate "
+ "output WIM file");
+ return WIMLIB_ERR_WRITE;
+ } else {
+ return 0;
+ }
+}
+
+static int
+finalize_and_check_sha1(SHA_CTX * restrict sha_ctx,
+ struct wim_lookup_table_entry * restrict lte)
{
u8 md[SHA1_HASH_SIZE];
sha1_final(md, sha_ctx);
};
static int
-write_resource_cb(const void *chunk, size_t chunk_size, void *_ctx)
+write_resource_cb(const void *restrict chunk, size_t chunk_size,
+ void *restrict _ctx)
{
struct write_resource_ctx *ctx = _ctx;
if (ctx->doing_sha)
sha1_update(&ctx->sha_ctx, chunk, chunk_size);
-
- if (ctx->compress) {
- return write_wim_resource_chunk(chunk, chunk_size,
- ctx->out_fp, ctx->compress,
- ctx->chunk_tab);
- } else {
- if (fwrite(chunk, 1, chunk_size, ctx->out_fp) != chunk_size) {
- ERROR_WITH_ERRNO("Error writing to output WIM");
- return WIMLIB_ERR_WRITE;
- } else {
- return 0;
- }
- }
+ return write_wim_resource_chunk(chunk, chunk_size,
+ ctx->out_fp, ctx->compress,
+ ctx->chunk_tab);
}
/*
flags &= ~WIMLIB_RESOURCE_FLAG_RECOMPRESS;
- if (wim_resource_size(lte) == 0) {
- /* Empty resource; nothing needs to be done, so just return
- * success. */
- return 0;
- }
-
/* Get current position in output WIM */
offset = ftello(out_fp);
if (offset == -1) {
if (new_size >= wim_resource_size(lte)) {
/* Oops! We compressed the resource to larger than the original
* size. Write the resource uncompressed instead. */
- if (fseeko(out_fp, offset, SEEK_SET) ||
- fflush(out_fp) ||
- ftruncate(fileno(out_fp),
- offset + wim_resource_size(lte)))
- {
- ERROR_WITH_ERRNO("Failed to flush and/or truncate "
- "output WIM file");
- ret = WIMLIB_ERR_WRITE;
- goto out_free_chunk_tab;
- }
DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
"writing uncompressed instead",
wim_resource_size(lte), new_size);
+ ret = fflush_and_ftruncate(out_fp, offset);
+ if (ret)
+ goto out_free_chunk_tab;
write_ctx.compress = NULL;
write_ctx.doing_sha = false;
out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
wimlib_assert(size != 0);
q->array = CALLOC(sizeof(q->array[0]), size);
if (!q->array)
- return WIMLIB_ERR_NOMEM;
+ goto err;
q->filled_slots = 0;
q->front = 0;
q->back = size - 1;
q->size = size;
- pthread_mutex_init(&q->lock, NULL);
- pthread_cond_init(&q->msg_avail_cond, NULL);
- pthread_cond_init(&q->space_avail_cond, NULL);
+ if (pthread_mutex_init(&q->lock, NULL)) {
+ ERROR_WITH_ERRNO("Failed to initialize mutex");
+ goto err;
+ }
+ if (pthread_cond_init(&q->msg_avail_cond, NULL)) {
+ ERROR_WITH_ERRNO("Failed to initialize condition variable");
+ goto err_destroy_lock;
+ }
+ if (pthread_cond_init(&q->space_avail_cond, NULL)) {
+ ERROR_WITH_ERRNO("Failed to initialize condition variable");
+ goto err_destroy_msg_avail_cond;
+ }
return 0;
+err_destroy_msg_avail_cond:
+ pthread_cond_destroy(&q->msg_avail_cond);
+err_destroy_lock:
+ pthread_mutex_destroy(&q->lock);
+err:
+ return WIMLIB_ERR_NOMEM;
}
static void
compress_chunks(struct message *msg, compress_func_t compress)
{
for (unsigned i = 0; i < msg->num_chunks; i++) {
- DEBUG2("compress chunk %u of %u", i, msg->num_chunks);
unsigned len = compress(msg->uncompressed_chunks[i],
msg->uncompressed_chunk_sizes[i],
msg->compressed_chunks[i]);
}
}
+struct serial_write_stream_ctx {
+ FILE *out_fp;
+ int out_ctype;
+ int write_resource_flags;
+};
+
+static int
+serial_write_stream(struct wim_lookup_table_entry *lte, void *_ctx)
+{
+ struct serial_write_stream_ctx *ctx = _ctx;
+ return write_wim_resource(lte, ctx->out_fp,
				   ctx->out_ctype, &lte->output_resource_entry,
+ ctx->write_resource_flags);
+}
+
+/* Write a list of streams, taking into account that some streams may be
+ * duplicates that are checksummed and discarded on the fly, and also delegating
+ * the actual writing of a stream to a function @write_stream_cb, which is
+ * passed the context @write_stream_ctx. */
static int
do_write_stream_list(struct list_head *stream_list,
struct wim_lookup_table *lookup_table,
- FILE *out_fp,
- int out_ctype,
- int write_resource_flags,
+ int (*write_stream_cb)(struct wim_lookup_table_entry *, void *),
+ void *write_stream_ctx,
wimlib_progress_func_t progress_func,
union wimlib_progress_info *progress)
{
struct wim_lookup_table_entry *tmp;
u32 orig_refcnt = lte->out_refcnt;
- ret = hash_unhashed_stream(lte,
- lookup_table,
- &tmp);
+ ret = hash_unhashed_stream(lte, lookup_table, &tmp);
if (ret)
break;
if (tmp != lte) {
* just skip to the next stream. */
DEBUG("Discarding duplicate stream of length %"PRIu64,
wim_resource_size(lte));
+ lte->no_progress = 0;
goto skip_to_progress;
}
}
* while in the latter case this is done because we do not have
* the SHA1 message digest yet. */
wimlib_assert(lte->out_refcnt != 0);
- ret = write_wim_resource(lte,
- out_fp,
- out_ctype,
-				       &lte->output_resource_entry,
- write_resource_flags);
+ lte->deferred = 0;
+ lte->no_progress = 0;
+ ret = (*write_stream_cb)(lte, write_stream_ctx);
if (ret)
break;
+ /* In parallel mode, some streams are deferred for later,
+ * serialized processing; ignore them here. */
+ if (lte->deferred)
+ continue;
if (lte->unhashed) {
			list_del(&lte->unhashed_list);
lookup_table_insert(lookup_table, lte);
lte->unhashed = 0;
}
skip_to_progress:
- do_write_streams_progress(progress,
- progress_func,
- wim_resource_size(lte));
+ if (!lte->no_progress) {
+ do_write_streams_progress(progress,
+ progress_func,
+ wim_resource_size(lte));
+ }
}
return ret;
}
+static int
+do_write_stream_list_serial(struct list_head *stream_list,
+ struct wim_lookup_table *lookup_table,
+ FILE *out_fp,
+ int out_ctype,
+ int write_resource_flags,
+ wimlib_progress_func_t progress_func,
+ union wimlib_progress_info *progress)
+{
+ struct serial_write_stream_ctx ctx = {
+ .out_fp = out_fp,
+ .out_ctype = out_ctype,
+ .write_resource_flags = write_resource_flags,
+ };
+ return do_write_stream_list(stream_list,
+ lookup_table,
+ serial_write_stream,
+ &ctx,
+ progress_func,
+ progress);
+}
+
+static inline int
+write_flags_to_resource_flags(int write_flags)
+{
+ int resource_flags = 0;
+
+ if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
+ resource_flags |= WIMLIB_RESOURCE_FLAG_RECOMPRESS;
+ return resource_flags;
+}
+
static int
write_stream_list_serial(struct list_head *stream_list,
struct wim_lookup_table *lookup_table,
FILE *out_fp,
int out_ctype,
- int write_flags,
+ int write_resource_flags,
wimlib_progress_func_t progress_func,
union wimlib_progress_info *progress)
{
- int write_resource_flags = 0;
- if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)
- write_resource_flags |= WIMLIB_RESOURCE_FLAG_RECOMPRESS;
-
+ DEBUG("Writing stream list (serial version)");
progress->write_streams.num_threads = 1;
if (progress_func)
progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
- return do_write_stream_list(stream_list,
- lookup_table,
- out_fp,
- out_ctype,
- write_resource_flags,
- progress_func,
- progress);
+ return do_write_stream_list_serial(stream_list,
+ lookup_table,
+ out_fp,
+ out_ctype,
+ write_resource_flags,
+ progress_func,
+ progress);
}
#ifdef ENABLE_MULTITHREADED_COMPRESSION
for (unsigned i = 0; i < msg->num_chunks; i++) {
unsigned chunk_csize = msg->compressed_chunk_sizes[i];
- DEBUG2("Write wim chunk %u of %u (csize = %u)",
- i, msg->num_chunks, chunk_csize);
-
if (fwrite(msg->out_compressed_chunks[i], 1, chunk_csize, out_fp)
!= chunk_csize)
{
struct wim_lookup_table *lookup_table;
FILE *out_fp;
int out_ctype;
+ int write_resource_flags;
struct shared_queue *res_to_compress_queue;
struct shared_queue *compressed_res_queue;
size_t num_messages;
- int write_flags;
wimlib_progress_func_t progress_func;
union wimlib_progress_info *progress;
struct list_head available_msgs;
struct list_head outstanding_streams;
struct list_head serial_streams;
+ size_t num_outstanding_messages;
+
+ SHA_CTX next_sha_ctx;
u64 next_chunk;
u64 next_num_chunks;
+ struct wim_lookup_table_entry *next_lte;
+
struct message *msgs;
struct message *next_msg;
- size_t next_chunk_in_msg;
- struct wim_lookup_table_entry *cur_lte;
struct chunk_table *cur_chunk_tab;
- struct wim_lookup_table_entry *next_lte;
- SHA_CTX sha_ctx;
- u8 next_hash[20];
};
static int
static void
main_writer_thread_destroy_ctx(struct main_writer_thread_ctx *ctx)
{
+ while (ctx->num_outstanding_messages--)
+ shared_queue_get(ctx->compressed_res_queue);
free_messages(ctx->msgs, ctx->num_messages);
FREE(ctx->cur_chunk_tab);
}
-
static int
main_writer_thread_init_ctx(struct main_writer_thread_ctx *ctx)
{
* chunks sent off for compression.
*
* The first stream in outstanding_streams is the stream that is
- * currently being written (cur_lte).
+ * currently being written.
*
* The last stream in outstanding_streams is the stream that is
- * currently being read and chunks fed to the compressor threads. */
+ * currently being read and having chunks fed to the compressor threads.
+ * */
INIT_LIST_HEAD(&ctx->outstanding_streams);
+ ctx->num_outstanding_messages = 0;
+
+ ctx->next_msg = NULL;
/* Resources that don't need any chunks compressed are added to this
* list and written directly by the main thread. */
INIT_LIST_HEAD(&ctx->serial_streams);
- ctx->cur_lte = NULL;
+ ctx->cur_chunk_tab = NULL;
+
return 0;
}
int ret;
wimlib_assert(!list_empty(&ctx->outstanding_streams));
+ wimlib_assert(ctx->num_outstanding_messages != 0);
+
+ cur_lte = container_of(ctx->outstanding_streams.next,
+ struct wim_lookup_table_entry,
+ being_compressed_list);
/* Get the next message from the queue and process it.
* The message will contain 1 or more data chunks that have been
* compressed. */
msg = shared_queue_get(ctx->compressed_res_queue);
msg->complete = true;
- cur_lte = ctx->cur_lte;
+ --ctx->num_outstanding_messages;
/* Is this the next chunk in the current resource? If it's not
* (i.e., an earlier chunk in a same or different resource
*
* Otherwise, write all the chunks we can. */
while (cur_lte != NULL &&
- !list_empty(&cur_lte->msg_list) &&
- (msg = container_of(cur_lte->msg_list.next,
- struct message,
- list))->complete)
+ !list_empty(&cur_lte->msg_list)
+ && (msg = container_of(cur_lte->msg_list.next,
+ struct message,
+ list))->complete)
{
+ list_move(&msg->list, &ctx->available_msgs);
if (msg->begin_chunk == 0) {
-
/* This is the first set of chunks. Leave space
* for the chunk table in the output file. */
off_t cur_offset = ftello(ctx->out_fp);
- if (cur_offset == -1) {
- ret = WIMLIB_ERR_WRITE;
- goto out;
- }
+ if (cur_offset == -1)
+ return WIMLIB_ERR_WRITE;
ret = begin_wim_resource_chunk_tab(cur_lte,
ctx->out_fp,
cur_offset,
&ctx->cur_chunk_tab);
if (ret)
- goto out;
+ return ret;
}
/* Write the compressed chunks from the message. */
ret = write_wim_chunks(msg, ctx->out_fp, ctx->cur_chunk_tab);
if (ret)
- goto out;
-
- list_del(&msg->list);
-
- /* This message is available to use for different chunks
- * now. */
- list_add(&msg->list, &ctx->available_msgs);
+ return ret;
/* Was this the last chunk of the stream? If so, finish
* it. */
if (list_empty(&cur_lte->msg_list) &&
msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks)
{
- DEBUG2("Finish wim chunk tab");
u64 res_csize;
+ off_t offset;
+
ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab,
ctx->out_fp,
&res_csize);
if (ret)
- goto out;
+ return ret;
+
+ list_del(&cur_lte->being_compressed_list);
+
+ /* Grab the offset of this stream in the output file
+ * from the chunk table before we free it. */
+ offset = ctx->cur_chunk_tab->file_offset;
+
+ FREE(ctx->cur_chunk_tab);
+ ctx->cur_chunk_tab = NULL;
-#if 0
if (res_csize >= wim_resource_size(cur_lte)) {
/* Oops! We compressed the resource to
* larger than the original size. Write
* the resource uncompressed instead. */
- ret = write_uncompressed_resource_and_truncate(
- cur_lte,
- ctx->out_fp,
- ctx->cur_chunk_tab->file_offset,
- &cur_lte->output_resource_entry);
+ DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
+ "writing uncompressed instead",
+ wim_resource_size(cur_lte), res_csize);
+ ret = fflush_and_ftruncate(ctx->out_fp, offset);
if (ret)
- goto out;
- } else
-#endif
- {
+ return ret;
+ ret = write_wim_resource(cur_lte,
+ ctx->out_fp,
+ WIMLIB_COMPRESSION_TYPE_NONE,
+ &cur_lte->output_resource_entry,
+ ctx->write_resource_flags);
+ if (ret)
+ return ret;
+ } else {
cur_lte->output_resource_entry.size =
res_csize;
cur_lte->resource_entry.original_size;
cur_lte->output_resource_entry.offset =
- ctx->cur_chunk_tab->file_offset;
+ offset;
cur_lte->output_resource_entry.flags =
cur_lte->resource_entry.flags |
WIM_RESHDR_FLAG_COMPRESSED;
}
- do_write_streams_progress(ctx->progress, ctx->progress_func,
- wim_resource_size(cur_lte));
- FREE(ctx->cur_chunk_tab);
- ctx->cur_chunk_tab = NULL;
-
- struct list_head *next = cur_lte->write_streams_list.next;
- list_del(&cur_lte->write_streams_list);
- if (next == &ctx->outstanding_streams)
- cur_lte = NULL;
- else
- cur_lte = container_of(cur_lte->write_streams_list.next,
- struct wim_lookup_table_entry,
- write_streams_list);
+ do_write_streams_progress(ctx->progress,
+ ctx->progress_func,
+ wim_resource_size(cur_lte));
/* Since we just finished writing a stream, write any
* streams that have been added to the serial_streams
* resources that don't need to be compressed because
* the desired compression type is the same as the
* previous compression type). */
- ret = do_write_stream_list(&ctx->serial_streams,
- ctx->lookup_table,
- ctx->out_fp,
- ctx->out_ctype,
- ctx->progress_func,
- ctx->progress,
- 0);
- if (ret)
- goto out;
+ if (!list_empty(&ctx->serial_streams)) {
+ ret = do_write_stream_list_serial(&ctx->serial_streams,
+ ctx->lookup_table,
+ ctx->out_fp,
+ ctx->out_ctype,
+ ctx->write_resource_flags,
+ ctx->progress_func,
+ ctx->progress);
+ if (ret)
+ return ret;
+ }
+
+ /* Advance to the next stream to write. */
+ if (list_empty(&ctx->outstanding_streams)) {
+ cur_lte = NULL;
+ } else {
+ cur_lte = container_of(ctx->outstanding_streams.next,
+ struct wim_lookup_table_entry,
+ being_compressed_list);
+ }
}
}
-out:
- ctx->cur_lte = cur_lte;
- return ret;
+ return 0;
}
+/* Called when the main thread has read a new chunk of data. */
static int
main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx)
{
struct main_writer_thread_ctx *ctx = _ctx;
int ret;
struct message *next_msg;
+ u64 next_chunk_in_msg;
- next_msg = ctx->next_msg;
-
- sha1_update(&ctx->sha_ctx, chunk, chunk_size);
+ /* Update SHA1 message digest for the stream currently being read by the
+ * main thread. */
+ sha1_update(&ctx->next_sha_ctx, chunk, chunk_size);
+ /* We send chunks of data to the compressor chunks in batches which we
+ * refer to as "messages". @next_msg is the message that is currently
+ * being prepared to send off. If it is NULL, that indicates that we
+ * need to start a new message. */
+ next_msg = ctx->next_msg;
if (!next_msg) {
- if (list_empty(&ctx->available_msgs)) {
+ /* We need to start a new message. First check to see if there
+ * is a message available in the list of available messages. If
+ * so, we can just take one. If not, all the messages (there is
+ * a fixed number of them, proportional to the number of
+ * threads) have been sent off to the compressor threads, so we
+ * receive messages from the compressor threads containing
+ * compressed chunks of data.
+ *
+ * We may need to receive multiple messages before one is
+ * actually available to use because messages received that are
+ * *not* for the very next set of chunks to compress must be
+ * buffered until it's time to write those chunks. */
+ while (list_empty(&ctx->available_msgs)) {
ret = receive_compressed_chunks(ctx);
if (ret)
return ret;
}
- wimlib_assert(!list_empty(&ctx->available_msgs));
-
next_msg = container_of(ctx->available_msgs.next,
- struct message,
- list);
+ struct message, list);
list_del(&next_msg->list);
next_msg->complete = false;
next_msg->begin_chunk = ctx->next_chunk;
next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG,
ctx->next_num_chunks - ctx->next_chunk);
- ctx->next_chunk_in_msg = 0;
+ ctx->next_msg = next_msg;
}
- wimlib_assert(next_msg != NULL);
- wimlib_assert(ctx->next_chunk_in_msg < next_msg->num_chunks);
+ /* Fill in the next chunk to compress */
+ next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk;
- next_msg->uncompressed_chunk_sizes[ctx->next_chunk_in_msg] = chunk_size;
- memcpy(next_msg->uncompressed_chunks[ctx->next_chunk_in_msg],
+ next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size;
+ memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg],
chunk, chunk_size);
-
- if (++ctx->next_chunk_in_msg == next_msg->num_chunks) {
- shared_queue_put(ctx->res_to_compress_queue,
- next_msg);
+ ctx->next_chunk++;
+ if (++next_chunk_in_msg == next_msg->num_chunks) {
+ /* Send off an array of chunks to compress */
+ list_add_tail(&next_msg->list, &ctx->next_lte->msg_list);
+ shared_queue_put(ctx->res_to_compress_queue, next_msg);
+ ++ctx->num_outstanding_messages;
ctx->next_msg = NULL;
}
return 0;
}
+static int
+main_writer_thread_finish(void *_ctx)
+{
+ struct main_writer_thread_ctx *ctx = _ctx;
+ int ret;
+ while (ctx->num_outstanding_messages != 0) {
+ ret = receive_compressed_chunks(ctx);
+ if (ret)
+ return ret;
+ }
+ wimlib_assert(list_empty(&ctx->outstanding_streams));
+ return do_write_stream_list_serial(&ctx->serial_streams,
+ ctx->lookup_table,
+ ctx->out_fp,
+ ctx->out_ctype,
+ ctx->write_resource_flags,
+ ctx->progress_func,
+ ctx->progress);
+}
+
static int
submit_stream_for_compression(struct wim_lookup_table_entry *lte,
struct main_writer_thread_ctx *ctx)
{
int ret;
- sha1_init(&ctx->sha_ctx);
+ /* Read the entire stream @lte, feeding its data chunks to the
+ * compressor threads. Also SHA1-sum the stream; this is required in
+ * the case that @lte is unhashed, and a nice additional verification
+ * when @lte is already hashed. */
+ sha1_init(&ctx->next_sha_ctx);
+ ctx->next_chunk = 0;
ctx->next_num_chunks = wim_resource_chunks(lte);
+ ctx->next_lte = lte;
+	INIT_LIST_HEAD(&lte->msg_list);
+	list_add_tail(&lte->being_compressed_list, &ctx->outstanding_streams);
ret = read_resource_prefix(lte, wim_resource_size(lte),
main_writer_thread_cb, ctx, 0);
- if (ret)
- return ret;
- ret = finalize_and_check_sha1(&ctx->sha_ctx, lte);
- if (ret)
- return ret;
+ if (ret == 0) {
+ wimlib_assert(ctx->next_chunk == ctx->next_num_chunks);
+ ret = finalize_and_check_sha1(&ctx->next_sha_ctx, lte);
+ }
+ return ret;
}
-/*
- * This function is executed by the main thread when the resources are being
- * compressed in parallel. The main thread is in change of all reading of the
- * uncompressed data and writing of the compressed data. The compressor threads
- * *only* do compression from/to in-memory buffers.
- *
- * Each unit of work given to a compressor thread is up to MAX_CHUNKS_PER_MSG
- * chunks of compressed data to compress, represented in a `struct message'.
- * Each message is passed from the main thread to a worker thread through the
- * res_to_compress_queue, and it is passed back through the
- * compressed_res_queue.
- */
static int
-main_writer_thread_proc(struct main_writer_thread_ctx *ctx)
+main_thread_process_next_stream(struct wim_lookup_table_entry *lte, void *_ctx)
{
+ struct main_writer_thread_ctx *ctx = _ctx;
int ret;
- struct list_head *stream_list;
- struct wim_lookup_table_entry *lte;
-
- ret = main_writer_thread_init_ctx(ctx);
- if (ret)
- goto out_destroy_ctx;
- stream_list = ctx->stream_list;
- while (!list_empty(stream_list)) {
- lte = container_of(stream_list->next,
- struct wim_lookup_table_entry,
- write_streams_list);
-		list_del(&lte->write_streams_list);
- if (lte->unhashed && !lte->unique_size) {
- struct wim_lookup_table_entry *tmp;
- u32 orig_refcnt = lte->out_refcnt;
-
- ret = hash_unhashed_stream(lte, ctx->lookup_table, &tmp);
- if (ret)
- goto out_destroy_ctx;
- if (tmp != lte) {
- lte = tmp;
- if (orig_refcnt != tmp->out_refcnt) {
- DEBUG("Discarding duplicate stream of length %"PRIu64,
- wim_resource_size(lte));
- goto skip_to_progress;
- }
- }
- }
-
- if (wim_resource_size(lte) < 1000 ||
- ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
- (lte->resource_location == RESOURCE_IN_WIM &&
- wimlib_get_compression_type(lte->wim) == ctx->out_ctype))
- {
-			list_add(&lte->write_streams_list,
- &ctx->serial_streams);
- } else {
- ret = submit_stream_for_compression(lte, ctx);
- if (ret)
- goto out_destroy_ctx;
- if (lte->unhashed) {
-				list_del(&lte->unhashed_list);
- lookup_table_insert(ctx->lookup_table, lte);
- lte->unhashed = 0;
- }
- }
- skip_to_progress:
- do_write_streams_progress(ctx->progress,
- ctx->progress_func,
- wim_resource_size(lte));
- }
-
- while (!list_empty(&ctx->outstanding_streams)) {
- ret = receive_compressed_chunks(ctx);
- if (ret)
- goto out_destroy_ctx;
+ if (wim_resource_size(lte) < 1000 ||
+ ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
+ (lte->resource_location == RESOURCE_IN_WIM &&
+ !(ctx->write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) &&
+ wimlib_get_compression_type(lte->wim) == ctx->out_ctype))
+ {
+ /* Stream is too small or isn't being compressed. Process it by
+ * the main thread when we have a chance. We can't necessarily
+ * process it right here, as the main thread could be in the
+ * middle of writing a different stream. */
+		list_add_tail(&lte->write_streams_list, &ctx->serial_streams);
+ lte->deferred = 1;
+ ret = 0;
+ } else {
+ ret = submit_stream_for_compression(lte, ctx);
}
- ret = 0;
-out_destroy_ctx:
- main_writer_thread_destroy_ctx(ctx);
+ lte->no_progress = 1;
return ret;
}
#endif
}
+/* Equivalent to write_stream_list_serial(), except this takes a @num_threads
+ * parameter and will perform compression using that many threads. Falls
+ * back to write_stream_list_serial() on certain errors, such as a failure to
+ * create the number of threads requested.
+ *
+ * High level description of the algorithm for writing compressed streams in
+ * parallel: We perform compression on chunks of size WIM_CHUNK_SIZE bytes
+ * rather than on full files. The currently executing thread becomes the main
+ * thread and is entirely in charge of reading the data to compress (which may
+ * be in any location understood by the resource code--- such as in an external
+ * file being captured, or in another WIM file from which an image is being
+ * exported) and actually writing the compressed data to the output file.
+ * Additional threads are "compressor threads" and all execute the
+ * compressor_thread_proc, where they repeatedly retrieve buffers of data from
+ * the main thread, compress them, and hand them back to the main thread.
+ *
+ * Certain streams, such as streams that do not need to be compressed (e.g.
+ * input compression type same as output compression type) or streams of very
+ * small size are placed in a list (main_writer_thread_ctx.serial_list) and
+ * handled entirely by the main thread at an appropriate time.
+ *
+ * At any given point in time, multiple streams may be having chunks compressed
+ * concurrently. The stream that the main thread is currently *reading* may be
+ * later in the list that the stream that the main thread is currently
+ * *writing*.
+ */
static int
write_stream_list_parallel(struct list_head *stream_list,
struct wim_lookup_table *lookup_table,
FILE *out_fp,
int out_ctype,
- int write_flags,
- unsigned num_threads,
+ int write_resource_flags,
wimlib_progress_func_t progress_func,
- union wimlib_progress_info *progress)
+ union wimlib_progress_info *progress,
+ unsigned num_threads)
{
int ret;
struct shared_queue res_to_compress_queue;
if (nthreads < 1 || nthreads > UINT_MAX) {
WARNING("Could not determine number of processors! Assuming 1");
goto out_serial;
+ } else if (nthreads == 1) {
+ goto out_serial_quiet;
} else {
num_threads = nthreads;
}
}
+ DEBUG("Writing stream list (parallel version, num_threads=%u)",
+ num_threads);
+
progress->write_streams.num_threads = num_threads;
- static const double MESSAGES_PER_THREAD = 2.0;
+ static const size_t MESSAGES_PER_THREAD = 2;
size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD);
DEBUG("Initializing shared queues (queue_size=%zu)", queue_size);
ret = shared_queue_init(&res_to_compress_queue, queue_size);
- if (ret != 0)
+ if (ret)
goto out_serial;
ret = shared_queue_init(&compressed_res_queue, queue_size);
- if (ret != 0)
+ if (ret)
goto out_destroy_res_to_compress_queue;
struct compressor_thread_params params;
}
for (unsigned i = 0; i < num_threads; i++) {
- DEBUG("pthread_create thread %u", i);
+ DEBUG("pthread_create thread %u of %u", i + 1, num_threads);
ret = pthread_create(&compressor_threads[i], NULL,
				     compressor_thread_proc, &params);
if (ret != 0) {
ret = -1;
ERROR_WITH_ERRNO("Failed to create compressor "
- "thread %u", i);
+ "thread %u of %u",
+ i + 1, num_threads);
num_threads = i;
goto out_join;
}
progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
struct main_writer_thread_ctx ctx;
- memset(&ctx, 0, sizeof(ctx));
ctx.stream_list = stream_list;
ctx.lookup_table = lookup_table;
ctx.out_fp = out_fp;
ctx.res_to_compress_queue = &res_to_compress_queue;
ctx.compressed_res_queue = &compressed_res_queue;
ctx.num_messages = queue_size;
- ctx.write_flags = write_flags;
+ ctx.write_resource_flags = write_resource_flags | WIMLIB_RESOURCE_FLAG_THREADSAFE_READ;
ctx.progress_func = progress_func;
ctx.progress = progress;
- ret = main_writer_thread_proc(&ctx);
+ ret = main_writer_thread_init_ctx(&ctx);
+ if (ret)
+ goto out_join;
+ ret = do_write_stream_list(stream_list, lookup_table,
+ main_thread_process_next_stream,
+ &ctx, progress_func, progress);
+ if (ret)
+ goto out_destroy_ctx;
+
+ /* The main thread has finished reading all streams that are going to be
+ * compressed in parallel, and it now needs to wait for all remaining
+ * chunks to be compressed so that the remaining streams can actually be
+ * written to the output file. Furthermore, any remaining streams that
+ * had processing deferred to the main thread need to be handled. These
+ * tasks are done by the main_writer_thread_finish() function. */
+ ret = main_writer_thread_finish(&ctx);
+out_destroy_ctx:
+ main_writer_thread_destroy_ctx(&ctx);
out_join:
for (unsigned i = 0; i < num_threads; i++)
shared_queue_put(&res_to_compress_queue, NULL);
for (unsigned i = 0; i < num_threads; i++) {
if (pthread_join(compressor_threads[i], NULL)) {
WARNING_WITH_ERRNO("Failed to join compressor "
- "thread %u", i);
+ "thread %u of %u",
+ i + 1, num_threads);
}
}
FREE(compressor_threads);
return ret;
out_serial:
WARNING("Falling back to single-threaded compression");
+out_serial_quiet:
return write_stream_list_serial(stream_list,
lookup_table,
out_fp,
out_ctype,
- write_flags,
+ write_resource_flags,
progress_func,
progress);
u64 total_compression_bytes = 0;
union wimlib_progress_info progress;
int ret;
+ int write_resource_flags;
if (list_empty(stream_list))
return 0;
+ write_resource_flags = write_flags_to_resource_flags(write_flags);
+
/* Calculate the total size of the streams to be written. Note: this
* will be the uncompressed size, as we may not know the compressed size
* yet, and also this will assume that every unhashed stream will be
total_bytes += wim_resource_size(lte);
if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE
&& (wim_resource_compression_type(lte) != out_ctype ||
- (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS)))
+ (write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS)))
{
total_compression_bytes += wim_resource_size(lte);
}
lookup_table,
out_fp,
out_ctype,
- write_flags,
- num_threads,
+ write_resource_flags,
progress_func,
- &progress);
+ &progress,
+ num_threads);
else
#endif
ret = write_stream_list_serial(stream_list,
lookup_table,
out_fp,
out_ctype,
- write_flags,
+ write_resource_flags,
progress_func,
&progress);
return ret;
{
struct stream_size_table *tab = _tab;
size_t pos;
- struct wim_lookup_table_entry *hashed_lte;
+ struct wim_lookup_table_entry *same_size_lte;
struct hlist_node *tmp;
pos = hash_u64(wim_resource_size(lte)) % tab->capacity;
lte->unique_size = 1;
- hlist_for_each_entry(hashed_lte, tmp, &tab->array[pos], hash_list_2) {
- if (wim_resource_size(hashed_lte) == wim_resource_size(lte)) {
+ hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) {
+ if (wim_resource_size(same_size_lte) == wim_resource_size(lte)) {
lte->unique_size = 0;
- hashed_lte->unique_size = 0;
+ same_size_lte->unique_size = 0;
break;
}
}
struct stream_size_table stream_size_tab;
};
+/* First phase of preparing streams for an in-place overwrite. This is called
+ * on all streams, both hashed and unhashed, except the metadata resources. */
static int
-lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *arg)
+lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *_args)
{
- struct lte_overwrite_prepare_args *args = arg;
+ struct lte_overwrite_prepare_args *args = _args;
- if (lte->resource_location == RESOURCE_IN_WIM &&
- lte->wim == args->wim)
- {
+ wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA));
+ if (lte->resource_location != RESOURCE_IN_WIM || lte->wim != args->wim)
+		list_add_tail(&lte->write_streams_list, &args->stream_list);
+ lte->out_refcnt = lte->refcnt;
+ stream_size_table_insert(lte, &args->stream_size_tab);
+ return 0;
+}
+
+/* Second phase of preparing streams for an in-place overwrite. This is called
+ * on existing metadata resources and hashed streams, but not unhashed streams.
+ *
+ * NOTE: lte->output_resource_entry is in union with lte->hash_list_2, so
+ * lte_overwrite_prepare_2() must be called after lte_overwrite_prepare(), as
+ * the latter uses lte->hash_list_2, while the former expects to set
+ * lte->output_resource_entry. */
+static int
+lte_overwrite_prepare_2(struct wim_lookup_table_entry *lte, void *_args)
+{
+ struct lte_overwrite_prepare_args *args = _args;
+
+ if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == args->wim) {
/* We can't do an in place overwrite on the WIM if there are
* streams after the XML data. */
if (lte->resource_entry.offset +
#endif
return WIMLIB_ERR_RESOURCE_ORDER;
}
- } else {
- wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA));
-		list_add_tail(&lte->write_streams_list, &args->stream_list);
- }
- lte->out_refcnt = lte->refcnt;
- stream_size_table_insert(lte, &args->stream_size_tab);
- return 0;
-}
-
-static int
-lte_set_output_res_entry(struct wim_lookup_table_entry *lte, void *_wim)
-{
- if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == _wim) {
		copy_resource_entry(&lte->output_resource_entry,
				    &lte->resource_entry);
}
{
int ret;
struct lte_overwrite_prepare_args args;
+ unsigned i;
args.wim = wim;
args.end_offset = end_offset;
return ret;
INIT_LIST_HEAD(&args.stream_list);
- for (int i = 0; i < wim->hdr.image_count; i++) {
+ for (i = 0; i < wim->hdr.image_count; i++) {
struct wim_image_metadata *imd;
struct wim_lookup_table_entry *lte;
imd = wim->image_metadata[i];
- image_for_each_unhashed_stream(lte, imd) {
- ret = lte_overwrite_prepare(lte, &args);
- if (ret)
- goto out_destroy_stream_size_table;
- }
+ image_for_each_unhashed_stream(lte, imd)
+ lte_overwrite_prepare(lte, &args);
}
- ret = for_lookup_table_entry(wim->lookup_table,
- lte_overwrite_prepare, &args);
- if (ret)
- goto out_destroy_stream_size_table;
-
- for (int i = 0; i < wim->hdr.image_count; i++)
- lte_set_output_res_entry(wim->image_metadata[i]->metadata_lte,
- wim);
- for_lookup_table_entry(wim->lookup_table, lte_set_output_res_entry, wim);
+ for_lookup_table_entry(wim->lookup_table, lte_overwrite_prepare, &args);
list_transfer(&args.stream_list, stream_list);
+
+ for (i = 0; i < wim->hdr.image_count; i++) {
+ ret = lte_overwrite_prepare_2(wim->image_metadata[i]->metadata_lte,
+ &args);
+ if (ret)
+ goto out_destroy_stream_size_table;
+ }
+ ret = for_lookup_table_entry(wim->lookup_table,
+ lte_overwrite_prepare_2, &args);
out_destroy_stream_size_table:
destroy_stream_size_table(&args.stream_size_tab);
return ret;
int ret;
struct list_head stream_list;
off_t old_wim_end;
+ u64 old_lookup_table_end, old_xml_begin, old_xml_end;
DEBUG("Overwriting `%"TS"' in-place", w->filename);
* data, and that there are no stream resources, metadata resources, or
* lookup tables after the XML data. Otherwise, these data would be
* overwritten. */
- if (w->hdr.integrity.offset != 0 &&
- w->hdr.integrity.offset < w->hdr.xml_res_entry.offset) {
+ old_xml_begin = w->hdr.xml_res_entry.offset;
+ old_xml_end = old_xml_begin + w->hdr.xml_res_entry.size;
+ old_lookup_table_end = w->hdr.lookup_table_res_entry.offset +
+ w->hdr.lookup_table_res_entry.size;
+ if (w->hdr.integrity.offset != 0 && w->hdr.integrity.offset < old_xml_end) {
ERROR("Didn't expect the integrity table to be before the XML data");
return WIMLIB_ERR_RESOURCE_ORDER;
}
- if (w->hdr.lookup_table_res_entry.offset > w->hdr.xml_res_entry.offset) {
+ if (old_lookup_table_end > old_xml_begin) {
ERROR("Didn't expect the lookup table to be after the XML data");
return WIMLIB_ERR_RESOURCE_ORDER;
}
-
- if (w->hdr.integrity.offset)
- old_wim_end = w->hdr.integrity.offset + w->hdr.integrity.size;
- else
- old_wim_end = w->hdr.xml_res_entry.offset + w->hdr.xml_res_entry.size;
-
+ /* Set @old_wim_end, which indicates the point beyond which we don't
+ * allow any file and metadata resources to appear without returning
+ * WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we would otherwise
+ * overwrite these resources). */
if (!w->deletion_occurred && !any_images_modified(w)) {
/* If no images have been modified and no images have been
- * deleted, a new lookup table does not need to be written. */
+ * deleted, a new lookup table does not need to be written. We
+ * shall write the new XML data and optional integrity table
+ * immediately after the lookup table. Note that this may
+ * overwrite an existing integrity table. */
DEBUG("Skipping writing lookup table "
"(no images modified or deleted)");
- old_wim_end = w->hdr.lookup_table_res_entry.offset +
- w->hdr.lookup_table_res_entry.size;
+ old_wim_end = old_lookup_table_end;
write_flags |= WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE |
WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML;
+ } else if (w->hdr.integrity.offset) {
+ /* Old WIM has an integrity table; begin writing new streams
+ * after it. */
+ old_wim_end = w->hdr.integrity.offset + w->hdr.integrity.size;
+ } else {
+ /* No existing integrity table; begin writing new streams after
+ * the old XML data. */
+ old_wim_end = old_xml_end;
}
+
ret = prepare_streams_for_overwrite(w, old_wim_end, &stream_list);
if (ret)
return ret;
ret = lock_wim(w, w->out_fp);
if (ret) {
- fclose(w->out_fp);
- w->out_fp = NULL;
+ close_wim_writable(w);
return ret;
}
if (fseeko(w->out_fp, old_wim_end, SEEK_SET) != 0) {
ERROR_WITH_ERRNO("Can't seek to end of WIM");
- fclose(w->out_fp);
- w->out_fp = NULL;
+ close_wim_writable(w);
w->wim_locked = 0;
return WIMLIB_ERR_WRITE;
}
num_threads,
progress_func);
if (ret)
- goto out_ftruncate;
+ goto out_truncate;
for (int i = 0; i < w->hdr.image_count; i++) {
if (w->image_metadata[i]->modified) {
select_wim_image(w, i + 1);
ret = write_metadata_resource(w);
if (ret)
- goto out_ftruncate;
+ goto out_truncate;
}
}
write_flags |= WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE;
ret = finish_write(w, WIMLIB_ALL_IMAGES, write_flags,
progress_func);
-out_ftruncate:
+out_truncate:
close_wim_writable(w);
if (ret != 0 && !(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)",