+ if (!chunk_tab) {
+ ERROR("Failed to allocate chunk table for %"PRIu64" byte "
+ "resource", size);
+ ret = WIMLIB_ERR_NOMEM;
+ goto out;
+ }
+ chunk_tab->file_offset = file_offset;
+ chunk_tab->num_chunks = num_chunks;
+ chunk_tab->original_resource_size = size;
+ chunk_tab->bytes_per_chunk_entry = (size >= (1ULL << 32)) ? 8 : 4;
+ chunk_tab->table_disk_size = chunk_tab->bytes_per_chunk_entry *
+ (num_chunks - 1);
+ chunk_tab->cur_offset = 0;
+ chunk_tab->cur_offset_p = chunk_tab->offsets;
+
+ if (fwrite(chunk_tab, 1, chunk_tab->table_disk_size, out_fp) !=
+ chunk_tab->table_disk_size) {
+ ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
+ "file resource");
+ FREE(chunk_tab);
+ ret = WIMLIB_ERR_WRITE;
+ goto out;
+ }
+
+ ret = 0;
+ *chunk_tab_ret = chunk_tab;
+out:
+ return ret;
+}
+
+/*
+ * compress_func_t - Pointer to a function that compresses a chunk
+ *                   of a WIM resource.  This may be either
+ *                   wimlib_xpress_compress() (xpress-compress.c) or
+ *                   wimlib_lzx_compress() (lzx-compress.c).
+ *
+ * @chunk:	Uncompressed data of the chunk.
+ * @chunk_size:	Size of the uncompressed chunk, in bytes.
+ * @out:	Pointer to output buffer of size at least (@chunk_size - 1) bytes.
+ *
+ * Returns the size of the compressed data written to @out in bytes, or 0 if the
+ * data could not be compressed to (@chunk_size - 1) bytes or fewer.
+ *
+ * As a special requirement, the compression code is optimized for the WIM
+ * format and therefore requires (@chunk_size <= 32768).
+ *
+ * As another special requirement, the compression code will read up to 8 bytes
+ * off the end of the @chunk array for performance reasons.  The values of these
+ * bytes will not affect the output of the compression, but the calling code
+ * must make sure that the buffer holding the uncompressed chunk is actually at
+ * least (@chunk_size + 8) bytes, or at least that these extra bytes are in
+ * mapped memory that will not cause a memory access violation if accessed.
+ */
+typedef unsigned (*compress_func_t)(const void *chunk, unsigned chunk_size,
+ void *out);
+
+/* Return the chunk-compression function corresponding to the output
+ * compression type @out_ctype (LZX or, otherwise, XPRESS). */
+compress_func_t
+get_compress_func(int out_ctype)
+{
+	return (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX) ?
+			wimlib_lzx_compress : wimlib_xpress_compress;
+}
+
+/*
+ * Writes a chunk of a WIM resource to an output file.
+ *
+ * @chunk:	Uncompressed data of the chunk.
+ * @chunk_size:	Size of the chunk (<= WIM_CHUNK_SIZE)
+ * @out_fp:	FILE * to write the chunk to.
+ * @compress:	Compression function to use (NULL if writing uncompressed
+ *		data).
+ * @chunk_tab:	Pointer to chunk table being created.  It is updated with the
+ *		offset of the chunk we write (only when @compress is non-NULL).
+ *
+ * Returns 0 on success; nonzero on failure.
+ */
+static int
+write_wim_resource_chunk(const void *chunk, unsigned chunk_size,
+			 FILE *out_fp, compress_func_t compress,
+			 struct chunk_table *chunk_tab)
+{
+	const u8 *data = chunk;
+	unsigned data_size = chunk_size;
+
+	if (compress) {
+		u8 *cbuf = alloca(chunk_size);
+		unsigned csize = compress(chunk, chunk_size, cbuf);
+
+		/* compress() returns 0 when the chunk would not shrink; in
+		 * that case the chunk is stored uncompressed. */
+		if (csize != 0) {
+			data = cbuf;
+			data_size = csize;
+		}
+		/* Record this chunk's offset in the chunk table. */
+		*chunk_tab->cur_offset_p++ = chunk_tab->cur_offset;
+		chunk_tab->cur_offset += data_size;
+	}
+	if (fwrite(data, 1, data_size, out_fp) != data_size) {
+		ERROR_WITH_ERRNO("Failed to write WIM resource chunk");
+		return WIMLIB_ERR_WRITE;
+	}
+	return 0;
+}
+
+/*
+ * Finishes a WIM chunk table and writes it to the output file at the correct
+ * offset (the location that was reserved for it, chunk_tab->file_offset).
+ *
+ * The final size of the full compressed resource is returned in the
+ * @compressed_size_p.
+ */
+static int
+finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab,
+ FILE *out_fp, u64 *compressed_size_p)
+{
+ size_t bytes_written;
+ /* Seek back to the reserved chunk-table location. */
+ if (fseeko(out_fp, chunk_tab->file_offset, SEEK_SET) != 0) {
+ ERROR_WITH_ERRNO("Failed to seek to byte %"PRIu64" of output "
+ "WIM file", chunk_tab->file_offset);
+ return WIMLIB_ERR_WRITE;
+ }
+
+ /* Convert the offsets to little-endian on-disk form.  For 4-byte
+ * entries the u64 array is narrowed in place; this is safe because the
+ * u32 store at index i only touches bytes of u64 entries that the loop
+ * has already read. */
+ if (chunk_tab->bytes_per_chunk_entry == 8) {
+ array_cpu_to_le64(chunk_tab->offsets, chunk_tab->num_chunks);
+ } else {
+ for (u64 i = 0; i < chunk_tab->num_chunks; i++)
+ ((u32*)chunk_tab->offsets)[i] =
+ cpu_to_le32(chunk_tab->offsets[i]);
+ }
+ /* The first entry is always 0 and is not stored on disk, so the write
+ * starts one entry in; table_disk_size covers (num_chunks - 1) entries. */
+ bytes_written = fwrite((u8*)chunk_tab->offsets +
+ chunk_tab->bytes_per_chunk_entry,
+ 1, chunk_tab->table_disk_size, out_fp);
+ if (bytes_written != chunk_tab->table_disk_size) {
+ ERROR_WITH_ERRNO("Failed to write chunk table in compressed "
+ "file resource");
+ return WIMLIB_ERR_WRITE;
+ }
+ /* Return to the end of the file so subsequent writes append. */
+ if (fseeko(out_fp, 0, SEEK_END) != 0) {
+ ERROR_WITH_ERRNO("Failed to seek to end of output WIM file");
+ return WIMLIB_ERR_WRITE;
+ }
+ *compressed_size_p = chunk_tab->cur_offset + chunk_tab->table_disk_size;
+ return 0;
+}
+
+/* Finish the SHA1 digest in @sha_ctx and record it in @lte (if @lte is
+ * unhashed) or verify it against @lte's existing hash.  Returns 0 on success
+ * or WIMLIB_ERR_INVALID_RESOURCE_HASH on a mismatch. */
+static int
+finalize_and_check_sha1(SHA_CTX *sha_ctx, struct wim_lookup_table_entry *lte)
+{
+	u8 digest[SHA1_HASH_SIZE];
+
+	sha1_final(digest, sha_ctx);
+	if (lte->unhashed) {
+		/* First time this stream's data has been hashed: record it. */
+		copy_hash(lte->hash, digest);
+		return 0;
+	}
+	if (hashes_equal(digest, lte->hash))
+		return 0;
+	ERROR("WIM resource has incorrect hash!");
+	if (lte_filename_valid(lte)) {
+		ERROR("We were reading it from \"%"TS"\"; maybe "
+		      "it changed while we were reading it.",
+		      lte->file_on_disk);
+	}
+	return WIMLIB_ERR_INVALID_RESOURCE_HASH;
+}
+
+
+/* Per-resource state threaded through write_resource_cb() while streaming a
+ * resource's data to the output WIM. */
+struct write_resource_ctx {
+ compress_func_t compress; /* Compression function, or NULL to write uncompressed */
+ struct chunk_table *chunk_tab; /* Chunk table being built (compressed output only) */
+ FILE *out_fp; /* Output WIM file stream */
+ SHA_CTX sha_ctx; /* Running SHA1 of the uncompressed data */
+ bool doing_sha; /* true if the SHA1 is being computed */
+};
+
+/* Callback for read_resource_prefix():  updates the running SHA1 (when
+ * enabled) and writes one chunk of resource data to the output WIM. */
+static int
+write_resource_cb(const void *chunk, size_t chunk_size, void *_ctx)
+{
+	struct write_resource_ctx *ctx = _ctx;
+
+	if (ctx->doing_sha)
+		sha1_update(&ctx->sha_ctx, chunk, chunk_size);
+
+	if (ctx->compress)
+		return write_wim_resource_chunk(chunk, chunk_size,
+						ctx->out_fp, ctx->compress,
+						ctx->chunk_tab);
+
+	/* Uncompressed output: write the data straight through. */
+	if (fwrite(chunk, 1, chunk_size, ctx->out_fp) != chunk_size) {
+		ERROR_WITH_ERRNO("Error writing to output WIM");
+		return WIMLIB_ERR_WRITE;
+	}
+	return 0;
+}
+
+/*
+ * Write a resource to an output WIM.
+ *
+ * @lte:  Lookup table entry for the resource, which could be in another WIM,
+ *        in an external file, or in another location.
+ *
+ * @out_fp:  FILE * opened to the output WIM.
+ *
+ * @out_ctype:  One of the WIMLIB_COMPRESSION_TYPE_* constants to indicate
+ *              which compression algorithm to use.
+ *
+ * @out_res_entry:  On success, this is filled in with the offset, flags,
+ *                  compressed size, and uncompressed size of the resource
+ *                  in the output WIM.
+ *
+ * @flags:  WIMLIB_RESOURCE_FLAG_RECOMPRESS to force data to be recompressed
+ *          even if it could otherwise be copied directly from the input.
+ *
+ * Additional notes: The SHA1 message digest of the uncompressed data is
+ * calculated (except when doing a raw copy --- see below).  If the @unhashed
+ * flag is set on the lookup table entry, this message digest is simply copied
+ * to it; otherwise, the message digest is compared with the existing one, and
+ * the function will fail if they do not match.
+ */
+int
+write_wim_resource(struct wim_lookup_table_entry *lte,
+		   FILE *out_fp, int out_ctype,
+		   struct resource_entry *out_res_entry,
+		   int flags)
+{
+	struct write_resource_ctx write_ctx;
+	u64 read_size;
+	u64 new_size;
+	off_t offset;
+	int ret;
+
+	/* RAW is an internal flag that this function sets below when a raw
+	 * copy is possible, so clear any stale value passed in by the caller.
+	 * (Do NOT clear RECOMPRESS here:  doing so made the recompress test
+	 * below always succeed, silently ignoring the caller's request to
+	 * force recompression.) */
+	flags &= ~WIMLIB_RESOURCE_FLAG_RAW;
+
+	/* Get current position in output WIM */
+	offset = ftello(out_fp);
+	if (offset == -1) {
+		ERROR_WITH_ERRNO("Can't get position in output WIM");
+		return WIMLIB_ERR_WRITE;
+	}
+
+	/* If we are not forcing the data to be recompressed, and the input
+	 * resource is located in a WIM with the same compression type as that
+	 * desired other than no compression, we can simply copy the compressed
+	 * data without recompressing it.  This also means we must skip
+	 * calculating the SHA1, as we never will see the uncompressed data. */
+	if (!(flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) &&
+	    lte->resource_location == RESOURCE_IN_WIM &&
+	    out_ctype != WIMLIB_COMPRESSION_TYPE_NONE &&
+	    wimlib_get_compression_type(lte->wim) == out_ctype)
+	{
+		flags |= WIMLIB_RESOURCE_FLAG_RAW;
+		write_ctx.doing_sha = false;
+		read_size = lte->resource_entry.size;
+	} else {
+		write_ctx.doing_sha = true;
+		sha1_init(&write_ctx.sha_ctx);
+		read_size = lte->resource_entry.original_size;
+	}
+
+	/* Initialize the chunk table and set the compression function if
+	 * compressing the resource. */
+	if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE ||
+	    (flags & WIMLIB_RESOURCE_FLAG_RAW)) {
+		write_ctx.compress = NULL;
+		write_ctx.chunk_tab = NULL;
+	} else {
+		write_ctx.compress = get_compress_func(out_ctype);
+		ret = begin_wim_resource_chunk_tab(lte, out_fp,
+						   offset,
+						   &write_ctx.chunk_tab);
+		if (ret)
+			return ret;
+	}
+
+	/* Write the entire resource by reading the entire resource and feeding
+	 * the data through the write_resource_cb function. */
+	write_ctx.out_fp = out_fp;
+try_write_again:
+	ret = read_resource_prefix(lte, read_size,
+				   write_resource_cb, &write_ctx, flags);
+	if (ret)
+		goto out_free_chunk_tab;
+
+	/* Verify SHA1 message digest of the resource, or set the hash for the
+	 * first time. */
+	if (write_ctx.doing_sha) {
+		ret = finalize_and_check_sha1(&write_ctx.sha_ctx, lte);
+		if (ret)
+			goto out_free_chunk_tab;
+	}
+
+	out_res_entry->flags = lte->resource_entry.flags;
+	out_res_entry->original_size = wim_resource_size(lte);
+	out_res_entry->offset = offset;
+	if (flags & WIMLIB_RESOURCE_FLAG_RAW) {
+		/* Doing a raw write:  The new compressed size is the same as
+		 * the compressed size in the other WIM. */
+		new_size = lte->resource_entry.size;
+	} else if (out_ctype == WIMLIB_COMPRESSION_TYPE_NONE) {
+		/* Using WIMLIB_COMPRESSION_TYPE_NONE:  The new compressed size
+		 * is the original size. */
+		new_size = lte->resource_entry.original_size;
+		out_res_entry->flags &= ~WIM_RESHDR_FLAG_COMPRESSED;
+	} else {
+		/* Using a different compression type:  Call
+		 * finish_wim_resource_chunk_tab() and it will provide the new
+		 * compressed size. */
+		ret = finish_wim_resource_chunk_tab(write_ctx.chunk_tab, out_fp,
+						    &new_size);
+		if (ret)
+			goto out_free_chunk_tab;
+		if (new_size >= wim_resource_size(lte)) {
+			/* Oops!  We compressed the resource to larger than the
+			 * original size.  Truncate the file back to the
+			 * resource's start and write it uncompressed instead. */
+			if (fseeko(out_fp, offset, SEEK_SET) ||
+			    fflush(out_fp) ||
+			    ftruncate(fileno(out_fp),
+				      offset + wim_resource_size(lte)))
+			{
+				ERROR_WITH_ERRNO("Failed to flush and/or truncate "
+						 "output WIM file");
+				ret = WIMLIB_ERR_WRITE;
+				goto out_free_chunk_tab;
+			}
+			DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; "
+			      "writing uncompressed instead",
+			      wim_resource_size(lte), new_size);
+			/* SHA1 was already verified on the first pass. */
+			write_ctx.compress = NULL;
+			write_ctx.doing_sha = false;
+			out_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
+			goto try_write_again;
+		}
+		out_res_entry->flags |= WIM_RESHDR_FLAG_COMPRESSED;
+	}
+	out_res_entry->size = new_size;
+	ret = 0;
+out_free_chunk_tab:
+	FREE(write_ctx.chunk_tab);
+	return ret;
+}
+
+#ifdef ENABLE_MULTITHREADED_COMPRESSION
+
+/* Blocking shared queue (solves the producer-consumer problem) */
+struct shared_queue {
+ unsigned size; /* Capacity (number of slots) */
+ unsigned front; /* Index of the next slot to dequeue */
+ unsigned back; /* Index of the most recently enqueued slot */
+ unsigned filled_slots; /* Number of slots currently occupied */
+ void **array; /* Circular buffer of queued pointers */
+ pthread_mutex_t lock; /* Protects all fields above */
+ pthread_cond_t msg_avail_cond; /* Broadcast when a message is enqueued */
+ pthread_cond_t space_avail_cond; /* Broadcast when a slot is freed */
+};
+
+/* Initialize the shared queue @q with a capacity of @size slots.
+ * Returns 0 on success or WIMLIB_ERR_NOMEM if the slot array cannot be
+ * allocated. */
+static int
+shared_queue_init(struct shared_queue *q, unsigned size)
+{
+	wimlib_assert(size != 0);
+
+	q->array = CALLOC(sizeof(q->array[0]), size);
+	if (!q->array)
+		return WIMLIB_ERR_NOMEM;
+
+	q->size = size;
+	q->front = 0;
+	/* Empty queue: back sits one slot "behind" front (mod size). */
+	q->back = size - 1;
+	q->filled_slots = 0;
+	pthread_mutex_init(&q->lock, NULL);
+	pthread_cond_init(&q->msg_avail_cond, NULL);
+	pthread_cond_init(&q->space_avail_cond, NULL);
+	return 0;
+}
+
+/* Release the resources held by shared queue @q (does not free @q itself). */
+static void
+shared_queue_destroy(struct shared_queue *q)
+{
+	pthread_cond_destroy(&q->space_avail_cond);
+	pthread_cond_destroy(&q->msg_avail_cond);
+	pthread_mutex_destroy(&q->lock);
+	FREE(q->array);
+}
+
+/* Enqueue @obj onto @q, blocking while the queue is full. */
+static void
+shared_queue_put(struct shared_queue *q, void *obj)
+{
+ pthread_mutex_lock(&q->lock);
+ /* Re-check the predicate after every wakeup (guards against spurious
+ * wakeups). */
+ while (q->filled_slots == q->size)
+ pthread_cond_wait(&q->space_avail_cond, &q->lock);
+
+ /* Insert at the back of the circular buffer. */
+ q->back = (q->back + 1) % q->size;
+ q->array[q->back] = obj;
+ q->filled_slots++;
+
+ /* Wake all threads waiting for a message; each re-checks filled_slots. */
+ pthread_cond_broadcast(&q->msg_avail_cond);
+ pthread_mutex_unlock(&q->lock);
+}
+
+/* Dequeue and return the oldest object in @q, blocking while the queue is
+ * empty. */
+static void *
+shared_queue_get(struct shared_queue *q)
+{
+ void *obj;
+
+ pthread_mutex_lock(&q->lock);
+ /* Re-check the predicate after every wakeup (guards against spurious
+ * wakeups). */
+ while (q->filled_slots == 0)
+ pthread_cond_wait(&q->msg_avail_cond, &q->lock);
+
+ /* Remove from the front of the circular buffer; clear the slot so no
+ * stale pointer remains in the array. */
+ obj = q->array[q->front];
+ q->array[q->front] = NULL;
+ q->front = (q->front + 1) % q->size;
+ q->filled_slots--;
+
+ /* Wake all threads waiting for free space. */
+ pthread_cond_broadcast(&q->space_avail_cond);
+ pthread_mutex_unlock(&q->lock);
+ return obj;
+}
+
+/* Arguments passed to each compressor thread. */
+struct compressor_thread_params {
+ struct shared_queue *res_to_compress_queue; /* Messages awaiting compression */
+ struct shared_queue *compressed_res_queue; /* Compressed messages, back to the main thread */
+ compress_func_t compress; /* Compression function to apply to each chunk */
+};
+
+/* Maximum number of chunks a single message may carry. */
+#define MAX_CHUNKS_PER_MSG 2
+
+/* A unit of work exchanged between the main thread and the compressor
+ * threads:  up to MAX_CHUNKS_PER_MSG chunks of one stream's data. */
+struct message {
+ struct wim_lookup_table_entry *lte; /* Stream the chunks belong to */
+ u8 *uncompressed_chunks[MAX_CHUNKS_PER_MSG]; /* Input chunk data */
+ u8 *out_compressed_chunks[MAX_CHUNKS_PER_MSG]; /* Per chunk: points at the compressed or the uncompressed buffer, whichever is to be written */
+ u8 *compressed_chunks[MAX_CHUNKS_PER_MSG]; /* Compression output buffers */
+ unsigned uncompressed_chunk_sizes[MAX_CHUNKS_PER_MSG]; /* Input sizes in bytes */
+ unsigned compressed_chunk_sizes[MAX_CHUNKS_PER_MSG]; /* Size of the data to be written, per chunk */
+ unsigned num_chunks; /* Number of chunks carried (<= MAX_CHUNKS_PER_MSG) */
+ struct list_head list; /* NOTE(review): list linkage; owning list managed outside this excerpt */
+ bool complete; /* NOTE(review): set/used outside this excerpt — confirm semantics there */
+ u64 begin_chunk; /* NOTE(review): presumably index of the first chunk in this message — confirm */
+};
+
+/* Compress every chunk of @msg with @compress, recording for each chunk
+ * whether the compressed or the original copy should be written out. */
+static void
+compress_chunks(struct message *msg, compress_func_t compress)
+{
+	for (unsigned i = 0; i < msg->num_chunks; i++) {
+		unsigned clen;
+
+		DEBUG2("compress chunk %u of %u", i, msg->num_chunks);
+		clen = compress(msg->uncompressed_chunks[i],
+				msg->uncompressed_chunk_sizes[i],
+				msg->compressed_chunks[i]);
+		if (clen != 0) {
+			/* Chunk shrank: write the compressed copy. */
+			msg->out_compressed_chunks[i] = msg->compressed_chunks[i];
+			msg->compressed_chunk_sizes[i] = clen;
+		} else {
+			/* Chunk did not compress: write it uncompressed. */
+			msg->out_compressed_chunks[i] = msg->uncompressed_chunks[i];
+			msg->compressed_chunk_sizes[i] = msg->uncompressed_chunk_sizes[i];
+		}
+	}
+}
+
+/* Compressor thread routine.  Much simpler than the main thread routine:
+ * repeatedly take a message (a group of chunks) from the
+ * res_to_compress_queue, compress its chunks, and hand the message to the
+ * compressed_res_queue.  A NULL message tells the thread to stop. */
+static void *
+compressor_thread_proc(void *arg)
+{
+	struct compressor_thread_params *params = arg;
+	struct message *msg;
+
+	DEBUG("Compressor thread ready");
+	for (;;) {
+		msg = shared_queue_get(params->res_to_compress_queue);
+		if (msg == NULL)
+			break;
+		compress_chunks(msg, params->compress);
+		shared_queue_put(params->compressed_res_queue, msg);
+	}
+	DEBUG("Compressor thread terminating");
+	return NULL;
+}
+#endif /* ENABLE_MULTITHREADED_COMPRESSION */
+
+/* Account for @size_added bytes of one completed stream in the write-streams
+ * progress info, invoking @progress_func whenever the completed byte count
+ * crosses the next reporting threshold (stored in _private). */
+static void
+do_write_streams_progress(union wimlib_progress_info *progress,
+ wimlib_progress_func_t progress_func,
+ uint64_t size_added)
+{
+ progress->write_streams.completed_bytes += size_added;
+ progress->write_streams.completed_streams++;
+ /* _private holds the completed-bytes value at which the next progress
+ * callback should fire. */
+ if (progress_func &&
+ progress->write_streams.completed_bytes >= progress->write_streams._private)
+ {
+ progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS,
+ progress);
+ if (progress->write_streams._private == progress->write_streams.total_bytes) {
+ /* The 100% report has been delivered; disable further ones. */
+ progress->write_streams._private = ~0;
+ } else {
+ /* Schedule the next report roughly 1% of the total
+ * later, capped at the total itself. */
+ progress->write_streams._private =
+ min(progress->write_streams.total_bytes,
+ progress->write_streams.completed_bytes +
+ progress->write_streams.total_bytes / 100);
+ }
+ }
+}
+
+/*
+ * Write each stream in @stream_list to the output WIM.
+ *
+ * Unhashed streams that share their size with another stream being written
+ * are checksummed first, so that duplicates can be detected and skipped.
+ * Progress information in @progress is updated after each stream.
+ *
+ * Returns 0 on success; nonzero on the first failure.
+ *
+ * (Fix: the "&lte->..." expressions below had been corrupted into "<e->..."
+ * by HTML-entity mangling of "&lt", which does not compile; they are
+ * restored here.)
+ */
+static int
+do_write_stream_list(struct list_head *stream_list,
+		     struct wim_lookup_table *lookup_table,
+		     FILE *out_fp,
+		     int out_ctype,
+		     int write_resource_flags,
+		     wimlib_progress_func_t progress_func,
+		     union wimlib_progress_info *progress)
+{
+	int ret = 0;
+	struct wim_lookup_table_entry *lte;
+
+	/* For each stream in @stream_list ... */
+	while (!list_empty(stream_list)) {
+		lte = container_of(stream_list->next,
+				   struct wim_lookup_table_entry,
+				   write_streams_list);
+		list_del(&lte->write_streams_list);
+		if (lte->unhashed && !lte->unique_size) {
+			/* Unhashed stream that shares a size with some other
+			 * stream in the WIM we are writing.  The stream must be
+			 * checksummed to know if we need to write it or not. */
+			struct wim_lookup_table_entry *tmp;
+			u32 orig_refcnt = lte->out_refcnt;
+
+			ret = hash_unhashed_stream(lte,
+						   lookup_table,
+						   &tmp);
+			if (ret)
+				break;
+			if (tmp != lte) {
+				lte = tmp;
+				/* We found a duplicate stream. */
+				if (orig_refcnt != tmp->out_refcnt) {
+					/* We have already written, or are going
+					 * to write, the duplicate stream.  So
+					 * just skip to the next stream. */
+					DEBUG("Discarding duplicate stream of length %"PRIu64,
+					      wim_resource_size(lte));
+					goto skip_to_progress;
+				}
+			}
+		}
+
+		/* Here, @lte is either a hashed stream or an unhashed stream
+		 * with a unique size.  In either case we know that the stream
+		 * has to be written.  In either case the SHA1 message digest
+		 * will be calculated over the stream while writing it; however,
+		 * in the former case this is done merely to check the data,
+		 * while in the latter case this is done because we do not have
+		 * the SHA1 message digest yet.  */
+		wimlib_assert(lte->out_refcnt != 0);
+		ret = write_wim_resource(lte,
+					 out_fp,
+					 out_ctype,
+					 &lte->output_resource_entry,
+					 write_resource_flags);
+		if (ret)
+			break;
+		if (lte->unhashed) {
+			/* Now that the stream's hash is known, move it from the
+			 * unhashed list into the lookup table proper. */
+			list_del(&lte->unhashed_list);
+			lookup_table_insert(lookup_table, lte);
+			lte->unhashed = 0;
+		}
+	skip_to_progress:
+		do_write_streams_progress(progress,
+					  progress_func,
+					  wim_resource_size(lte));
+	}
+	return ret;
+}
+
+/* Write the streams in @stream_list to the output WIM using a single thread,
+ * reporting progress through @progress_func if provided. */
+static int
+write_stream_list_serial(struct list_head *stream_list,
+			 struct wim_lookup_table *lookup_table,
+			 FILE *out_fp,
+			 int out_ctype,
+			 int write_flags,
+			 wimlib_progress_func_t progress_func,
+			 union wimlib_progress_info *progress)
+{
+	int write_resource_flags;
+
+	/* Translate the write flag into the corresponding resource flag. */
+	write_resource_flags = (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS) ?
+				WIMLIB_RESOURCE_FLAG_RECOMPRESS : 0;
+
+	progress->write_streams.num_threads = 1;
+	if (progress_func)
+		progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress);
+	return do_write_stream_list(stream_list, lookup_table, out_fp,
+				    out_ctype, write_resource_flags,
+				    progress_func, progress);
+}
+
+#ifdef ENABLE_MULTITHREADED_COMPRESSION
+/* Write out the chunks carried by @msg to @out_fp, recording each chunk's
+ * offset in @chunk_tab.  Returns 0 on success; WIMLIB_ERR_WRITE on failure. */
+static int
+write_wim_chunks(struct message *msg, FILE *out_fp,
+		 struct chunk_table *chunk_tab)
+{
+	unsigned i;
+
+	for (i = 0; i < msg->num_chunks; i++) {
+		unsigned csize = msg->compressed_chunk_sizes[i];
+		size_t written;
+
+		DEBUG2("Write wim chunk %u of %u (csize = %u)",
+		       i, msg->num_chunks, csize);
+
+		written = fwrite(msg->out_compressed_chunks[i], 1,
+				 csize, out_fp);
+		if (written != csize) {
+			ERROR_WITH_ERRNO("Failed to write WIM chunk");
+			return WIMLIB_ERR_WRITE;
+		}
+
+		*chunk_tab->cur_offset_p++ = chunk_tab->cur_offset;
+		chunk_tab->cur_offset += csize;
+	}
+	return 0;
+}
+
+/* State of the main thread when writing streams with multithreaded
+ * compression.  Several fields drive logic outside this excerpt; comments on
+ * those are marked as unverified. */
+struct main_writer_thread_ctx {
+ struct list_head *stream_list; /* Streams to be written */
+ struct wim_lookup_table *lookup_table; /* Lookup table of the WIM being written */
+ FILE *out_fp; /* Output WIM file stream */
+ int out_ctype; /* Output compression type */
+ struct shared_queue *res_to_compress_queue; /* Messages to the compressor threads */
+ struct shared_queue *compressed_res_queue; /* Messages back from the compressor threads */
+ size_t num_messages; /* Number of entries in @msgs */
+ int write_flags;
+ wimlib_progress_func_t progress_func;
+ union wimlib_progress_info *progress;
+
+ struct list_head available_msgs; /* NOTE(review): presumably messages not in flight — confirm */
+ struct list_head outstanding_streams;
+ struct list_head serial_streams;
+ u64 next_chunk; /* NOTE(review): presumably index of the next chunk to queue — confirm */
+ u64 next_num_chunks;
+ struct message *msgs; /* Array of @num_messages messages */
+ struct message *next_msg;
+ size_t next_chunk_in_msg;
+ struct wim_lookup_table_entry *cur_lte;
+ struct chunk_table *cur_chunk_tab;
+ struct wim_lookup_table_entry *next_lte;
+ SHA_CTX sha_ctx; /* Running SHA1 of the stream being read */
+ u8 next_hash[20]; /* 20 == SHA1 digest size */
+};
+
+/* Allocate the chunk buffers of @msg.  Returns 0 on success or
+ * WIMLIB_ERR_NOMEM on allocation failure; any buffers already allocated are
+ * left in place for destroy_message() to free. */
+static int
+init_message(struct message *msg)
+{
+	size_t i;
+
+	for (i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
+		msg->compressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
+		msg->uncompressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE);
+		if (!msg->compressed_chunks[i] || !msg->uncompressed_chunks[i])
+			return WIMLIB_ERR_NOMEM;
+	}
+	return 0;
+}
+
+/* Free the chunk buffers of @msg (FREE of a NULL pointer is a no-op). */
+static void
+destroy_message(struct message *msg)
+{
+	size_t i;
+
+	for (i = 0; i < MAX_CHUNKS_PER_MSG; i++) {
+		FREE(msg->uncompressed_chunks[i]);
+		FREE(msg->compressed_chunks[i]);
+	}
+}
+
+/* Destroy and free an array of @num_messages messages (@msgs may be NULL). */
+static void
+free_messages(struct message *msgs, size_t num_messages)
+{
+	size_t i;
+
+	if (!msgs)
+		return;
+	for (i = 0; i < num_messages; i++)
+		destroy_message(&msgs[i]);
+	FREE(msgs);
+}
+
+static struct message *
+allocate_messages(size_t num_messages)
+{
+ struct message *msgs;
+
+ msgs = CALLOC(num_messages, sizeof(struct message));
+ if (!msgs)
+ return NULL;
+ for (size_t i = 0; i < num_messages; i++) {
+ if (init_message(&msgs[i])) {
+ free_messages(msgs, num_messages);
+ return NULL;
+ }