+ wimlib_assert(ctx->cur_write_blob_offset ==
+ ctx->cur_write_res_size);
+
+ ret = end_write_resource(ctx, &blob->out_reshdr);
+ if (ret)
+ return ret;
+
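+	/* Fill in the flags for the blob's output resource header, marking
+	 * the resource as compressed if a compressor was used. */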
+ blob->out_reshdr.flags = reshdr_flags_for_blob(blob);
+ if (ctx->compressor != NULL)
+ blob->out_reshdr.flags |= WIM_RESHDR_FLAG_COMPRESSED;
+
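+	/* If the blob's data did not compress to less than its original
+	 * size, rewrite it uncompressed. */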
+ ret = maybe_rewrite_blob_uncompressed(ctx, blob);
+ if (ret)
+ return ret;
+
+ wimlib_assert(blob->out_reshdr.uncompressed_size == blob->size);
+
+ ctx->cur_write_blob_offset = 0;
+
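+	/* The blob's data will not need to be read again, so it is now safe
+	 * to fire the DONE_WITH_FILE callback. */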
+ ret = done_with_blob(blob, ctx);
+ if (ret)
+ return ret;
+ list_del(&blob->write_blobs_list);
+ completed_blob_count++;
+ }
+ }
+
+ return do_write_blobs_progress(&ctx->progress_data, completed_size,
+ completed_blob_count, false);
+
+write_error:
+ ERROR_WITH_ERRNO("Write error");
+ return ret;
+}
+
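+/* Acquire a buffer into which the next chunk of uncompressed data can be
+ * accumulated before being handed to the compressor. */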
+static int
+prepare_chunk_buffer(struct write_blobs_ctx *ctx)
+{
+ /* While we are unable to get a new chunk buffer due to too many chunks
+ * already outstanding, retrieve and write the next compressed chunk. */
+ while (!(ctx->cur_chunk_buf =
+ ctx->compressor->get_chunk_buffer(ctx->compressor)))
+ {
+		const void *cchunk;	/* compressed chunk data */
+		u32 csize;		/* compressed size of the chunk */
+		u32 usize;		/* uncompressed size of the chunk */
+ bool bret;
+ int ret;
+
+ bret = ctx->compressor->get_compression_result(ctx->compressor,
+ &cchunk,
+ &csize,
+ &usize);
+ wimlib_assert(bret);
+
+ ret = write_chunk(ctx, cchunk, csize, usize);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/* Process the next chunk of data to be written to a WIM resource. */
+static int
+write_blob_process_chunk(const void *chunk, size_t size, void *_ctx)
+{
+ struct write_blobs_ctx *ctx = _ctx;
+ int ret;
+ const u8 *chunkptr, *chunkend;
+
+ wimlib_assert(size != 0);
+
+ if (ctx->compressor == NULL) {
+ /* Write chunk uncompressed. */
+ ret = write_chunk(ctx, chunk, size, size);
+ if (ret)
+ return ret;
+ ctx->cur_read_blob_offset += size;
+ return 0;
+ }
+
+	/* Submit the chunk for compression, but take into account that the
+	 * @size in which the chunk was provided may not match the
+	 * @out_chunk_size being used for compression. */
+ chunkptr = chunk;
+ chunkend = chunkptr + size;
+ do {
+ size_t needed_chunk_size;
+ size_t bytes_consumed;
+
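+		/* Make sure we have a buffer for the current chunk. */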
+ if (!ctx->cur_chunk_buf) {
+ ret = prepare_chunk_buffer(ctx);
+ if (ret)
+ return ret;
+ }
+
+ if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
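+			/* Solid resource: the total uncompressed size of the
+			 * resource doesn't matter, so always fill an entire
+			 * chunk. */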
+ needed_chunk_size = ctx->out_chunk_size;
+ } else {
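+			/* Non-solid resource: the chunk at the end of the
+			 * blob may be shorter than @out_chunk_size. */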
+ needed_chunk_size = min(ctx->out_chunk_size,
+ ctx->cur_chunk_buf_filled +
+ (ctx->cur_read_blob_size -
+ ctx->cur_read_blob_offset));
+ }
+
+ bytes_consumed = min(chunkend - chunkptr,
+ needed_chunk_size - ctx->cur_chunk_buf_filled);
+
+ memcpy(&ctx->cur_chunk_buf[ctx->cur_chunk_buf_filled],
+ chunkptr, bytes_consumed);
+
+ chunkptr += bytes_consumed;
+ ctx->cur_read_blob_offset += bytes_consumed;
+ ctx->cur_chunk_buf_filled += bytes_consumed;
+
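+		/* If the current chunk buffer has been completely filled,
+		 * hand the chunk off to the compressor. */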
+ if (ctx->cur_chunk_buf_filled == needed_chunk_size) {
+ ctx->compressor->signal_chunk_filled(ctx->compressor,
+ ctx->cur_chunk_buf_filled);
+ ctx->cur_chunk_buf = NULL;
+ ctx->cur_chunk_buf_filled = 0;
+ }
+ } while (chunkptr != chunkend);
+ return 0;
+}
+
+/* Finish processing a blob for writing. It may not have been completely
+ * written yet, as the chunk_compressor implementation may still have chunks
+ * buffered or being compressed. */
+static int
+write_blob_end_read(struct blob_descriptor *blob, int status, void *_ctx)
+{
+ struct write_blobs_ctx *ctx = _ctx;
+
+ wimlib_assert(ctx->cur_read_blob_offset == ctx->cur_read_blob_size || status);
+
+ if (!blob->will_be_in_output_wim) {
+ /* The blob was a duplicate. Now that its data has finished
+ * being read, it is being discarded in favor of the duplicate
+	 * entry. It is therefore no longer needed, and we can fire the
+ * DONE_WITH_FILE callback because the file will not be read
+ * again.
+ *
+ * Note: we can't yet fire DONE_WITH_FILE for non-duplicate
+ * blobs, since it needs to be possible to re-read the file if
+ * it does not compress to less than its original size. */
+ if (!status)
+ status = done_with_blob(blob, ctx);
+ free_blob_descriptor(blob);
+ } else if (!status && blob->unhashed && ctx->blob_table != NULL) {
+ /* The blob was not a duplicate and was previously unhashed.
+ * Since we passed COMPUTE_MISSING_BLOB_HASHES to
+ * read_blob_list(), blob->hash is now computed and valid. So
+ * turn this blob into a "hashed" blob. */
+ list_del(&blob->unhashed_list);
+ blob_table_insert(ctx->blob_table, blob);
+ blob->unhashed = 0;
+ }
+ return status;
+}
+
+/*
+ * Compute statistics about a list of blobs that will be written.
+ *
+ * Assumes the blobs are sorted such that all blobs located in each distinct WIM
+ * (specified by WIMStruct) are together.
+ *
+ * For compactions, also verify that there are no overlapping resources. This
+ * really should be checked earlier, but for now it's easiest to check here.
+ */
+static int
+compute_blob_list_stats(struct list_head *blob_list,
+ struct write_blobs_ctx *ctx)
+{
+ struct blob_descriptor *blob;
+ u64 total_bytes = 0;
+ u64 num_blobs = 0;
+ u64 total_parts = 0;
+ WIMStruct *prev_wim_part = NULL;
+ const struct wim_resource_descriptor *prev_rdesc = NULL;
+
+ list_for_each_entry(blob, blob_list, write_blobs_list) {
+ num_blobs++;
+ total_bytes += blob->size;
+ if (blob->blob_location == BLOB_IN_WIM) {
+ const struct wim_resource_descriptor *rdesc = blob->rdesc;
+ WIMStruct *wim = rdesc->wim;
+
+ if (prev_wim_part != wim) {
+ prev_wim_part = wim;
+ total_parts++;
+ }
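+			/* Note: this check assumes that resources within each
+			 * WIM being compacted are encountered in order of
+			 * increasing offset. */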
+ if (unlikely(wim->being_compacted) && rdesc != prev_rdesc) {
+ if (prev_rdesc != NULL &&
+ rdesc->offset_in_wim <
+ prev_rdesc->offset_in_wim +
+ prev_rdesc->size_in_wim)
+ {
+ WARNING("WIM file contains overlapping "
+ "resources! Compaction is not "
+ "possible.");
+ return WIMLIB_ERR_RESOURCE_ORDER;
+ }
+ prev_rdesc = rdesc;