+ do_write_streams_progress(&ctx->progress_data, lte,
+ completed_size, completed_stream_count,
+ false);
+
+ return 0;
+
+error:
+ ERROR_WITH_ERRNO("Write error");
+ return ret;
+}
+
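+/* Submit a chunk of uncompressed data to the chunk_compressor, writing out
+ * already-compressed chunks as needed until the compressor accepts it. */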
+static int
+submit_chunk_for_compression(struct write_streams_ctx *ctx,
+ const void *chunk, size_t size)
+{
+ /* While we are unable to submit the chunk for compression (due to too
+ * many chunks already outstanding), retrieve and write the next
+ * compressed chunk. */
+ while (!ctx->compressor->submit_chunk(ctx->compressor, chunk, size)) {
+ const void *cchunk;
+ unsigned csize;
+ unsigned usize;
+ bool bret;
+ int ret;
+
+ bret = ctx->compressor->get_chunk(ctx->compressor,
+ &cchunk, &csize, &usize);
+
+ wimlib_assert(bret);
+
+ ret = write_chunk(ctx, cchunk, csize, usize);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/* Process the next chunk of data to be written to a WIM resource. */
+static int
+write_stream_process_chunk(const void *chunk, size_t size, void *_ctx)
+{
+ struct write_streams_ctx *ctx = _ctx;
+ int ret;
+ const u8 *chunkptr, *chunkend;
+
+ wimlib_assert(size != 0);
+
+ if (ctx->compressor == NULL) {
+ /* Write chunk uncompressed. */
+ ret = write_chunk(ctx, chunk, size, size);
+ if (ret)
+ return ret;
+ ctx->cur_read_stream_offset += size;
+ return 0;
+ }
+
+ /* Submit the chunk for compression, but take into account that the
+ * @size in which the chunk was provided may not match the
+ * @out_chunk_size being used for compression. */
+ chunkptr = chunk;
+ chunkend = chunkptr + size;
+ do {
+ const u8 *resized_chunk;
+ size_t needed_chunk_size;
+
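+ /* Determine the size of the next chunk to compress. With packed
+ * streams, chunks may span stream boundaries, so a full output chunk
+ * is always used. Otherwise, the final chunk of the stream may be
+ * shorter than the output chunk size. */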
+ if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_PACK_STREAMS) {
+ needed_chunk_size = ctx->out_chunk_size;
+ } else {
+ u64 res_bytes_remaining;
+
+ res_bytes_remaining = ctx->cur_read_stream_size -
+ ctx->cur_read_stream_offset;
+ needed_chunk_size = min(ctx->out_chunk_size,
+ ctx->chunk_buf_filled +
+ res_bytes_remaining);
+ }
+
+ if (ctx->chunk_buf_filled == 0 &&
+ chunkend - chunkptr >= needed_chunk_size)
+ {
+ /* No intermediate buffering needed. */
+ resized_chunk = chunkptr;
+ chunkptr += needed_chunk_size;
+ ctx->cur_read_stream_offset += needed_chunk_size;
+ } else {
+ /* Intermediate buffering needed. */
+ size_t bytes_consumed;
+
+ bytes_consumed = min(chunkend - chunkptr,
+ needed_chunk_size - ctx->chunk_buf_filled);
+
+ memcpy(&ctx->chunk_buf[ctx->chunk_buf_filled],
+ chunkptr, bytes_consumed);
+
+ chunkptr += bytes_consumed;
+ ctx->cur_read_stream_offset += bytes_consumed;
+ ctx->chunk_buf_filled += bytes_consumed;
+ if (ctx->chunk_buf_filled == needed_chunk_size) {
+ resized_chunk = ctx->chunk_buf;
+ ctx->chunk_buf_filled = 0;
+ } else {
+ break;
+ }
+ }
+
+ ret = submit_chunk_for_compression(ctx, resized_chunk,
+ needed_chunk_size);
+ if (ret)
+ return ret;
+
+ } while (chunkptr != chunkend);
+ return 0;
+}
+
+/* Finish processing a stream for writing. It may not have been completely
+ * written yet, as the chunk_compressor implementation may still have chunks
+ * buffered or being compressed. */
+static int
+write_stream_end_read(struct wim_lookup_table_entry *lte, int status, void *_ctx)
+{
+ struct write_streams_ctx *ctx = _ctx;
+ if (status == 0)
+ wimlib_assert(ctx->cur_read_stream_offset == ctx->cur_read_stream_size);
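+ /* If the stream was found to be a duplicate, its lookup table entry is
+ * no longer needed and is freed. Otherwise, if the stream was unhashed,
+ * move it from its unhashed list into the lookup table. */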
+ if (ctx->stream_was_duplicate) {
+ free_lookup_table_entry(lte);
+ } else if (lte->unhashed && ctx->lookup_table != NULL) {
+ list_del(&lte->unhashed_list);
+ lookup_table_insert(ctx->lookup_table, lte);
+ lte->unhashed = 0;
+ }
+ return status;
+}
+
+/* Compute statistics about a list of streams that will be written.
+ *
+ * Assumes the streams are sorted such that all streams located in each distinct
+ * WIM (specified by WIMStruct) are together. */
+static void
+compute_stream_list_stats(struct list_head *stream_list,
+ struct write_streams_ctx *ctx)
+{
+ struct wim_lookup_table_entry *lte;
+ u64 total_bytes = 0;
+ u64 num_streams = 0;
+ u64 total_parts = 0;
+ WIMStruct *prev_wim_part = NULL;
+
+ list_for_each_entry(lte, stream_list, write_streams_list) {
+ num_streams++;
+ total_bytes += lte->size;
+ if (lte->resource_location == RESOURCE_IN_WIM) {
+ if (prev_wim_part != lte->rspec->wim) {
+ prev_wim_part = lte->rspec->wim;
+ total_parts++;
+ }
+ }
+ }
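+ /* Record the computed totals in the progress data and reset the
+ * completed counts. */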
+ ctx->progress_data.progress.write_streams.total_bytes = total_bytes;
+ ctx->progress_data.progress.write_streams.total_streams = num_streams;
+ ctx->progress_data.progress.write_streams.completed_bytes = 0;
+ ctx->progress_data.progress.write_streams.completed_streams = 0;
+ ctx->progress_data.progress.write_streams.compression_type = ctx->out_ctype;
+ ctx->progress_data.progress.write_streams.total_parts = total_parts;
+ ctx->progress_data.progress.write_streams.completed_parts = 0;
+ ctx->progress_data.next_progress = 0;
+ ctx->progress_data.prev_wim_part = NULL;
+}
+
+/* Find streams in @stream_list that can be copied to the output WIM in raw form
+ * rather than compressed. Delete these streams from @stream_list, and move one
+ * per resource to @raw_copy_resources. Return the total uncompressed size of
+ * the streams that need to be compressed. */
+static u64
+find_raw_copy_resources(struct list_head *stream_list,
+ int write_resource_flags,
+ int out_ctype,
+ u32 out_chunk_size,
+ struct list_head *raw_copy_resources)
+{
+ struct wim_lookup_table_entry *lte, *tmp;
+ u64 num_bytes_to_compress = 0;
+
+ INIT_LIST_HEAD(raw_copy_resources);
+
+ /* Initialize temporary raw_copy_ok flag. */
+ list_for_each_entry(lte, stream_list, write_streams_list)
+ if (lte->resource_location == RESOURCE_IN_WIM)
+ lte->rspec->raw_copy_ok = 0;
+
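+ /* For each resource, only the first qualifying stream is moved to
+ * @raw_copy_resources and marks the resource with raw_copy_ok;
+ * additional streams belonging to an already-marked resource are simply
+ * removed from @stream_list. */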
+ list_for_each_entry_safe(lte, tmp, stream_list, write_streams_list) {
+ if (lte->resource_location == RESOURCE_IN_WIM &&
+ lte->rspec->raw_copy_ok)
+ {
+ list_del(&lte->write_streams_list);
+ } else if (can_raw_copy(lte, write_resource_flags,
+ out_ctype, out_chunk_size))
+ {
+ lte->rspec->raw_copy_ok = 1;
+ list_move_tail(&lte->write_streams_list,
+ raw_copy_resources);
+ } else {
+ num_bytes_to_compress += lte->size;
+ }
+ }
+
+ return num_bytes_to_compress;
+}
+
+/* Copy a raw compressed resource located in another WIM file to the WIM file
+ * being written. */
+static int
+write_raw_copy_resource(struct wim_resource_spec *in_rspec,
+ struct filedes *out_fd)
+{
+ u64 cur_read_offset;
+ u64 end_read_offset;
+ u8 buf[BUFFER_SIZE];
+ size_t bytes_to_read;
+ int ret;
+ struct filedes *in_fd;
+ struct wim_lookup_table_entry *lte;
+ u64 out_offset_in_wim;
+
+ DEBUG("Copying raw compressed data (size_in_wim=%"PRIu64", "
+ "uncompressed_size=%"PRIu64")",
+ in_rspec->size_in_wim, in_rspec->uncompressed_size);
+
+ /* Copy the raw data. */
+ cur_read_offset = in_rspec->offset_in_wim;
+ end_read_offset = cur_read_offset + in_rspec->size_in_wim;
+
+ out_offset_in_wim = out_fd->offset;
+
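+ /* In a pipable source WIM, the resource data is preceded by a stream
+ * header; include it in the copy and record the output offset as
+ * pointing just past it. */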
+ if (in_rspec->is_pipable) {
+ if (cur_read_offset < sizeof(struct pwm_stream_hdr))
+ return WIMLIB_ERR_INVALID_PIPABLE_WIM;
+ cur_read_offset -= sizeof(struct pwm_stream_hdr);
+ out_offset_in_wim += sizeof(struct pwm_stream_hdr);
+ }
+ in_fd = &in_rspec->wim->in_fd;
+ wimlib_assert(cur_read_offset != end_read_offset);
+ do {
+
+ bytes_to_read = min(sizeof(buf), end_read_offset - cur_read_offset);
+
+ ret = full_pread(in_fd, buf, bytes_to_read, cur_read_offset);
+ if (ret)
+ return ret;
+
+ ret = full_write(out_fd, buf, bytes_to_read);
+ if (ret)
+ return ret;
+
+ cur_read_offset += bytes_to_read;
+
+ } while (cur_read_offset != end_read_offset);
+
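+ /* Update the output resource headers of all streams in this resource
+ * that will be in the output WIM to point to the copied data. */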
+ list_for_each_entry(lte, &in_rspec->stream_list, rspec_node) {
+ if (lte->will_be_in_output_wim) {
+ stream_set_out_reshdr_for_reuse(lte);
+ if (in_rspec->flags & WIM_RESHDR_FLAG_PACKED_STREAMS)
+ lte->out_res_offset_in_wim = out_offset_in_wim;
+ else
+ lte->out_reshdr.offset_in_wim = out_offset_in_wim;
+
+ }
+ }
+ return 0;
+}
+
+/* Copy a list of raw compressed resources located in other WIM file(s) to the
+ * WIM file being written. */
+static int
+write_raw_copy_resources(struct list_head *raw_copy_resources,
+ struct filedes *out_fd,
+ struct write_streams_progress_data *progress_data)
+{
+ struct wim_lookup_table_entry *lte;
+ int ret;
+
+ list_for_each_entry(lte, raw_copy_resources, write_streams_list) {
+ ret = write_raw_copy_resource(lte->rspec, out_fd);
+ if (ret)
+ return ret;
+ do_write_streams_progress(progress_data, lte, lte->size,
+ 1, false);
+ }
+ return 0;
+}
+
+/* Wait for and write all chunks pending in the compressor. */
+static int
+finish_remaining_chunks(struct write_streams_ctx *ctx)
+{
+ const void *cdata;
+ unsigned csize;
+ unsigned usize;
+ int ret;
+
+ if (ctx->compressor == NULL)
+ return 0;
+
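+ /* Flush any partially filled chunk buffer as a final, shorter chunk. */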
+ if (ctx->chunk_buf_filled != 0) {
+ ret = submit_chunk_for_compression(ctx, ctx->chunk_buf,
+ ctx->chunk_buf_filled);
+ if (ret)
+ return ret;
+ }
+
+ while (ctx->compressor->get_chunk(ctx->compressor, &cdata, &csize, &usize)) {
+ ret = write_chunk(ctx, cdata, csize, usize);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
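+/* Remove zero-length streams from the stream list and fill in their output
+ * resource headers with zero sizes, since no resource data is written for
+ * them. */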
+static void
+remove_zero_length_streams(struct list_head *stream_list)
+{
+ struct wim_lookup_table_entry *lte, *tmp;
+
+ list_for_each_entry_safe(lte, tmp, stream_list, write_streams_list) {
+ wimlib_assert(lte->will_be_in_output_wim);
+ if (lte->size == 0) {
+ list_del(&lte->write_streams_list);
+ lte->out_reshdr.offset_in_wim = 0;
+ lte->out_reshdr.size_in_wim = 0;
+ lte->out_reshdr.uncompressed_size = 0;
+ lte->out_reshdr.flags = filter_resource_flags(lte->flags);
+ }
+ }