Fix slow progress updating for wimsplit
diff --git a/src/write.c b/src/write.c
index f5a9935dff47ea146780c6444812aa80eda541c8..bca5e862a2f1e2549bb471c9a66855b4714f0216 100644
--- a/src/write.c
+++ b/src/write.c
@@ -301,7 +301,8 @@ struct write_blobs_progress_data {
 
 static int
 do_write_blobs_progress(struct write_blobs_progress_data *progress_data,
-                       u64 complete_size, u32 complete_count, bool discarded)
+                       u64 complete_size, u64 complete_compressed_size,
+                       u32 complete_count, bool discarded)
 {
        union wimlib_progress_info *progress = &progress_data->progress;
        int ret;
@@ -316,6 +317,8 @@ do_write_blobs_progress(struct write_blobs_progress_data *progress_data,
                }
        } else {
                progress->write_streams.completed_bytes += complete_size;
+               progress->write_streams.completed_compressed_bytes +=
+                       complete_compressed_size;
                progress->write_streams.completed_streams += complete_count;
        }
 
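
The two hunks above thread a second size through the progress path: callers now report how many compressed bytes each completed chunk or blob occupied on disk, and do_write_blobs_progress() accumulates that in completed_compressed_bytes alongside the existing uncompressed counter. Below is a minimal sketch of a caller-side progress function that could use the new counter to show a running compression ratio. The registration API and the WIMLIB_PROGRESS_MSG_WRITE_STREAMS message are wimlib's public interface; the completed_compressed_bytes field is assumed to be declared in wimlib.h alongside completed_bytes as of this change.

	#include <inttypes.h>
	#include <stdio.h>
	#include <wimlib.h>

	static enum wimlib_progress_status
	report_ratio(enum wimlib_progress_msg msg,
		     union wimlib_progress_info *info, void *progctx)
	{
		if (msg == WIMLIB_PROGRESS_MSG_WRITE_STREAMS &&
		    info->write_streams.completed_bytes != 0)
		{
			/* completed_compressed_bytes is the counter this diff
			 * begins maintaining in do_write_blobs_progress().  */
			double ratio =
				(double)info->write_streams.completed_compressed_bytes /
				(double)info->write_streams.completed_bytes;

			fprintf(stderr, "\r%"PRIu64" of %"PRIu64" bytes written (ratio %.1f%%)",
				info->write_streams.completed_bytes,
				info->write_streams.total_bytes,
				100.0 * ratio);
		}
		return WIMLIB_PROGRESS_STATUS_CONTINUE;
	}

It would be installed with wimlib_register_progress_function(wim, report_ratio, NULL) before calling wimlib_write() or wimlib_split().
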
@@ -377,12 +380,6 @@ struct write_blobs_ctx {
         * @blobs_being_compressed only when writing a solid resource.  */
        struct list_head blobs_in_solid_resource;
 
-       /* Current uncompressed offset in the blob being read.  */
-       u64 cur_read_blob_offset;
-
-       /* Uncompressed size of the blob currently being read.  */
-       u64 cur_read_blob_size;
-
        /* Current uncompressed offset in the blob being written.  */
        u64 cur_write_blob_offset;
 
@@ -689,9 +686,6 @@ write_blob_begin_read(struct blob_descriptor *blob, void *_ctx)
 
        wimlib_assert(blob->size > 0);
 
-       ctx->cur_read_blob_offset = 0;
-       ctx->cur_read_blob_size = blob->size;
-
        /* As an optimization, we allow some blobs to be "unhashed", meaning
         * their SHA-1 message digests are unknown.  This is the case with blobs
         * that are added by scanning a directory tree with wimlib_add_image(),
@@ -722,7 +716,9 @@ write_blob_begin_read(struct blob_descriptor *blob, void *_ctx)
                                 * output reference count to the duplicate blob
                                 * in the former case.  */
                                ret = do_write_blobs_progress(&ctx->progress_data,
-                                                             blob->size, 1, true);
+                                                             blob->size,
+                                                             blob->size,
+                                                             1, true);
                                list_del(&blob->write_blobs_list);
                                list_del(&blob->blob_table_list);
                                if (new_blob->will_be_in_output_wim)
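
For a discarded duplicate no new data is written at all, so the call simply reports blob->size for both the uncompressed and the compressed amount, keeping the two counters consistent for blobs that are skipped in favor of an identical blob already in the output.
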
@@ -876,8 +872,7 @@ write_chunk(struct write_blobs_ctx *ctx, const void *cchunk,
 {
        int ret;
        struct blob_descriptor *blob;
-       u32 completed_blob_count;
-       u32 completed_size;
+       u32 completed_blob_count = 0;
 
        blob = list_entry(ctx->blobs_being_compressed.next,
                          struct blob_descriptor, write_blobs_list);
@@ -924,8 +919,6 @@ write_chunk(struct write_blobs_ctx *ctx, const void *cchunk,
 
        ctx->cur_write_blob_offset += usize;
 
-       completed_size = usize;
-       completed_blob_count = 0;
        if (ctx->write_resource_flags & WRITE_RESOURCE_FLAG_SOLID) {
                /* Wrote chunk in solid mode.  It may have finished multiple
                 * blobs.  */
@@ -982,7 +975,7 @@ write_chunk(struct write_blobs_ctx *ctx, const void *cchunk,
                }
        }
 
-       return do_write_blobs_progress(&ctx->progress_data, completed_size,
+       return do_write_blobs_progress(&ctx->progress_data, usize, csize,
                                       completed_blob_count, false);
 
 write_error:
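
A worked example of what the updated call reports: if a 32768-byte uncompressed chunk compresses to 9216 bytes, completed_bytes advances by usize = 32768 while completed_compressed_bytes advances by csize = 9216, for a running ratio of 9216/32768 ≈ 28.1%; previously only the uncompressed size was visible to progress consumers. In solid mode, completed_blob_count can also exceed one here, since a single chunk may finish several small blobs.
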
@@ -1019,7 +1012,8 @@ prepare_chunk_buffer(struct write_blobs_ctx *ctx)
 
 /* Process the next chunk of data to be written to a WIM resource.  */
 static int
-write_blob_process_chunk(const void *chunk, size_t size, void *_ctx)
+write_blob_process_chunk(const struct blob_descriptor *blob, u64 offset,
+                        const void *chunk, size_t size, void *_ctx)
 {
        struct write_blobs_ctx *ctx = _ctx;
        int ret;
@@ -1032,7 +1026,6 @@ write_blob_process_chunk(const void *chunk, size_t size, void *_ctx)
                 ret = write_chunk(ctx, chunk, size, size);
                 if (ret)
                         return ret;
-                ctx->cur_read_blob_offset += size;
                 return 0;
        }
 
@@ -1056,8 +1049,7 @@ write_blob_process_chunk(const void *chunk, size_t size, void *_ctx)
                } else {
                        needed_chunk_size = min(ctx->out_chunk_size,
                                                ctx->cur_chunk_buf_filled +
-                                                       (ctx->cur_read_blob_size -
-                                                        ctx->cur_read_blob_offset));
+                                                       (blob->size - offset));
                }
 
                bytes_consumed = min(chunkend - chunkptr,
@@ -1067,7 +1059,7 @@ write_blob_process_chunk(const void *chunk, size_t size, void *_ctx)
                       chunkptr, bytes_consumed);
 
                chunkptr += bytes_consumed;
-               ctx->cur_read_blob_offset += bytes_consumed;
+               offset += bytes_consumed;
                ctx->cur_chunk_buf_filled += bytes_consumed;
 
                if (ctx->cur_chunk_buf_filled == needed_chunk_size) {
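
The new offset parameter drops straight into the buffering arithmetic. For example, in non-solid mode with out_chunk_size = 32768, an empty chunk buffer, and a blob whose last 1696 bytes begin at the current offset, needed_chunk_size = min(32768, 0 + (blob->size - offset)) = 1696, so the short final chunk of the blob is flushed as soon as those bytes arrive rather than waiting for a full output chunk.
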
@@ -1088,8 +1080,6 @@ write_blob_end_read(struct blob_descriptor *blob, int status, void *_ctx)
 {
        struct write_blobs_ctx *ctx = _ctx;
 
-       wimlib_assert(ctx->cur_read_blob_offset == ctx->cur_read_blob_size || status);
-
        if (!blob->will_be_in_output_wim) {
                /* The blob was a duplicate.  Now that its data has finished
                 * being read, it is being discarded in favor of the duplicate
@@ -1299,15 +1289,18 @@ write_raw_copy_resources(struct list_head *raw_copy_blobs,
                blob->rdesc->raw_copy_ok = 1;
 
        list_for_each_entry(blob, raw_copy_blobs, write_blobs_list) {
+               u64 compressed_size = 0;
+
                if (blob->rdesc->raw_copy_ok) {
                        /* Write each solid resource only one time.  */
                        ret = write_raw_copy_resource(blob->rdesc, out_fd);
                        if (ret)
                                return ret;
                        blob->rdesc->raw_copy_ok = 0;
+                       compressed_size = blob->rdesc->size_in_wim;
                }
                ret = do_write_blobs_progress(progress_data, blob->size,
-                                             1, false);
+                                             compressed_size, 1, false);
                if (ret)
                        return ret;
        }
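
Solid resources make the accounting here slightly asymmetric: write_raw_copy_resource() runs only once per resource descriptor, so the whole rdesc->size_in_wim is attributed to the first blob encountered for that resource, and the remaining blobs in it report a compressed size of 0, while every blob still contributes its own uncompressed blob->size. Summed over the resource, the compressed bytes are counted exactly once.
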
@@ -1616,7 +1609,7 @@ write_blob_list(struct list_head *blob_list,
 
        struct read_blob_callbacks cbs = {
                .begin_blob     = write_blob_begin_read,
-               .consume_chunk  = write_blob_process_chunk,
+               .continue_blob  = write_blob_process_chunk,
                .end_blob       = write_blob_end_read,
                .ctx            = &ctx,
        };
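
The renamed continue_blob callback is the hinge of the whole cleanup. Roughly the interface this diff implies, reconstructed from the signatures visible in the hunks above (the authoritative definition lives in wimlib's blob-reading code, so field order and const-ness here are inferred, not copied from the header):

	struct read_blob_callbacks {

		/* Called when the reader starts a new blob.  */
		int (*begin_blob)(struct blob_descriptor *blob, void *ctx);

		/* Formerly consume_chunk: the reader now also passes the blob
		 * being read and the uncompressed offset of this chunk within
		 * it, so consumers such as write.c no longer need to mirror
		 * the read position in their own context.  */
		int (*continue_blob)(const struct blob_descriptor *blob,
				     u64 offset, const void *chunk,
				     size_t size, void *ctx);

		/* Called when the blob finishes (status == 0) or fails.  */
		int (*end_blob)(struct blob_descriptor *blob, int status,
				void *ctx);

		/* Opaque pointer passed through to each callback.  */
		void *ctx;
	};
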
@@ -2602,14 +2595,15 @@ write_pipable_wim(WIMStruct *wim, int image, int write_flags,
        /* At this point, the header at the beginning of the file has already
         * been written.  */
 
-       /* For efficiency, when wimlib adds an image to the WIM with
-        * wimlib_add_image(), the SHA-1 message digests of files are not
-        * calculated; instead, they are calculated while the files are being
-        * written.  However, this does not work when writing a pipable WIM,
-        * since when writing a blob to a pipable WIM, its SHA-1 message digest
-        * needs to be known before the blob data is written.  Therefore, before
-        * getting much farther, we need to pre-calculate the SHA-1 message
-        * digests of all blobs that will be written.  */
+       /*
+        * For efficiency, wimlib normally delays calculating each newly added
+        * stream's hash until that stream is being written, or just before
+        * it is written.  However, when writing a pipable WIM (potentially to a
+        * pipe), we first have to write the metadata resources, which contain
+        * all the hashes.  Moreover, each blob is prefixed with its hash (struct
+        * pwm_blob_hdr).  Thus, we have to calculate all the hashes before
+        * writing anything.
+        */
        ret = wim_checksum_unhashed_blobs(wim);
        if (ret)
                return ret;
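
The rewritten comment is worth making concrete. A pipable WIM is emitted strictly sequentially, and every blob's SHA-1 appears on the wire before that blob's data: once in the metadata resources and again in the per-blob header. A minimal sketch of the ordering constraint, with hypothetical helpers sha1_buffer() and write_all() and an illustrative header layout (wimlib's real struct pwm_blob_hdr is not shown in this diff):

	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical helpers, declared for the sketch only.  */
	extern void sha1_buffer(const void *data, size_t size, uint8_t hash[20]);
	extern int write_all(int fd, const void *buf, size_t size);

	struct blob_hdr_sketch {	/* illustrative, not pwm_blob_hdr */
		uint64_t uncompressed_size;
		uint8_t  hash[20];	/* needed before any data goes out */
	};

	static int
	write_blob_to_pipe(int pipe_fd, const void *data, size_t size)
	{
		struct blob_hdr_sketch hdr = { .uncompressed_size = size };

		sha1_buffer(data, size, hdr.hash);		/* hash first...     */
		if (write_all(pipe_fd, &hdr, sizeof(hdr)))	/* ...then header... */
			return -1;
		return write_all(pipe_fd, data, size);		/* ...then the data  */
	}

On a seekable file the hash field could be patched in after the data is written; a pipe offers no second chance, hence the up-front wim_checksum_unhashed_blobs() call.
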
@@ -3257,7 +3251,7 @@ overwrite_wim_via_tmpfile(WIMStruct *wim, int write_flags, unsigned num_threads)
        wim_name_len = tstrlen(wim->filename);
        tchar tmpfile[wim_name_len + 10];
        tmemcpy(tmpfile, wim->filename, wim_name_len);
-       randomize_char_array_with_alnum(tmpfile + wim_name_len, 9);
+       get_random_alnum_chars(tmpfile + wim_name_len, 9);
        tmpfile[wim_name_len + 9] = T('\0');
 
        ret = wimlib_write(wim, tmpfile, WIMLIB_ALL_IMAGES,
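
The helper rename at the end is cosmetic, but for completeness, here is a plausible shape for such a function, using wimlib's tchar/T() conventions. This is an illustration only, not wimlib's actual get_random_alnum_chars(), which would draw from a proper entropy source rather than rand():

	#include <stdlib.h>

	/* Illustrative only: rand() stands in for a real random source,
	 * and rand() % 62 carries a slight modulo bias.  */
	static void
	random_alnum_chars_sketch(tchar *buf, size_t n)
	{
		static const tchar alnum[] =
			T("abcdefghijklmnopqrstuvwxyz")
			T("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
			T("0123456789");

		for (size_t i = 0; i < n; i++)
			buf[i] = alnum[rand() % 62];
	}

Here it fills the nine characters appended to the original WIM's filename, producing a temporary file in the same directory and keeping the eventual rename on the same filesystem.
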