X-Git-Url: https://wimlib.net/git/?p=wimlib;a=blobdiff_plain;f=src%2Fwrite.c;h=1dd35e52c047f14316ac223ee529ce39ba3def3c;hp=159fd57da9f731195f4aa5ae4c985195d7df36ff;hb=161e7cdd2c0d3b3c1025da452a3192d381297465;hpb=f3e97b29c4a8c564d54b0fd11cd43a9b4cd6a8ad diff --git a/src/write.c b/src/write.c index 159fd57d..1dd35e52 100644 --- a/src/write.c +++ b/src/write.c @@ -48,6 +48,7 @@ #endif #include +#include #include #ifdef WITH_NTFS_3G @@ -65,8 +66,8 @@ #include -#if defined(__WIN32__) && !defined(INVALID_HANDLE_VALUE) -# define INVALID_HANDLE_VALUE ((HANDLE)(-1)) +#ifndef __WIN32__ +# include /* for `struct iovec' */ #endif /* Chunk table that's located at the beginning of each compressed resource in @@ -89,7 +90,7 @@ struct chunk_table { */ static int begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte, - FILE *out_fp, + int out_fd, off_t file_offset, struct chunk_table **chunk_tab_ret) { @@ -97,13 +98,13 @@ begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte, u64 num_chunks = (size + WIM_CHUNK_SIZE - 1) / WIM_CHUNK_SIZE; size_t alloc_size = sizeof(struct chunk_table) + num_chunks * sizeof(u64); struct chunk_table *chunk_tab = CALLOC(1, alloc_size); - int ret; + + DEBUG("Begin chunk table for stream with size %"PRIu64, size); if (!chunk_tab) { ERROR("Failed to allocate chunk table for %"PRIu64" byte " "resource", size); - ret = WIMLIB_ERR_NOMEM; - goto out; + return WIMLIB_ERR_NOMEM; } chunk_tab->file_offset = file_offset; chunk_tab->num_chunks = num_chunks; @@ -114,19 +115,16 @@ begin_wim_resource_chunk_tab(const struct wim_lookup_table_entry *lte, chunk_tab->cur_offset = 0; chunk_tab->cur_offset_p = chunk_tab->offsets; - if (fwrite(chunk_tab, 1, chunk_tab->table_disk_size, out_fp) != - chunk_tab->table_disk_size) { + if (full_write(out_fd, chunk_tab, + chunk_tab->table_disk_size) != chunk_tab->table_disk_size) + { ERROR_WITH_ERRNO("Failed to write chunk table in compressed " "file resource"); FREE(chunk_tab); - ret = WIMLIB_ERR_WRITE; - goto out; + return WIMLIB_ERR_WRITE; } - - ret = 0; *chunk_tab_ret = chunk_tab; -out: - return ret; + return 0; } /* @@ -155,7 +153,7 @@ out: typedef unsigned (*compress_func_t)(const void *chunk, unsigned chunk_size, void *out); -compress_func_t +static compress_func_t get_compress_func(int out_ctype) { if (out_ctype == WIMLIB_COMPRESSION_TYPE_LZX) @@ -169,7 +167,7 @@ get_compress_func(int out_ctype) * * @chunk: Uncompressed data of the chunk. * @chunk_size: Size of the chunk (<= WIM_CHUNK_SIZE) - * @out_fp: FILE * to write the chunk to. + * @out_fd: File descriptor to write the chunk to. * @compress: Compression function to use (NULL if writing uncompressed * data). * @chunk_tab: Pointer to chunk table being created. It is updated with the @@ -178,16 +176,18 @@ get_compress_func(int out_ctype) * Returns 0 on success; nonzero on failure. 
*/ static int -write_wim_resource_chunk(const void *chunk, unsigned chunk_size, - FILE *out_fp, compress_func_t compress, - struct chunk_table *chunk_tab) +write_wim_resource_chunk(const void * restrict chunk, + unsigned chunk_size, + filedes_t out_fd, + compress_func_t compress, + struct chunk_table * restrict chunk_tab) { - const u8 *out_chunk; + const void *out_chunk; unsigned out_chunk_size; if (compress) { - u8 *compressed_chunk = alloca(chunk_size); + void *compressed_chunk = alloca(chunk_size); - out_chunk_size = compress(chunk, chunk_size, compressed_chunk); + out_chunk_size = (*compress)(chunk, chunk_size, compressed_chunk); if (out_chunk_size) { /* Write compressed */ out_chunk = compressed_chunk; @@ -203,7 +203,7 @@ write_wim_resource_chunk(const void *chunk, unsigned chunk_size, out_chunk = chunk; out_chunk_size = chunk_size; } - if (fwrite(out_chunk, 1, out_chunk_size, out_fp) != out_chunk_size) { + if (full_write(out_fd, out_chunk, out_chunk_size) != out_chunk_size) { ERROR_WITH_ERRNO("Failed to write WIM resource chunk"); return WIMLIB_ERR_WRITE; } @@ -219,14 +219,9 @@ write_wim_resource_chunk(const void *chunk, unsigned chunk_size, */ static int finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab, - FILE *out_fp, u64 *compressed_size_p) + filedes_t out_fd, u64 *compressed_size_p) { size_t bytes_written; - if (fseeko(out_fp, chunk_tab->file_offset, SEEK_SET) != 0) { - ERROR_WITH_ERRNO("Failed to seek to byte %"PRIu64" of output " - "WIM file", chunk_tab->file_offset); - return WIMLIB_ERR_WRITE; - } if (chunk_tab->bytes_per_chunk_entry == 8) { array_cpu_to_le64(chunk_tab->offsets, chunk_tab->num_chunks); @@ -235,50 +230,72 @@ finish_wim_resource_chunk_tab(struct chunk_table *chunk_tab, ((u32*)chunk_tab->offsets)[i] = cpu_to_le32(chunk_tab->offsets[i]); } - bytes_written = fwrite((u8*)chunk_tab->offsets + - chunk_tab->bytes_per_chunk_entry, - 1, chunk_tab->table_disk_size, out_fp); + bytes_written = full_pwrite(out_fd, + (u8*)chunk_tab->offsets + chunk_tab->bytes_per_chunk_entry, + chunk_tab->table_disk_size, + chunk_tab->file_offset); if (bytes_written != chunk_tab->table_disk_size) { ERROR_WITH_ERRNO("Failed to write chunk table in compressed " "file resource"); return WIMLIB_ERR_WRITE; } - if (fseeko(out_fp, 0, SEEK_END) != 0) { - ERROR_WITH_ERRNO("Failed to seek to end of output WIM file"); + *compressed_size_p = chunk_tab->cur_offset + chunk_tab->table_disk_size; + return 0; +} + +static int +seek_and_truncate(filedes_t out_fd, off_t offset) +{ + if (lseek(out_fd, offset, SEEK_SET) == -1 || + ftruncate(out_fd, offset)) + { + ERROR_WITH_ERRNO("Failed to truncate output WIM file"); return WIMLIB_ERR_WRITE; + } else { + return 0; + } +} + +static int +finalize_and_check_sha1(SHA_CTX * restrict sha_ctx, + struct wim_lookup_table_entry * restrict lte) +{ + u8 md[SHA1_HASH_SIZE]; + sha1_final(md, sha_ctx); + if (lte->unhashed) { + copy_hash(lte->hash, md); + } else if (!hashes_equal(md, lte->hash)) { + ERROR("WIM resource has incorrect hash!"); + if (lte_filename_valid(lte)) { + ERROR("We were reading it from \"%"TS"\"; maybe " + "it changed while we were reading it.", + lte->file_on_disk); + } + return WIMLIB_ERR_INVALID_RESOURCE_HASH; } - *compressed_size_p = chunk_tab->cur_offset + chunk_tab->table_disk_size; return 0; } + struct write_resource_ctx { compress_func_t compress; struct chunk_table *chunk_tab; - FILE *out_fp; + filedes_t out_fd; SHA_CTX sha_ctx; bool doing_sha; }; static int -write_resource_cb(const void *chunk, size_t chunk_size, void *_ctx) 
+write_resource_cb(const void *restrict chunk, size_t chunk_size, + void *restrict _ctx) { struct write_resource_ctx *ctx = _ctx; if (ctx->doing_sha) sha1_update(&ctx->sha_ctx, chunk, chunk_size); - - if (ctx->compress) { - return write_wim_resource_chunk(chunk, chunk_size, - ctx->out_fp, ctx->compress, - ctx->chunk_tab); - } else { - if (fwrite(chunk, 1, chunk_size, ctx->out_fp) != chunk_size) { - ERROR_WITH_ERRNO("Error writing to output WIM"); - return WIMLIB_ERR_WRITE; - } else { - return 0; - } - } + return write_wim_resource_chunk(chunk, chunk_size, + ctx->out_fd, ctx->compress, + ctx->chunk_tab); } /* @@ -287,7 +304,7 @@ write_resource_cb(const void *chunk, size_t chunk_size, void *_ctx) * @lte: Lookup table entry for the resource, which could be in another WIM, * in an external file, or in another location. * - * @out_fp: FILE * opened to the output WIM. + * @out_fd: File descriptor opened to the output WIM. * * @out_ctype: One of the WIMLIB_COMPRESSION_TYPE_* constants to indicate * which compression algorithm to use. @@ -307,25 +324,20 @@ write_resource_cb(const void *chunk, size_t chunk_size, void *_ctx) */ int write_wim_resource(struct wim_lookup_table_entry *lte, - FILE *out_fp, int out_ctype, + filedes_t out_fd, int out_ctype, struct resource_entry *out_res_entry, int flags) { struct write_resource_ctx write_ctx; + u64 read_size; u64 new_size; off_t offset; int ret; flags &= ~WIMLIB_RESOURCE_FLAG_RECOMPRESS; - if (wim_resource_size(lte) == 0) { - /* Empty resource; nothing needs to be done, so just return - * success. */ - return 0; - } - /* Get current position in output WIM */ - offset = ftello(out_fp); + offset = filedes_offset(out_fd); if (offset == -1) { ERROR_WITH_ERRNO("Can't get position in output WIM"); return WIMLIB_ERR_WRITE; @@ -343,9 +355,11 @@ write_wim_resource(struct wim_lookup_table_entry *lte, { flags |= WIMLIB_RESOURCE_FLAG_RAW; write_ctx.doing_sha = false; + read_size = lte->resource_entry.size; } else { write_ctx.doing_sha = true; sha1_init(&write_ctx.sha_ctx); + read_size = lte->resource_entry.original_size; } /* Initialize the chunk table and set the compression function if @@ -356,7 +370,7 @@ write_wim_resource(struct wim_lookup_table_entry *lte, write_ctx.chunk_tab = NULL; } else { write_ctx.compress = get_compress_func(out_ctype); - ret = begin_wim_resource_chunk_tab(lte, out_fp, + ret = begin_wim_resource_chunk_tab(lte, out_fd, offset, &write_ctx.chunk_tab); if (ret) @@ -365,28 +379,19 @@ write_wim_resource(struct wim_lookup_table_entry *lte, /* Write the entire resource by reading the entire resource and feeding * the data through the write_resource_cb function. */ - write_ctx.out_fp = out_fp; + write_ctx.out_fd = out_fd; try_write_again: - ret = read_resource_prefix(lte, wim_resource_size(lte), + ret = read_resource_prefix(lte, read_size, write_resource_cb, &write_ctx, flags); + if (ret) + goto out_free_chunk_tab; /* Verify SHA1 message digest of the resource, or set the hash for the * first time. 
*/ if (write_ctx.doing_sha) { - u8 md[SHA1_HASH_SIZE]; - sha1_final(md, &write_ctx.sha_ctx); - if (lte->unhashed) { - copy_hash(lte->hash, md); - } else if (!hashes_equal(md, lte->hash)) { - ERROR("WIM resource has incorrect hash!"); - if (lte_filename_valid(lte)) { - ERROR("We were reading it from \"%"TS"\"; maybe " - "it changed while we were reading it.", - lte->file_on_disk); - } - ret = WIMLIB_ERR_INVALID_RESOURCE_HASH; + ret = finalize_and_check_sha1(&write_ctx.sha_ctx, lte); + if (ret) goto out_free_chunk_tab; - } } out_res_entry->flags = lte->resource_entry.flags; @@ -405,26 +410,19 @@ try_write_again: /* Using a different compression type: Call * finish_wim_resource_chunk_tab() and it will provide the new * compressed size. */ - ret = finish_wim_resource_chunk_tab(write_ctx.chunk_tab, out_fp, + ret = finish_wim_resource_chunk_tab(write_ctx.chunk_tab, out_fd, &new_size); if (ret) goto out_free_chunk_tab; if (new_size >= wim_resource_size(lte)) { /* Oops! We compressed the resource to larger than the original * size. Write the resource uncompressed instead. */ - if (fseeko(out_fp, offset, SEEK_SET) || - fflush(out_fp) || - ftruncate(fileno(out_fp), - offset + wim_resource_size(lte))) - { - ERROR_WITH_ERRNO("Failed to flush and/or truncate " - "output WIM file"); - ret = WIMLIB_ERR_WRITE; - goto out_free_chunk_tab; - } DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; " "writing uncompressed instead", wim_resource_size(lte), new_size); + ret = seek_and_truncate(out_fd, offset); + if (ret) + goto out_free_chunk_tab; write_ctx.compress = NULL; write_ctx.doing_sha = false; out_ctype = WIMLIB_COMPRESSION_TYPE_NONE; @@ -459,15 +457,30 @@ shared_queue_init(struct shared_queue *q, unsigned size) wimlib_assert(size != 0); q->array = CALLOC(sizeof(q->array[0]), size); if (!q->array) - return WIMLIB_ERR_NOMEM; + goto err; q->filled_slots = 0; q->front = 0; q->back = size - 1; q->size = size; - pthread_mutex_init(&q->lock, NULL); - pthread_cond_init(&q->msg_avail_cond, NULL); - pthread_cond_init(&q->space_avail_cond, NULL); + if (pthread_mutex_init(&q->lock, NULL)) { + ERROR_WITH_ERRNO("Failed to initialize mutex"); + goto err; + } + if (pthread_cond_init(&q->msg_avail_cond, NULL)) { + ERROR_WITH_ERRNO("Failed to initialize condition variable"); + goto err_destroy_lock; + } + if (pthread_cond_init(&q->space_avail_cond, NULL)) { + ERROR_WITH_ERRNO("Failed to initialize condition variable"); + goto err_destroy_msg_avail_cond; + } return 0; +err_destroy_msg_avail_cond: + pthread_cond_destroy(&q->msg_avail_cond); +err_destroy_lock: + pthread_mutex_destroy(&q->lock); +err: + return WIMLIB_ERR_NOMEM; } static void @@ -524,10 +537,10 @@ struct compressor_thread_params { struct message { struct wim_lookup_table_entry *lte; u8 *uncompressed_chunks[MAX_CHUNKS_PER_MSG]; - u8 *out_compressed_chunks[MAX_CHUNKS_PER_MSG]; u8 *compressed_chunks[MAX_CHUNKS_PER_MSG]; unsigned uncompressed_chunk_sizes[MAX_CHUNKS_PER_MSG]; - unsigned compressed_chunk_sizes[MAX_CHUNKS_PER_MSG]; + struct iovec out_chunks[MAX_CHUNKS_PER_MSG]; + size_t total_out_bytes; unsigned num_chunks; struct list_head list; bool complete; @@ -537,21 +550,25 @@ struct message { static void compress_chunks(struct message *msg, compress_func_t compress) { + msg->total_out_bytes = 0; for (unsigned i = 0; i < msg->num_chunks; i++) { - DEBUG2("compress chunk %u of %u", i, msg->num_chunks); unsigned len = compress(msg->uncompressed_chunks[i], msg->uncompressed_chunk_sizes[i], msg->compressed_chunks[i]); + void *out_chunk; + unsigned out_len; if (len) { /* 
To be written compressed */ - msg->out_compressed_chunks[i] = msg->compressed_chunks[i]; - msg->compressed_chunk_sizes[i] = len; + out_chunk = msg->compressed_chunks[i]; + out_len = len; } else { /* To be written uncompressed */ - msg->out_compressed_chunks[i] = msg->uncompressed_chunks[i]; - msg->compressed_chunk_sizes[i] = msg->uncompressed_chunk_sizes[i]; - + out_chunk = msg->uncompressed_chunks[i]; + out_len = msg->uncompressed_chunk_sizes[i]; } + msg->out_chunks[i].iov_base = out_chunk; + msg->out_chunks[i].iov_len = out_len; + msg->total_out_bytes += out_len; } } @@ -602,42 +619,34 @@ do_write_streams_progress(union wimlib_progress_info *progress, } } -static int -sha1_chunk(const void *buf, size_t len, void *ctx) -{ - sha1_update(ctx, buf, len); - return 0; -} +struct serial_write_stream_ctx { + filedes_t out_fd; + int out_ctype; + int write_resource_flags; +}; static int -sha1_resource(struct wim_lookup_table_entry *lte) +serial_write_stream(struct wim_lookup_table_entry *lte, void *_ctx) { - int ret; - SHA_CTX sha_ctx; - - sha1_init(&sha_ctx); - ret = read_resource_prefix(lte, wim_resource_size(lte), - sha1_chunk, &sha_ctx, 0); - if (ret == 0) - sha1_final(lte->hash, &sha_ctx); - return ret; + struct serial_write_stream_ctx *ctx = _ctx; + return write_wim_resource(lte, ctx->out_fd, + ctx->out_ctype, <e->output_resource_entry, + ctx->write_resource_flags); } -enum { - STREAMS_MERGED = 0, - STREAMS_NOT_MERGED = 1, -}; - +/* Write a list of streams, taking into account that some streams may be + * duplicates that are checksummed and discarded on the fly, and also delegating + * the actual writing of a stream to a function @write_stream_cb, which is + * passed the context @write_stream_ctx. */ static int do_write_stream_list(struct list_head *stream_list, struct wim_lookup_table *lookup_table, - FILE *out_fp, - int out_ctype, + int (*write_stream_cb)(struct wim_lookup_table_entry *, void *), + void *write_stream_ctx, wimlib_progress_func_t progress_func, - union wimlib_progress_info *progress, - int write_resource_flags) + union wimlib_progress_info *progress) { - int ret; + int ret = 0; struct wim_lookup_table_entry *lte; /* For each stream in @stream_list ... */ @@ -647,572 +656,531 @@ do_write_stream_list(struct list_head *stream_list, write_streams_list); list_del(<e->write_streams_list); if (lte->unhashed && !lte->unique_size) { - /* Unhashed stream that shares a size with some other * stream in the WIM we are writing. The stream must be * checksummed to know if we need to write it or not. */ - struct wim_lookup_table_entry *duplicate_lte; - struct wim_lookup_table_entry **back_ptr; - - /* back_ptr must be saved because it's in union with the - * SHA1 message digest and will no longer be valid once - * the SHA1 has been calculated. */ - back_ptr = lte->back_ptr; + struct wim_lookup_table_entry *tmp; + u32 orig_refcnt = lte->out_refcnt; - /* Checksum the stream */ - ret = sha1_resource(lte); + ret = hash_unhashed_stream(lte, lookup_table, &tmp); if (ret) - return ret; - - /* Look for a duplicate stream */ - duplicate_lte = __lookup_resource(lookup_table, lte->hash); - if (duplicate_lte) { - /* We have a duplicate stream. Transfer the - * reference counts from this stream to the - * duplicate, update the reference to this - * stream (in an inode or ads_entry) to point to - * the duplicate, then free this stream. 
*/ - wimlib_assert(!(duplicate_lte->unhashed)); - bool is_new_stream = (duplicate_lte->out_refcnt == 0); - duplicate_lte->refcnt += lte->refcnt; - duplicate_lte->out_refcnt += lte->refcnt; - *back_ptr = duplicate_lte; - list_del(<e->unhashed_list); - free_lookup_table_entry(lte); - lte = duplicate_lte; - - if (is_new_stream) { - /* The duplicate stream is one we - * weren't already planning to write. - * But, now we must write it. - * - * XXX: Currently, the copy of the - * stream in the WIM is always chosen - * for writing, rather than the extra - * copy we just read (which may be in an - * external file). This may not always - * be fastest. */ - } else { + break; + if (tmp != lte) { + lte = tmp; + /* We found a duplicate stream. */ + if (orig_refcnt != tmp->out_refcnt) { /* We have already written, or are going * to write, the duplicate stream. So * just skip to the next stream. */ DEBUG("Discarding duplicate stream of length %"PRIu64, wim_resource_size(lte)); + lte->no_progress = 0; goto skip_to_progress; } - - } else { - /* No duplicate stream, so we need to insert - * this stream into the lookup table and treat - * it as a hashed stream. */ - list_del(<e->unhashed_list); - lookup_table_insert(lookup_table, lte); - lte->out_refcnt = lte->refcnt; - lte->unhashed = 0; } } - /* Here, @lte either a hashed stream or an unhashed stream with - * a unique size. In either case we know that the stream has to - * be written. In either case the SHA1 message digest will be - * calculated over the stream while writing it; however, in the - * former case this is done merely to check the data, while in - * the latter case this is done because we do not have the SHA1 - * message digest yet. */ - + /* Here, @lte is either a hashed stream or an unhashed stream + * with a unique size. In either case we know that the stream + * has to be written. In either case the SHA1 message digest + * will be calculated over the stream while writing it; however, + * in the former case this is done merely to check the data, + * while in the latter case this is done because we do not have + * the SHA1 message digest yet. */ wimlib_assert(lte->out_refcnt != 0); - - ret = write_wim_resource(lte, - out_fp, - out_ctype, - <e->output_resource_entry, - write_resource_flags); + lte->deferred = 0; + lte->no_progress = 0; + ret = (*write_stream_cb)(lte, write_stream_ctx); if (ret) - return ret; + break; + /* In parallel mode, some streams are deferred for later, + * serialized processing; ignore them here. 
*/ + if (lte->deferred) + continue; if (lte->unhashed) { list_del(<e->unhashed_list); lookup_table_insert(lookup_table, lte); lte->unhashed = 0; } skip_to_progress: - do_write_streams_progress(progress, - progress_func, - wim_resource_size(lte)); + if (!lte->no_progress) { + do_write_streams_progress(progress, + progress_func, + wim_resource_size(lte)); + } } - return 0; + return ret; +} + +static int +do_write_stream_list_serial(struct list_head *stream_list, + struct wim_lookup_table *lookup_table, + filedes_t out_fd, + int out_ctype, + int write_resource_flags, + wimlib_progress_func_t progress_func, + union wimlib_progress_info *progress) +{ + struct serial_write_stream_ctx ctx = { + .out_fd = out_fd, + .out_ctype = out_ctype, + .write_resource_flags = write_resource_flags, + }; + return do_write_stream_list(stream_list, + lookup_table, + serial_write_stream, + &ctx, + progress_func, + progress); +} + +static inline int +write_flags_to_resource_flags(int write_flags) +{ + int resource_flags = 0; + + if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS) + resource_flags |= WIMLIB_RESOURCE_FLAG_RECOMPRESS; + return resource_flags; } static int write_stream_list_serial(struct list_head *stream_list, struct wim_lookup_table *lookup_table, - FILE *out_fp, + filedes_t out_fd, int out_ctype, - int write_flags, + int write_resource_flags, wimlib_progress_func_t progress_func, union wimlib_progress_info *progress) { - int write_resource_flags = 0; - if (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS) - write_resource_flags |= WIMLIB_RESOURCE_FLAG_RECOMPRESS; - + DEBUG("Writing stream list (serial version)"); progress->write_streams.num_threads = 1; if (progress_func) progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress); - return do_write_stream_list(stream_list, - lookup_table, - out_fp, - out_ctype, progress_func, - progress, write_resource_flags); + return do_write_stream_list_serial(stream_list, + lookup_table, + out_fd, + out_ctype, + write_resource_flags, + progress_func, + progress); } #ifdef ENABLE_MULTITHREADED_COMPRESSION static int -write_wim_chunks(struct message *msg, FILE *out_fp, +write_wim_chunks(struct message *msg, filedes_t out_fd, struct chunk_table *chunk_tab) { for (unsigned i = 0; i < msg->num_chunks; i++) { - unsigned chunk_csize = msg->compressed_chunk_sizes[i]; - - DEBUG2("Write wim chunk %u of %u (csize = %u)", - i, msg->num_chunks, chunk_csize); - - if (fwrite(msg->out_compressed_chunks[i], 1, chunk_csize, out_fp) - != chunk_csize) - { - ERROR_WITH_ERRNO("Failed to write WIM chunk"); - return WIMLIB_ERR_WRITE; - } - *chunk_tab->cur_offset_p++ = chunk_tab->cur_offset; - chunk_tab->cur_offset += chunk_csize; + chunk_tab->cur_offset += msg->out_chunks[i].iov_len; + } + if (full_writev(out_fd, msg->out_chunks, + msg->num_chunks) != msg->total_out_bytes) + { + ERROR_WITH_ERRNO("Failed to write WIM chunks"); + return WIMLIB_ERR_WRITE; } return 0; } -/* - * This function is executed by the main thread when the resources are being - * compressed in parallel. The main thread is in change of all reading of the - * uncompressed data and writing of the compressed data. The compressor threads - * *only* do compression from/to in-memory buffers. - * - * Each unit of work given to a compressor thread is up to MAX_CHUNKS_PER_MSG - * chunks of compressed data to compress, represented in a `struct message'. - * Each message is passed from the main thread to a worker thread through the - * res_to_compress_queue, and it is passed back through the - * compressed_res_queue. 
- */ -static int -main_writer_thread_proc(struct list_head *stream_list, - FILE *out_fp, - int out_ctype, - struct shared_queue *res_to_compress_queue, - struct shared_queue *compressed_res_queue, - size_t num_messages, - int write_flags, - wimlib_progress_func_t progress_func, - union wimlib_progress_info *progress) -{ - int ret; - struct chunk_table *cur_chunk_tab = NULL; - struct message *msgs = CALLOC(num_messages, sizeof(struct message)); - struct wim_lookup_table_entry *next_lte = NULL; +struct main_writer_thread_ctx { + struct list_head *stream_list; + struct wim_lookup_table *lookup_table; + filedes_t out_fd; + int out_ctype; + int write_resource_flags; + struct shared_queue *res_to_compress_queue; + struct shared_queue *compressed_res_queue; + size_t num_messages; + wimlib_progress_func_t progress_func; + union wimlib_progress_info *progress; - // Initially, all the messages are available to use. - LIST_HEAD(available_msgs); + struct list_head available_msgs; + struct list_head outstanding_streams; + struct list_head serial_streams; + size_t num_outstanding_messages; - if (!msgs) { - ret = WIMLIB_ERR_NOMEM; - goto out; - } - - for (size_t i = 0; i < num_messages; i++) - list_add(&msgs[i].list, &available_msgs); - - // outstanding_resources is the list of resources that currently have - // had chunks sent off for compression. - // - // The first stream in outstanding_resources is the stream that is - // currently being written (cur_lte). - // - // The last stream in outstanding_resources is the stream that is - // currently being read and chunks fed to the compressor threads - // (next_lte). - // - // Depending on the number of threads and the sizes of the resource, - // the outstanding streams list may contain streams between cur_lte and - // next_lte that have all their chunks compressed or being compressed, - // but haven't been written yet. - // - LIST_HEAD(outstanding_resources); - struct list_head *next_resource = stream_list->next; - u64 next_chunk = 0; - u64 next_num_chunks = 0; - - // As in write_wim_resource(), each resource we read is checksummed. SHA_CTX next_sha_ctx; - u8 next_hash[SHA1_HASH_SIZE]; + u64 next_chunk; + u64 next_num_chunks; + struct wim_lookup_table_entry *next_lte; - // Resources that don't need any chunks compressed are added to this - // list and written directly by the main thread. 
- LIST_HEAD(my_resources); + struct message *msgs; + struct message *next_msg; + struct chunk_table *cur_chunk_tab; +}; - struct wim_lookup_table_entry *cur_lte = NULL; - struct message *msg; +static int +init_message(struct message *msg) +{ + for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) { + msg->compressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE); + msg->uncompressed_chunks[i] = MALLOC(WIM_CHUNK_SIZE); + if (msg->compressed_chunks[i] == NULL || + msg->uncompressed_chunks[i] == NULL) + return WIMLIB_ERR_NOMEM; + } + return 0; +} -#ifdef WITH_NTFS_3G - ntfs_inode *ni = NULL; -#endif +static void +destroy_message(struct message *msg) +{ + for (size_t i = 0; i < MAX_CHUNKS_PER_MSG; i++) { + FREE(msg->compressed_chunks[i]); + FREE(msg->uncompressed_chunks[i]); + } +} - DEBUG("Initializing buffers for uncompressed " - "and compressed data (%zu bytes needed)", - num_messages * MAX_CHUNKS_PER_MSG * WIM_CHUNK_SIZE * 2); +static void +free_messages(struct message *msgs, size_t num_messages) +{ + if (msgs) { + for (size_t i = 0; i < num_messages; i++) + destroy_message(&msgs[i]); + FREE(msgs); + } +} + +static struct message * +allocate_messages(size_t num_messages) +{ + struct message *msgs; - // Pre-allocate all the buffers that will be needed to do the chunk - // compression. + msgs = CALLOC(num_messages, sizeof(struct message)); + if (!msgs) + return NULL; for (size_t i = 0; i < num_messages; i++) { - for (size_t j = 0; j < MAX_CHUNKS_PER_MSG; j++) { - msgs[i].compressed_chunks[j] = MALLOC(WIM_CHUNK_SIZE); - - // The extra 8 bytes is because longest_match() in - // lz77.c may read a little bit off the end of the - // uncompressed data. It doesn't need to be - // initialized--- we really just need to avoid accessing - // an unmapped page. - msgs[i].uncompressed_chunks[j] = MALLOC(WIM_CHUNK_SIZE + 8); - if (msgs[i].compressed_chunks[j] == NULL || - msgs[i].uncompressed_chunks[j] == NULL) - { - ret = WIMLIB_ERR_NOMEM; - goto out; - } + if (init_message(&msgs[i])) { + free_messages(msgs, num_messages); + return NULL; } } + return msgs; +} - // This loop is executed until all resources have been written, except - // possibly a few that have been added to the @my_resources list for - // writing later. - while (1) { - // Send chunks to the compressor threads until either (a) there - // are no more messages available since they were all sent off, - // or (b) there are no more resources that need to be - // compressed. - while (!list_empty(&available_msgs)) { - if (next_chunk == next_num_chunks) { - // If next_chunk == next_num_chunks, there are - // no more chunks to write in the current - // stream. So, check the SHA1 message digest of - // the stream that was just finished (unless - // next_lte == NULL, which is the case the very - // first time this loop is entered, and also - // near the very end of the compression when - // there are no more streams.) Then, advance to - // the next stream (if there is one). 
- if (next_lte != NULL) { - #ifdef WITH_NTFS_3G - end_wim_resource_read(next_lte, ni); - ni = NULL; - #else - end_wim_resource_read(next_lte); - #endif - DEBUG2("Finalize SHA1 md (next_num_chunks=%zu)", - next_num_chunks); - sha1_final(next_hash, &next_sha_ctx); - if (!hashes_equal(next_lte->hash, next_hash)) { - ERROR("WIM resource has incorrect hash!"); - if (next_lte->resource_location == - RESOURCE_IN_FILE_ON_DISK) - { - ERROR("We were reading it from `%"TS"'; " - "maybe it changed while we were " - "reading it.", - next_lte->file_on_disk); - } - ret = WIMLIB_ERR_INVALID_RESOURCE_HASH; - goto out; - } - } +static void +main_writer_thread_destroy_ctx(struct main_writer_thread_ctx *ctx) +{ + while (ctx->num_outstanding_messages--) + shared_queue_get(ctx->compressed_res_queue); + free_messages(ctx->msgs, ctx->num_messages); + FREE(ctx->cur_chunk_tab); +} - // Advance to the next resource. - // - // If the next resource needs no compression, just write - // it with this thread (not now though--- we could be in - // the middle of writing another resource.) Keep doing - // this until we either get to the end of the resources - // list, or we get to a resource that needs compression. - while (1) { - if (next_resource == stream_list) { - // No more resources to send for - // compression - next_lte = NULL; - break; - } - next_lte = container_of(next_resource, - struct wim_lookup_table_entry, - write_streams_list); - next_resource = next_resource->next; - if ((!(write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS) - && wim_resource_compression_type(next_lte) == out_ctype) - || wim_resource_size(next_lte) == 0) - { - list_add_tail(&next_lte->write_streams_list, - &my_resources); - } else { - list_add_tail(&next_lte->write_streams_list, - &outstanding_resources); - next_chunk = 0; - next_num_chunks = wim_resource_chunks(next_lte); - sha1_init(&next_sha_ctx); - INIT_LIST_HEAD(&next_lte->msg_list); - #ifdef WITH_NTFS_3G - ret = prepare_resource_for_read(next_lte, &ni); - #else - ret = prepare_resource_for_read(next_lte); - #endif - - if (ret != 0) - goto out; - if (cur_lte == NULL) { - // Set cur_lte for the - // first time - cur_lte = next_lte; - } - break; - } - } - } +static int +main_writer_thread_init_ctx(struct main_writer_thread_ctx *ctx) +{ + /* Pre-allocate all the buffers that will be needed to do the chunk + * compression. */ + ctx->msgs = allocate_messages(ctx->num_messages); + if (!ctx->msgs) + return WIMLIB_ERR_NOMEM; - if (next_lte == NULL) { - // No more resources to send for compression - break; - } + /* Initially, all the messages are available to use. */ + INIT_LIST_HEAD(&ctx->available_msgs); + for (size_t i = 0; i < ctx->num_messages; i++) + list_add_tail(&ctx->msgs[i].list, &ctx->available_msgs); - // Get a message from the available messages - // list - msg = container_of(available_msgs.next, - struct message, - list); - - // ... and delete it from the available messages - // list - list_del(&msg->list); - - // Initialize the message with the chunks to - // compress. - msg->num_chunks = min(next_num_chunks - next_chunk, - MAX_CHUNKS_PER_MSG); - msg->lte = next_lte; - msg->complete = false; - msg->begin_chunk = next_chunk; - - unsigned size = WIM_CHUNK_SIZE; - for (unsigned i = 0; i < msg->num_chunks; i++) { - - // Read chunk @next_chunk of the stream into the - // message so that a compressor thread can - // compress it. 
- - if (next_chunk == next_num_chunks - 1) { - size = MODULO_NONZERO(wim_resource_size(next_lte), - WIM_CHUNK_SIZE); - } + /* outstanding_streams is the list of streams that currently have had + * chunks sent off for compression. + * + * The first stream in outstanding_streams is the stream that is + * currently being written. + * + * The last stream in outstanding_streams is the stream that is + * currently being read and having chunks fed to the compressor threads. + * */ + INIT_LIST_HEAD(&ctx->outstanding_streams); + ctx->num_outstanding_messages = 0; - DEBUG2("Read resource (size=%u, offset=%zu)", - size, next_chunk * WIM_CHUNK_SIZE); - - msg->uncompressed_chunk_sizes[i] = size; - - ret = read_wim_resource(next_lte, - msg->uncompressed_chunks[i], - size, - next_chunk * WIM_CHUNK_SIZE, - 0); - if (ret != 0) - goto out; - sha1_update(&next_sha_ctx, - msg->uncompressed_chunks[i], size); - next_chunk++; - } + ctx->next_msg = NULL; - // Send the compression request - list_add_tail(&msg->list, &next_lte->msg_list); - shared_queue_put(res_to_compress_queue, msg); - DEBUG2("Compression request sent"); - } + /* Resources that don't need any chunks compressed are added to this + * list and written directly by the main thread. */ + INIT_LIST_HEAD(&ctx->serial_streams); - // If there are no outstanding resources, there are no more - // resources that need to be written. - if (list_empty(&outstanding_resources)) { - ret = 0; - goto out; + ctx->cur_chunk_tab = NULL; + + return 0; +} + +static int +receive_compressed_chunks(struct main_writer_thread_ctx *ctx) +{ + struct message *msg; + struct wim_lookup_table_entry *cur_lte; + int ret; + + wimlib_assert(!list_empty(&ctx->outstanding_streams)); + wimlib_assert(ctx->num_outstanding_messages != 0); + + cur_lte = container_of(ctx->outstanding_streams.next, + struct wim_lookup_table_entry, + being_compressed_list); + + /* Get the next message from the queue and process it. + * The message will contain 1 or more data chunks that have been + * compressed. */ + msg = shared_queue_get(ctx->compressed_res_queue); + msg->complete = true; + --ctx->num_outstanding_messages; + + /* Is this the next chunk in the current resource? If it's not + * (i.e., an earlier chunk in a same or different resource + * hasn't been compressed yet), do nothing, and keep this + * message around until all earlier chunks are received. + * + * Otherwise, write all the chunks we can. */ + while (cur_lte != NULL && + !list_empty(&cur_lte->msg_list) + && (msg = container_of(cur_lte->msg_list.next, + struct message, + list))->complete) + { + list_move(&msg->list, &ctx->available_msgs); + if (msg->begin_chunk == 0) { + /* This is the first set of chunks. Leave space + * for the chunk table in the output file. */ + off_t cur_offset = filedes_offset(ctx->out_fd); + if (cur_offset == -1) + return WIMLIB_ERR_WRITE; + ret = begin_wim_resource_chunk_tab(cur_lte, + ctx->out_fd, + cur_offset, + &ctx->cur_chunk_tab); + if (ret) + return ret; } - // Get the next message from the queue and process it. - // The message will contain 1 or more data chunks that have been - // compressed. - msg = shared_queue_get(compressed_res_queue); - msg->complete = true; - - // Is this the next chunk in the current resource? If it's not - // (i.e., an earlier chunk in a same or different resource - // hasn't been compressed yet), do nothing, and keep this - // message around until all earlier chunks are received. - // - // Otherwise, write all the chunks we can. 
- while (cur_lte != NULL && - !list_empty(&cur_lte->msg_list) && - (msg = container_of(cur_lte->msg_list.next, - struct message, - list))->complete) + /* Write the compressed chunks from the message. */ + ret = write_wim_chunks(msg, ctx->out_fd, ctx->cur_chunk_tab); + if (ret) + return ret; + + /* Was this the last chunk of the stream? If so, finish + * it. */ + if (list_empty(&cur_lte->msg_list) && + msg->begin_chunk + msg->num_chunks == ctx->cur_chunk_tab->num_chunks) { - DEBUG2("Complete msg (begin_chunk=%"PRIu64")", msg->begin_chunk); - if (msg->begin_chunk == 0) { - DEBUG2("Begin chunk tab"); - - // This is the first set of chunks. Leave space - // for the chunk table in the output file. - off_t cur_offset = ftello(out_fp); - if (cur_offset == -1) { - ret = WIMLIB_ERR_WRITE; - goto out; - } - ret = begin_wim_resource_chunk_tab(cur_lte, - out_fp, - cur_offset, - &cur_chunk_tab); - if (ret != 0) - goto out; - } + u64 res_csize; + off_t offset; - // Write the compressed chunks from the message. - ret = write_wim_chunks(msg, out_fp, cur_chunk_tab); - if (ret != 0) - goto out; - - list_del(&msg->list); - - // This message is available to use for different chunks - // now. - list_add(&msg->list, &available_msgs); - - // Was this the last chunk of the stream? If so, finish - // it. - if (list_empty(&cur_lte->msg_list) && - msg->begin_chunk + msg->num_chunks == cur_chunk_tab->num_chunks) - { - DEBUG2("Finish wim chunk tab"); - u64 res_csize; - ret = finish_wim_resource_chunk_tab(cur_chunk_tab, - out_fp, - &res_csize); - if (ret != 0) - goto out; - - if (res_csize >= wim_resource_size(cur_lte)) { - /* Oops! We compressed the resource to - * larger than the original size. Write - * the resource uncompressed instead. */ - ret = write_uncompressed_resource_and_truncate( - cur_lte, - out_fp, - cur_chunk_tab->file_offset, - &cur_lte->output_resource_entry); - if (ret != 0) - goto out; - } else { - cur_lte->output_resource_entry.size = - res_csize; - - cur_lte->output_resource_entry.original_size = - cur_lte->resource_entry.original_size; - - cur_lte->output_resource_entry.offset = - cur_chunk_tab->file_offset; - - cur_lte->output_resource_entry.flags = - cur_lte->resource_entry.flags | - WIM_RESHDR_FLAG_COMPRESSED; - } + ret = finish_wim_resource_chunk_tab(ctx->cur_chunk_tab, + ctx->out_fd, + &res_csize); + if (ret) + return ret; - do_write_streams_progress(progress, progress_func, - wim_resource_size(cur_lte)); - - FREE(cur_chunk_tab); - cur_chunk_tab = NULL; - - struct list_head *next = cur_lte->write_streams_list.next; - list_del(&cur_lte->write_streams_list); - - if (next == &outstanding_resources) - cur_lte = NULL; - else - cur_lte = container_of(cur_lte->write_streams_list.next, - struct wim_lookup_table_entry, - write_streams_list); - - // Since we just finished writing a stream, - // write any streams that have been added to the - // my_resources list for direct writing by the - // main thread (e.g. resources that don't need - // to be compressed because the desired - // compression type is the same as the previous - // compression type). - ret = do_write_stream_list(&my_resources, - out_fp, - out_ctype, - progress_func, - progress, - 0); - if (ret != 0) - goto out; - } - } - } + list_del(&cur_lte->being_compressed_list); + + /* Grab the offset of this stream in the output file + * from the chunk table before we free it. */ + offset = ctx->cur_chunk_tab->file_offset; + + FREE(ctx->cur_chunk_tab); + ctx->cur_chunk_tab = NULL; + + if (res_csize >= wim_resource_size(cur_lte)) { + /* Oops! 
We compressed the resource to + * larger than the original size. Write + * the resource uncompressed instead. */ + DEBUG("Compressed %"PRIu64" => %"PRIu64" bytes; " + "writing uncompressed instead", + wim_resource_size(cur_lte), res_csize); + ret = seek_and_truncate(ctx->out_fd, offset); + if (ret) + return ret; + ret = write_wim_resource(cur_lte, + ctx->out_fd, + WIMLIB_COMPRESSION_TYPE_NONE, + &cur_lte->output_resource_entry, + ctx->write_resource_flags); + if (ret) + return ret; + } else { + cur_lte->output_resource_entry.size = + res_csize; -out: - if (ret == WIMLIB_ERR_NOMEM) { - ERROR("Could not allocate enough memory for " - "multi-threaded compression"); - } + cur_lte->output_resource_entry.original_size = + cur_lte->resource_entry.original_size; - if (next_lte) { -#ifdef WITH_NTFS_3G - end_wim_resource_read(next_lte, ni); -#else - end_wim_resource_read(next_lte); -#endif - } + cur_lte->output_resource_entry.offset = + offset; - if (ret == 0) { - ret = do_write_stream_list(&my_resources, out_fp, - out_ctype, progress_func, - progress, 0); - } else { - if (msgs) { - size_t num_available_msgs = 0; - struct list_head *cur; + cur_lte->output_resource_entry.flags = + cur_lte->resource_entry.flags | + WIM_RESHDR_FLAG_COMPRESSED; + } - list_for_each(cur, &available_msgs) { - num_available_msgs++; + do_write_streams_progress(ctx->progress, + ctx->progress_func, + wim_resource_size(cur_lte)); + + /* Since we just finished writing a stream, write any + * streams that have been added to the serial_streams + * list for direct writing by the main thread (e.g. + * resources that don't need to be compressed because + * the desired compression type is the same as the + * previous compression type). */ + if (!list_empty(&ctx->serial_streams)) { + ret = do_write_stream_list_serial(&ctx->serial_streams, + ctx->lookup_table, + ctx->out_fd, + ctx->out_ctype, + ctx->write_resource_flags, + ctx->progress_func, + ctx->progress); + if (ret) + return ret; } - while (num_available_msgs < num_messages) { - shared_queue_get(compressed_res_queue); - num_available_msgs++; + /* Advance to the next stream to write. */ + if (list_empty(&ctx->outstanding_streams)) { + cur_lte = NULL; + } else { + cur_lte = container_of(ctx->outstanding_streams.next, + struct wim_lookup_table_entry, + being_compressed_list); } } } + return 0; +} - if (msgs) { - for (size_t i = 0; i < num_messages; i++) { - for (size_t j = 0; j < MAX_CHUNKS_PER_MSG; j++) { - FREE(msgs[i].compressed_chunks[j]); - FREE(msgs[i].uncompressed_chunks[j]); - } +/* Called when the main thread has read a new chunk of data. */ +static int +main_writer_thread_cb(const void *chunk, size_t chunk_size, void *_ctx) +{ + struct main_writer_thread_ctx *ctx = _ctx; + int ret; + struct message *next_msg; + u64 next_chunk_in_msg; + + /* Update SHA1 message digest for the stream currently being read by the + * main thread. */ + sha1_update(&ctx->next_sha_ctx, chunk, chunk_size); + + /* We send chunks of data to the compressor chunks in batches which we + * refer to as "messages". @next_msg is the message that is currently + * being prepared to send off. If it is NULL, that indicates that we + * need to start a new message. */ + next_msg = ctx->next_msg; + if (!next_msg) { + /* We need to start a new message. First check to see if there + * is a message available in the list of available messages. If + * so, we can just take one. 
If not, all the messages (there is + * a fixed number of them, proportional to the number of + * threads) have been sent off to the compressor threads, so we + * receive messages from the compressor threads containing + * compressed chunks of data. + * + * We may need to receive multiple messages before one is + * actually available to use because messages received that are + * *not* for the very next set of chunks to compress must be + * buffered until it's time to write those chunks. */ + while (list_empty(&ctx->available_msgs)) { + ret = receive_compressed_chunks(ctx); + if (ret) + return ret; } - FREE(msgs); + + next_msg = container_of(ctx->available_msgs.next, + struct message, list); + list_del(&next_msg->list); + next_msg->complete = false; + next_msg->begin_chunk = ctx->next_chunk; + next_msg->num_chunks = min(MAX_CHUNKS_PER_MSG, + ctx->next_num_chunks - ctx->next_chunk); + ctx->next_msg = next_msg; + } + + /* Fill in the next chunk to compress */ + next_chunk_in_msg = ctx->next_chunk - next_msg->begin_chunk; + + next_msg->uncompressed_chunk_sizes[next_chunk_in_msg] = chunk_size; + memcpy(next_msg->uncompressed_chunks[next_chunk_in_msg], + chunk, chunk_size); + ctx->next_chunk++; + if (++next_chunk_in_msg == next_msg->num_chunks) { + /* Send off an array of chunks to compress */ + list_add_tail(&next_msg->list, &ctx->next_lte->msg_list); + shared_queue_put(ctx->res_to_compress_queue, next_msg); + ++ctx->num_outstanding_messages; + ctx->next_msg = NULL; } + return 0; +} + +static int +main_writer_thread_finish(void *_ctx) +{ + struct main_writer_thread_ctx *ctx = _ctx; + int ret; + while (ctx->num_outstanding_messages != 0) { + ret = receive_compressed_chunks(ctx); + if (ret) + return ret; + } + wimlib_assert(list_empty(&ctx->outstanding_streams)); + return do_write_stream_list_serial(&ctx->serial_streams, + ctx->lookup_table, + ctx->out_fd, + ctx->out_ctype, + ctx->write_resource_flags, + ctx->progress_func, + ctx->progress); +} + +static int +submit_stream_for_compression(struct wim_lookup_table_entry *lte, + struct main_writer_thread_ctx *ctx) +{ + int ret; - FREE(cur_chunk_tab); + /* Read the entire stream @lte, feeding its data chunks to the + * compressor threads. Also SHA1-sum the stream; this is required in + * the case that @lte is unhashed, and a nice additional verification + * when @lte is already hashed. */ + sha1_init(&ctx->next_sha_ctx); + ctx->next_chunk = 0; + ctx->next_num_chunks = wim_resource_chunks(lte); + ctx->next_lte = lte; + INIT_LIST_HEAD(<e->msg_list); + list_add_tail(<e->being_compressed_list, &ctx->outstanding_streams); + ret = read_resource_prefix(lte, wim_resource_size(lte), + main_writer_thread_cb, ctx, 0); + if (ret == 0) { + wimlib_assert(ctx->next_chunk == ctx->next_num_chunks); + ret = finalize_and_check_sha1(&ctx->next_sha_ctx, lte); + } + return ret; +} + +static int +main_thread_process_next_stream(struct wim_lookup_table_entry *lte, void *_ctx) +{ + struct main_writer_thread_ctx *ctx = _ctx; + int ret; + + if (wim_resource_size(lte) < 1000 || + ctx->out_ctype == WIMLIB_COMPRESSION_TYPE_NONE || + (lte->resource_location == RESOURCE_IN_WIM && + !(ctx->write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS) && + wimlib_get_compression_type(lte->wim) == ctx->out_ctype)) + { + /* Stream is too small or isn't being compressed. Process it by + * the main thread when we have a chance. We can't necessarily + * process it right here, as the main thread could be in the + * middle of writing a different stream. 
*/ + list_add_tail(<e->write_streams_list, &ctx->serial_streams); + lte->deferred = 1; + ret = 0; + } else { + ret = submit_stream_for_compression(lte, ctx); + } + lte->no_progress = 1; return ret; } @@ -1226,15 +1194,41 @@ get_default_num_threads() #endif } +/* Equivalent to write_stream_list_serial(), except this takes a @num_threads + * parameter and will perform compression using that many threads. Falls + * back to write_stream_list_serial() on certain errors, such as a failure to + * create the number of threads requested. + * + * High level description of the algorithm for writing compressed streams in + * parallel: We perform compression on chunks of size WIM_CHUNK_SIZE bytes + * rather than on full files. The currently executing thread becomes the main + * thread and is entirely in charge of reading the data to compress (which may + * be in any location understood by the resource code--- such as in an external + * file being captured, or in another WIM file from which an image is being + * exported) and actually writing the compressed data to the output file. + * Additional threads are "compressor threads" and all execute the + * compressor_thread_proc, where they repeatedly retrieve buffers of data from + * the main thread, compress them, and hand them back to the main thread. + * + * Certain streams, such as streams that do not need to be compressed (e.g. + * input compression type same as output compression type) or streams of very + * small size are placed in a list (main_writer_thread_ctx.serial_list) and + * handled entirely by the main thread at an appropriate time. + * + * At any given point in time, multiple streams may be having chunks compressed + * concurrently. The stream that the main thread is currently *reading* may be + * later in the list that the stream that the main thread is currently + * *writing*. + */ static int write_stream_list_parallel(struct list_head *stream_list, struct wim_lookup_table *lookup_table, - FILE *out_fp, + filedes_t out_fd, int out_ctype, - int write_flags, - unsigned num_threads, + int write_resource_flags, wimlib_progress_func_t progress_func, - union wimlib_progress_info *progress) + union wimlib_progress_info *progress, + unsigned num_threads) { int ret; struct shared_queue res_to_compress_queue; @@ -1246,25 +1240,29 @@ write_stream_list_parallel(struct list_head *stream_list, if (nthreads < 1 || nthreads > UINT_MAX) { WARNING("Could not determine number of processors! 
Assuming 1"); goto out_serial; + } else if (nthreads == 1) { + goto out_serial_quiet; } else { num_threads = nthreads; } } + DEBUG("Writing stream list (parallel version, num_threads=%u)", + num_threads); + progress->write_streams.num_threads = num_threads; - wimlib_assert(stream_list->next != stream_list); - static const double MESSAGES_PER_THREAD = 2.0; + static const size_t MESSAGES_PER_THREAD = 2; size_t queue_size = (size_t)(num_threads * MESSAGES_PER_THREAD); DEBUG("Initializing shared queues (queue_size=%zu)", queue_size); ret = shared_queue_init(&res_to_compress_queue, queue_size); - if (ret != 0) + if (ret) goto out_serial; ret = shared_queue_init(&compressed_res_queue, queue_size); - if (ret != 0) + if (ret) goto out_destroy_res_to_compress_queue; struct compressor_thread_params params; @@ -1279,13 +1277,14 @@ write_stream_list_parallel(struct list_head *stream_list, } for (unsigned i = 0; i < num_threads; i++) { - DEBUG("pthread_create thread %u", i); + DEBUG("pthread_create thread %u of %u", i + 1, num_threads); ret = pthread_create(&compressor_threads[i], NULL, compressor_thread_proc, ¶ms); if (ret != 0) { ret = -1; ERROR_WITH_ERRNO("Failed to create compressor " - "thread %u", i); + "thread %u of %u", + i + 1, num_threads); num_threads = i; goto out_join; } @@ -1294,15 +1293,35 @@ write_stream_list_parallel(struct list_head *stream_list, if (progress_func) progress_func(WIMLIB_PROGRESS_MSG_WRITE_STREAMS, progress); - ret = main_writer_thread_proc(stream_list, - out_fp, - out_ctype, - &res_to_compress_queue, - &compressed_res_queue, - queue_size, - write_flags, - progress_func, - progress); + struct main_writer_thread_ctx ctx; + ctx.stream_list = stream_list; + ctx.lookup_table = lookup_table; + ctx.out_fd = out_fd; + ctx.out_ctype = out_ctype; + ctx.res_to_compress_queue = &res_to_compress_queue; + ctx.compressed_res_queue = &compressed_res_queue; + ctx.num_messages = queue_size; + ctx.write_resource_flags = write_resource_flags; + ctx.progress_func = progress_func; + ctx.progress = progress; + ret = main_writer_thread_init_ctx(&ctx); + if (ret) + goto out_join; + ret = do_write_stream_list(stream_list, lookup_table, + main_thread_process_next_stream, + &ctx, progress_func, progress); + if (ret) + goto out_destroy_ctx; + + /* The main thread has finished reading all streams that are going to be + * compressed in parallel, and it now needs to wait for all remaining + * chunks to be compressed so that the remaining streams can actually be + * written to the output file. Furthermore, any remaining streams that + * had processing deferred to the main thread need to be handled. These + * tasks are done by the main_writer_thread_finish() function. 
*/ + ret = main_writer_thread_finish(&ctx); +out_destroy_ctx: + main_writer_thread_destroy_ctx(&ctx); out_join: for (unsigned i = 0; i < num_threads; i++) shared_queue_put(&res_to_compress_queue, NULL); @@ -1310,7 +1329,8 @@ out_join: for (unsigned i = 0; i < num_threads; i++) { if (pthread_join(compressor_threads[i], NULL)) { WARNING_WITH_ERRNO("Failed to join compressor " - "thread %u", i); + "thread %u of %u", + i + 1, num_threads); } } FREE(compressor_threads); @@ -1322,11 +1342,12 @@ out_destroy_res_to_compress_queue: return ret; out_serial: WARNING("Falling back to single-threaded compression"); +out_serial_quiet: return write_stream_list_serial(stream_list, lookup_table, - out_fp, + out_fd, out_ctype, - write_flags, + write_resource_flags, progress_func, progress); @@ -1334,13 +1355,13 @@ out_serial: #endif /* - * Write a list of streams to a WIM (@out_fp) using the compression type + * Write a list of streams to a WIM (@out_fd) using the compression type * @out_ctype and up to @num_threads compressor threads. */ static int write_stream_list(struct list_head *stream_list, struct wim_lookup_table *lookup_table, - FILE *out_fp, int out_ctype, int write_flags, + filedes_t out_fd, int out_ctype, int write_flags, unsigned num_threads, wimlib_progress_func_t progress_func) { struct wim_lookup_table_entry *lte; @@ -1349,16 +1370,23 @@ write_stream_list(struct list_head *stream_list, u64 total_compression_bytes = 0; union wimlib_progress_info progress; int ret; + int write_resource_flags; if (list_empty(stream_list)) return 0; + write_resource_flags = write_flags_to_resource_flags(write_flags); + + /* Calculate the total size of the streams to be written. Note: this + * will be the uncompressed size, as we may not know the compressed size + * yet, and also this will assume that every unhashed stream will be + * written (which will not necessarily be the case). 
*/ list_for_each_entry(lte, stream_list, write_streams_list) { num_streams++; total_bytes += wim_resource_size(lte); if (out_ctype != WIMLIB_COMPRESSION_TYPE_NONE && (wim_resource_compression_type(lte) != out_ctype || - (write_flags & WIMLIB_WRITE_FLAG_RECOMPRESS))) + (write_resource_flags & WIMLIB_RESOURCE_FLAG_RECOMPRESS))) { total_compression_bytes += wim_resource_size(lte); } @@ -1375,19 +1403,19 @@ write_stream_list(struct list_head *stream_list, if (total_compression_bytes >= 1000000 && num_threads != 1) ret = write_stream_list_parallel(stream_list, lookup_table, - out_fp, + out_fd, out_ctype, - write_flags, - num_threads, + write_resource_flags, progress_func, - &progress); + &progress, + num_threads); else #endif ret = write_stream_list_serial(stream_list, lookup_table, - out_fp, + out_fd, out_ctype, - write_flags, + write_resource_flags, progress_func, &progress); return ret; @@ -1421,15 +1449,15 @@ stream_size_table_insert(struct wim_lookup_table_entry *lte, void *_tab) { struct stream_size_table *tab = _tab; size_t pos; - struct wim_lookup_table_entry *hashed_lte; + struct wim_lookup_table_entry *same_size_lte; struct hlist_node *tmp; pos = hash_u64(wim_resource_size(lte)) % tab->capacity; lte->unique_size = 1; - hlist_for_each_entry(hashed_lte, tmp, &tab->array[pos], hash_list_2) { - if (wim_resource_size(hashed_lte) == wim_resource_size(lte)) { + hlist_for_each_entry(same_size_lte, tmp, &tab->array[pos], hash_list_2) { + if (wim_resource_size(same_size_lte) == wim_resource_size(lte)) { lte->unique_size = 0; - hashed_lte->unique_size = 0; + same_size_lte->unique_size = 0; break; } } @@ -1447,14 +1475,34 @@ struct lte_overwrite_prepare_args { struct stream_size_table stream_size_tab; }; +/* First phase of preparing streams for an in-place overwrite. This is called + * on all streams, both hashed and unhashed, except the metadata resources. */ static int -lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *arg) +lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *_args) { - struct lte_overwrite_prepare_args *args = arg; + struct lte_overwrite_prepare_args *args = _args; - if (lte->resource_location == RESOURCE_IN_WIM && - lte->wim == args->wim) - { + wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA)); + if (lte->resource_location != RESOURCE_IN_WIM || lte->wim != args->wim) + list_add_tail(<e->write_streams_list, &args->stream_list); + lte->out_refcnt = lte->refcnt; + stream_size_table_insert(lte, &args->stream_size_tab); + return 0; +} + +/* Second phase of preparing streams for an in-place overwrite. This is called + * on existing metadata resources and hashed streams, but not unhashed streams. + * + * NOTE: lte->output_resource_entry is in union with lte->hash_list_2, so + * lte_overwrite_prepare_2() must be called after lte_overwrite_prepare(), as + * the latter uses lte->hash_list_2, while the former expects to set + * lte->output_resource_entry. */ +static int +lte_overwrite_prepare_2(struct wim_lookup_table_entry *lte, void *_args) +{ + struct lte_overwrite_prepare_args *args = _args; + + if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == args->wim) { /* We can't do an in place overwrite on the WIM if there are * streams after the XML data. 
*/ if (lte->resource_entry.offset + @@ -1466,19 +1514,6 @@ lte_overwrite_prepare(struct wim_lookup_table_entry *lte, void *arg) #endif return WIMLIB_ERR_RESOURCE_ORDER; } - } else { - wimlib_assert(!(lte->resource_entry.flags & WIM_RESHDR_FLAG_METADATA)); - list_add_tail(<e->write_streams_list, &args->stream_list); - } - lte->out_refcnt = lte->refcnt; - stream_size_table_insert(lte, &args->stream_size_tab); - return 0; -} - -static int -lte_set_output_res_entry(struct wim_lookup_table_entry *lte, void *_wim) -{ - if (lte->resource_location == RESOURCE_IN_WIM && lte->wim == _wim) { copy_resource_entry(<e->output_resource_entry, <e->resource_entry); } @@ -1504,6 +1539,7 @@ prepare_streams_for_overwrite(WIMStruct *wim, off_t end_offset, { int ret; struct lte_overwrite_prepare_args args; + unsigned i; args.wim = wim; args.end_offset = end_offset; @@ -1513,28 +1549,25 @@ prepare_streams_for_overwrite(WIMStruct *wim, off_t end_offset, return ret; INIT_LIST_HEAD(&args.stream_list); - for (int i = 0; i < wim->hdr.image_count; i++) { + for (i = 0; i < wim->hdr.image_count; i++) { struct wim_image_metadata *imd; struct wim_lookup_table_entry *lte; imd = wim->image_metadata[i]; - image_for_each_unhashed_stream(lte, imd) { - ret = lte_overwrite_prepare(lte, &args); - if (ret) - goto out_destroy_stream_size_table; - } + image_for_each_unhashed_stream(lte, imd) + lte_overwrite_prepare(lte, &args); + } + for_lookup_table_entry(wim->lookup_table, lte_overwrite_prepare, &args); + list_transfer(&args.stream_list, stream_list); + + for (i = 0; i < wim->hdr.image_count; i++) { + ret = lte_overwrite_prepare_2(wim->image_metadata[i]->metadata_lte, + &args); + if (ret) + goto out_destroy_stream_size_table; } ret = for_lookup_table_entry(wim->lookup_table, - lte_overwrite_prepare, &args); - if (ret) - goto out_destroy_stream_size_table; - - for (int i = 0; i < wim->hdr.image_count; i++) - lte_set_output_res_entry(wim->image_metadata[i]->metadata_lte, - wim); - for_lookup_table_entry(wim->lookup_table, lte_set_output_res_entry, wim); - INIT_LIST_HEAD(stream_list); - list_splice(&args.stream_list, stream_list); + lte_overwrite_prepare_2, &args); out_destroy_stream_size_table: destroy_stream_size_table(&args.stream_size_tab); return ret; @@ -1569,19 +1602,16 @@ inode_find_streams_to_write(struct wim_inode *inode, static int image_find_streams_to_write(WIMStruct *w) { - struct wim_image_metadata *imd; struct find_streams_ctx *ctx; + struct wim_image_metadata *imd; struct wim_inode *inode; struct wim_lookup_table_entry *lte; ctx = w->private; imd = wim_get_current_image_metadata(w); - image_for_each_unhashed_stream(lte, imd) { + image_for_each_unhashed_stream(lte, imd) lte->out_refcnt = 0; - wimlib_assert(lte->unhashed); - wimlib_assert(lte->back_ptr != NULL); - } /* Go through this image's inodes to find any streams that have not been * found yet. */ @@ -1623,14 +1653,12 @@ prepare_stream_list(WIMStruct *wim, int image, struct list_head *stream_list) wim->private = &ctx; ret = for_image(wim, image, image_find_streams_to_write); destroy_stream_size_table(&ctx.stream_size_tab); - if (ret == 0) { - INIT_LIST_HEAD(stream_list); - list_splice(&ctx.stream_list, stream_list); - } + if (ret == 0) + list_transfer(&ctx.stream_list, stream_list); return ret; } -/* Writes the streams for the specified @image in @wim to @wim->out_fp. +/* Writes the streams for the specified @image in @wim to @wim->out_fd. 
  */
 static int
 write_wim_streams(WIMStruct *wim, int image, int write_flags,
@@ -1645,7 +1673,7 @@ write_wim_streams(WIMStruct *wim, int image, int write_flags,
 		return ret;
 	return write_stream_list(&stream_list,
 				 wim->lookup_table,
-				 wim->out_fp,
+				 wim->out_fd,
 				 wimlib_get_compression_type(wim),
 				 write_flags,
 				 num_threads,
@@ -1687,7 +1715,6 @@ finish_write(WIMStruct *w, int image, int write_flags,
 {
 	int ret;
 	struct wim_header hdr;
-	FILE *out = w->out_fp;
 
 	/* @hdr will be the header for the new WIM.  First copy all the data
 	 * from the header in the WIMStruct; then set all the fields that may
@@ -1695,46 +1722,49 @@ finish_write(WIMStruct *w, int image, int write_flags,
 	 * count. */
 	memcpy(&hdr, &w->hdr, sizeof(struct wim_header));
 
+	/* Set image count and boot index correctly for single image writes */
+	if (image != WIMLIB_ALL_IMAGES) {
+		hdr.image_count = 1;
+		if (hdr.boot_idx == image)
+			hdr.boot_idx = 1;
+		else
+			hdr.boot_idx = 0;
+	}
+
+	/* In the WIM header, there is room for the resource entry for a
+	 * metadata resource labeled as the "boot metadata".  This entry should
+	 * be zeroed out if there is no bootable image (boot_idx 0).  Otherwise,
+	 * it should be a copy of the resource entry for the image that is
+	 * marked as bootable.  This is not well documented... */
+	if (hdr.boot_idx == 0) {
+		zero_resource_entry(&hdr.boot_metadata_res_entry);
+	} else {
+		copy_resource_entry(&hdr.boot_metadata_res_entry,
+				    &w->image_metadata[hdr.boot_idx - 1
					]->metadata_lte->output_resource_entry);
+	}
+
 	if (!(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
 		ret = write_lookup_table(w, image, &hdr.lookup_table_res_entry);
-		if (ret != 0)
-			goto out;
+		if (ret)
+			goto out_close_wim;
 	}
 
-	ret = write_xml_data(w->wim_info, image, out,
+	ret = write_xml_data(w->wim_info, image, w->out_fd,
 			     (write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE) ?
 			      wim_info_get_total_bytes(w->wim_info) : 0,
 			     &hdr.xml_res_entry);
-	if (ret != 0)
-		goto out;
+	if (ret)
+		goto out_close_wim;
 
 	if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) {
 		if (write_flags & WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML) {
 			struct wim_header checkpoint_hdr;
 			memcpy(&checkpoint_hdr, &hdr, sizeof(struct wim_header));
-			memset(&checkpoint_hdr.integrity, 0, sizeof(struct resource_entry));
-			if (fseeko(out, 0, SEEK_SET) != 0) {
-				ERROR_WITH_ERRNO("Failed to seek to beginning "
-						 "of WIM being written");
-				ret = WIMLIB_ERR_WRITE;
-				goto out;
-			}
-			ret = write_header(&checkpoint_hdr, out);
-			if (ret != 0)
-				goto out;
-
-			if (fflush(out) != 0) {
-				ERROR_WITH_ERRNO("Can't write data to WIM");
-				ret = WIMLIB_ERR_WRITE;
-				goto out;
-			}
-
-			if (fseeko(out, 0, SEEK_END) != 0) {
-				ERROR_WITH_ERRNO("Failed to seek to end "
-						 "of WIM being written");
-				ret = WIMLIB_ERR_WRITE;
-				goto out;
-			}
+			zero_resource_entry(&checkpoint_hdr.integrity);
+			ret = write_header(&checkpoint_hdr, w->out_fd);
+			if (ret)
+				goto out_close_wim;
 		}
 
 		off_t old_lookup_table_end;
@@ -1748,80 +1778,44 @@ finish_write(WIMStruct *w, int image, int write_flags,
 		new_lookup_table_end = hdr.lookup_table_res_entry.offset +
 				       hdr.lookup_table_res_entry.size;
 
-		ret = write_integrity_table(out,
+		ret = write_integrity_table(w->out_fd,
 					    &hdr.integrity,
 					    new_lookup_table_end,
 					    old_lookup_table_end,
 					    progress_func);
-		if (ret != 0)
-			goto out;
-	} else {
-		memset(&hdr.integrity, 0, sizeof(struct resource_entry));
-	}
-
-	/*
-	 * In the WIM header, there is room for the resource entry for a
-	 * metadata resource labeled as the "boot metadata".  This entry should
-	 * be zeroed out if there is no bootable image (boot_idx 0).  Otherwise,
-	 * it should be a copy of the resource entry for the image that is
-	 * marked as bootable.  This is not well documented...
-	 */
-
-	/* Set image count and boot index correctly for single image writes */
-	if (image != WIMLIB_ALL_IMAGES) {
-		hdr.image_count = 1;
-		if (hdr.boot_idx == image)
-			hdr.boot_idx = 1;
-		else
-			hdr.boot_idx = 0;
-	}
-
-	if (hdr.boot_idx == 0) {
-		memset(&hdr.boot_metadata_res_entry, 0,
-		       sizeof(struct resource_entry));
+		if (ret)
+			goto out_close_wim;
 	} else {
-		memcpy(&hdr.boot_metadata_res_entry,
-		       &w->image_metadata[
-				hdr.boot_idx - 1]->metadata_lte->output_resource_entry,
-		       sizeof(struct resource_entry));
-	}
-
-	if (fseeko(out, 0, SEEK_SET) != 0) {
-		ERROR_WITH_ERRNO("Failed to seek to beginning of WIM "
-				 "being written");
-		ret = WIMLIB_ERR_WRITE;
-		goto out;
+		zero_resource_entry(&hdr.integrity);
 	}
 
-	ret = write_header(&hdr, out);
+	ret = write_header(&hdr, w->out_fd);
 	if (ret)
-		goto out;
+		goto out_close_wim;
 
 	if (write_flags & WIMLIB_WRITE_FLAG_FSYNC) {
-		if (fflush(out) != 0
-		    || fsync(fileno(out)) != 0)
-		{
-			ERROR_WITH_ERRNO("Error flushing data to WIM file");
+		if (fsync(w->out_fd)) {
+			ERROR_WITH_ERRNO("Error syncing data to WIM file");
 			ret = WIMLIB_ERR_WRITE;
 		}
 	}
-out:
-	if (fclose(out) != 0) {
-		ERROR_WITH_ERRNO("Failed to close the WIM file");
+out_close_wim:
	if (close(w->out_fd)) {
+		ERROR_WITH_ERRNO("Failed to close the output WIM file");
 		if (ret == 0)
 			ret = WIMLIB_ERR_WRITE;
 	}
-	w->out_fp = NULL;
+	w->out_fd = INVALID_FILEDES;
 	return ret;
 }
 
 #if defined(HAVE_SYS_FILE_H) && defined(HAVE_FLOCK)
 int
-lock_wim(WIMStruct *w, FILE *fp)
+lock_wim(WIMStruct *w, filedes_t fd)
 {
 	int ret = 0;
-	if (fp && !w->wim_locked) {
-		ret = flock(fileno(fp), LOCK_EX | LOCK_NB);
+	if (fd != INVALID_FILEDES && !w->wim_locked) {
+		ret = flock(fd, LOCK_EX | LOCK_NB);
 		if (ret != 0) {
 			if (errno == EWOULDBLOCK) {
 				ERROR("`%"TS"' is already being modified or has been "
@@ -1842,37 +1836,24 @@ lock_wim(WIMStruct *w, FILE *fp)
 #endif
 
 static int
-open_wim_writable(WIMStruct *w, const tchar *path,
-		  bool trunc, bool also_readable)
+open_wim_writable(WIMStruct *w, const tchar *path, int open_flags)
 {
-	const tchar *mode;
-	if (trunc)
-		if (also_readable)
-			mode = T("w+b");
-		else
-			mode = T("wb");
-	else
-		mode = T("r+b");
-
-	wimlib_assert(w->out_fp == NULL);
-	w->out_fp = tfopen(path, mode);
-	if (w->out_fp) {
-		return 0;
-	} else {
+	w->out_fd = topen(path, open_flags, 0644);
+	if (w->out_fd == INVALID_FILEDES) {
 		ERROR_WITH_ERRNO("Failed to open `%"TS"' for writing", path);
 		return WIMLIB_ERR_OPEN;
 	}
+	return 0;
 }
 
 void
 close_wim_writable(WIMStruct *w)
 {
-	if (w->out_fp) {
-		if (fclose(w->out_fp) != 0) {
+	if (w->out_fd != INVALID_FILEDES) {
+		if (close(w->out_fd))
 			WARNING_WITH_ERRNO("Failed to close output WIM");
-		}
-		w->out_fp = NULL;
+		w->out_fd = INVALID_FILEDES;
 	}
 }
 
@@ -1881,12 +1862,23 @@
 int
 begin_write(WIMStruct *w, const tchar *path, int write_flags)
 {
 	int ret;
-	ret = open_wim_writable(w, path, true,
-				(write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) != 0);
+	int open_flags = O_TRUNC | O_CREAT;
+	if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
+		open_flags |= O_RDWR;
+	else
+		open_flags |= O_WRONLY;
+	ret = open_wim_writable(w, path, open_flags);
 	if (ret)
 		return ret;
 
 	/* Write dummy header.  It will be overwritten later. */
-	return write_header(&w->hdr, w->out_fp);
+	ret = write_header(&w->hdr, w->out_fd);
+	if (ret)
+		return ret;
+	if (lseek(w->out_fd, WIM_HEADER_DISK_SIZE, SEEK_SET) == -1) {
+		ERROR_WITH_ERRNO("Failed to seek to end of WIM header");
+		return WIMLIB_ERR_WRITE;
+	}
+	return 0;
 }
 
 /* Writes a stand-alone WIM to a file. */
@@ -1913,26 +1905,29 @@ wimlib_write(WIMStruct *w, const tchar *path,
 	ret = begin_write(w, path, write_flags);
 	if (ret)
-		goto out;
+		goto out_close_wim;
 
 	ret = write_wim_streams(w, image, write_flags, num_threads,
 				progress_func);
 	if (ret)
-		goto out;
+		goto out_close_wim;
 
 	if (progress_func)
 		progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_BEGIN, NULL);
 
 	ret = for_image(w, image, write_metadata_resource);
 	if (ret)
-		goto out;
+		goto out_close_wim;
 
 	if (progress_func)
 		progress_func(WIMLIB_PROGRESS_MSG_WRITE_METADATA_END, NULL);
 
 	ret = finish_write(w, image, write_flags, progress_func);
-out:
+	/* finish_write() closed the WIM for us */
+	goto out;
+out_close_wim:
 	close_wim_writable(w);
+out:
 	DEBUG("wimlib_write(path=%"TS") = %d", path, ret);
 	return ret;
 }
@@ -2011,6 +2006,8 @@ overwrite_wim_inplace(WIMStruct *w, int write_flags,
 	int ret;
 	struct list_head stream_list;
 	off_t old_wim_end;
+	u64 old_lookup_table_end, old_xml_begin, old_xml_end;
+	int open_flags;
 
 	DEBUG("Overwriting `%"TS"' in-place", w->filename);
@@ -2018,51 +2015,67 @@ overwrite_wim_inplace(WIMStruct *w, int write_flags,
 	 * data, and that there are no stream resources, metadata resources, or
 	 * lookup tables after the XML data.  Otherwise, these data would be
 	 * overwritten. */
-	if (w->hdr.integrity.offset != 0 &&
-	    w->hdr.integrity.offset < w->hdr.xml_res_entry.offset) {
+	old_xml_begin = w->hdr.xml_res_entry.offset;
+	old_xml_end = old_xml_begin + w->hdr.xml_res_entry.size;
+	old_lookup_table_end = w->hdr.lookup_table_res_entry.offset +
+			       w->hdr.lookup_table_res_entry.size;
+	if (w->hdr.integrity.offset != 0 && w->hdr.integrity.offset < old_xml_end) {
 		ERROR("Didn't expect the integrity table to be before the XML data");
 		return WIMLIB_ERR_RESOURCE_ORDER;
 	}
-	if (w->hdr.lookup_table_res_entry.offset > w->hdr.xml_res_entry.offset) {
+	if (old_lookup_table_end > old_xml_begin) {
 		ERROR("Didn't expect the lookup table to be after the XML data");
 		return WIMLIB_ERR_RESOURCE_ORDER;
 	}
-
-	if (w->hdr.integrity.offset)
-		old_wim_end = w->hdr.integrity.offset + w->hdr.integrity.size;
-	else
-		old_wim_end = w->hdr.xml_res_entry.offset + w->hdr.xml_res_entry.size;
-
+	/* Set @old_wim_end, which indicates the point beyond which we don't
+	 * allow any file and metadata resources to appear without returning
+	 * WIMLIB_ERR_RESOURCE_ORDER (due to the fact that we would otherwise
+	 * overwrite these resources). */
 	if (!w->deletion_occurred && !any_images_modified(w)) {
 		/* If no images have been modified and no images have been
-		 * deleted, a new lookup table does not need to be written. */
-		old_wim_end = w->hdr.lookup_table_res_entry.offset +
-			      w->hdr.lookup_table_res_entry.size;
+		 * deleted, a new lookup table does not need to be written.  We
+		 * shall write the new XML data and optional integrity table
+		 * immediately after the lookup table.  Note that this may
+		 * overwrite an existing integrity table. */
+		DEBUG("Skipping writing lookup table "
+		      "(no images modified or deleted)");
+		old_wim_end = old_lookup_table_end;
 		write_flags |= WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE |
 			       WIMLIB_WRITE_FLAG_CHECKPOINT_AFTER_XML;
+	} else if (w->hdr.integrity.offset) {
+		/* Old WIM has an integrity table; begin writing new streams
+		 * after it. */
+		old_wim_end = w->hdr.integrity.offset + w->hdr.integrity.size;
+	} else {
+		/* No existing integrity table; begin writing new streams after
+		 * the old XML data. */
+		old_wim_end = old_xml_end;
 	}
+
 	ret = prepare_streams_for_overwrite(w, old_wim_end, &stream_list);
 	if (ret)
 		return ret;
 
-	ret = open_wim_writable(w, w->filename, false,
-				(write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY) != 0);
+	open_flags = 0;
+	if (write_flags & WIMLIB_WRITE_FLAG_CHECK_INTEGRITY)
+		open_flags |= O_RDWR;
+	else
+		open_flags |= O_WRONLY;
+	ret = open_wim_writable(w, w->filename, open_flags);
 	if (ret)
 		return ret;
 
-	ret = lock_wim(w, w->out_fp);
+	ret = lock_wim(w, w->out_fd);
 	if (ret) {
-		fclose(w->out_fp);
-		w->out_fp = NULL;
+		close_wim_writable(w);
 		return ret;
 	}
 
-	if (fseeko(w->out_fp, old_wim_end, SEEK_SET) != 0) {
+	if (lseek(w->out_fd, old_wim_end, SEEK_SET) == -1) {
 		ERROR_WITH_ERRNO("Can't seek to end of WIM");
-		fclose(w->out_fp);
-		w->out_fp = NULL;
+		close_wim_writable(w);
 		w->wim_locked = 0;
 		return WIMLIB_ERR_WRITE;
 	}
@@ -2071,26 +2084,26 @@ overwrite_wim_inplace(WIMStruct *w, int write_flags,
 	      old_wim_end);
 
 	ret = write_stream_list(&stream_list,
 				w->lookup_table,
-				w->out_fp,
+				w->out_fd,
 				wimlib_get_compression_type(w),
 				write_flags,
 				num_threads,
 				progress_func);
 	if (ret)
-		goto out_ftruncate;
+		goto out_truncate;
 
 	for (int i = 0; i < w->hdr.image_count; i++) {
 		if (w->image_metadata[i]->modified) {
 			select_wim_image(w, i + 1);
 			ret = write_metadata_resource(w);
 			if (ret)
-				goto out_ftruncate;
+				goto out_truncate;
 		}
 	}
 	write_flags |= WIMLIB_WRITE_FLAG_REUSE_INTEGRITY_TABLE;
 	ret = finish_write(w, WIMLIB_ALL_IMAGES, write_flags,
 			   progress_func);
-out_ftruncate:
+out_truncate:
 	close_wim_writable(w);
 	if (ret != 0 && !(write_flags & WIMLIB_WRITE_FLAG_NO_LOOKUP_TABLE)) {
 		WARNING("Truncating `%"TS"' to its original size (%"PRIu64" bytes)",
@@ -2124,30 +2137,20 @@ overwrite_wim_via_tmpfile(WIMStruct *w, int write_flags,
 	ret = wimlib_write(w, tmpfile, WIMLIB_ALL_IMAGES,
 			   write_flags | WIMLIB_WRITE_FLAG_FSYNC,
 			   num_threads, progress_func);
-	if (ret != 0) {
+	if (ret) {
 		ERROR("Failed to write the WIM file `%"TS"'", tmpfile);
-		goto err;
+		goto out_unlink;
 	}
 
-	DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, w->filename);
-
-#ifdef __WIN32__
-	/* Windows won't let you delete open files unless FILE_SHARE_DELETE was
-	 * specified to CreateFile().  The WIM was opened with fopen(), which
-	 * didn't provided this flag to CreateFile, so the handle must be closed
-	 * before executing the rename(). */
-	if (w->fp != NULL) {
-		fclose(w->fp);
-		w->fp = NULL;
-	}
-#endif
+	close_wim(w);
+	DEBUG("Renaming `%"TS"' to `%"TS"'", tmpfile, w->filename);
 
 	/* Rename the new file to the old file .*/
 	if (trename(tmpfile, w->filename) != 0) {
 		ERROR_WITH_ERRNO("Failed to rename `%"TS"' to `%"TS"'",
 				 tmpfile, w->filename);
 		ret = WIMLIB_ERR_RENAME;
-		goto err;
+		goto out_unlink;
 	}
 
 	if (progress_func) {
@@ -2156,27 +2159,12 @@ overwrite_wim_via_tmpfile(WIMStruct *w, int write_flags,
 		progress.rename.to = w->filename;
 		progress_func(WIMLIB_PROGRESS_MSG_RENAME, &progress);
 	}
-
-	/* Close the original WIM file that was opened for reading. */
-	if (w->fp != NULL) {
-		fclose(w->fp);
-		w->fp = NULL;
-	}
-
-	/* Re-open the WIM read-only. */
-	w->fp = tfopen(w->filename, T("rb"));
-	if (w->fp == NULL) {
-		ret = WIMLIB_ERR_REOPEN;
-		WARNING_WITH_ERRNO("Failed to re-open `%"TS"' read-only",
-				   w->filename);
-		FREE(w->filename);
-		w->filename = NULL;
-	}
-	return ret;
-err:
+	goto out;
+out_unlink:
 	/* Remove temporary file. */
 	if (tunlink(tmpfile) != 0)
 		WARNING_WITH_ERRNO("Failed to remove `%"TS"'", tmpfile);
+out:
 	return ret;
 }