+/* Read the full data of the specified blob, passing the data into the specified
+ * callbacks (all of which are optional) and either checking or computing the
+ * SHA-1 message digest of the blob. */
+int
+read_blob_with_sha1(struct blob_descriptor *blob,
+ const struct read_blob_callbacks *cbs)
+{
+ struct hasher_context hasher_ctx = {
+ .flags = VERIFY_BLOB_HASHES | COMPUTE_MISSING_BLOB_HASHES,
+ .cbs = *cbs,
+ };
+ struct read_blob_callbacks hasher_cbs = {
+ .begin_blob = hasher_begin_blob,
+ .continue_blob = hasher_continue_blob,
+ .end_blob = hasher_end_blob,
+ .ctx = &hasher_ctx,
+ };
+ return read_blob_with_cbs(blob, &hasher_cbs);
+}
+
+static int
+read_blobs_in_solid_resource(struct blob_descriptor *first_blob,
+ struct blob_descriptor *last_blob,
+ size_t blob_count,
+ size_t list_head_offset,
+ const struct read_blob_callbacks *sink_cbs)
+{
+ struct data_range *ranges;
+ bool ranges_malloced;
+ struct blob_descriptor *cur_blob;
+ size_t i;
+ int ret;
+ u64 ranges_alloc_size;
+
+ /* Setup data ranges array (one range per blob to read); this way
+ * read_compressed_wim_resource() does not need to be aware of blobs.
+ */
+
+ ranges_alloc_size = (u64)blob_count * sizeof(ranges[0]);
+
+ if (unlikely((size_t)ranges_alloc_size != ranges_alloc_size))
+ goto oom;
+
+ if (ranges_alloc_size <= STACK_MAX) {
+ ranges = alloca(ranges_alloc_size);
+ ranges_malloced = false;
+ } else {
+ ranges = MALLOC(ranges_alloc_size);
+ if (unlikely(!ranges))
+ goto oom;
+ ranges_malloced = true;
+ }
+
+ for (i = 0, cur_blob = first_blob;
+ i < blob_count;
+ i++, cur_blob = next_blob(cur_blob, list_head_offset))
+ {
+ ranges[i].offset = cur_blob->offset_in_res;
+ ranges[i].size = cur_blob->size;
+ }
+
+ struct blobifier_context blobifier_ctx = {
+ .cbs = *sink_cbs,
+ .cur_blob = first_blob,
+ .next_blob = next_blob(first_blob, list_head_offset),
+ .cur_blob_offset = 0,
+ .final_blob = last_blob,
+ .list_head_offset = list_head_offset,
+ };
+ struct consume_chunk_callback cb = {
+ .func = blobifier_cb,
+ .ctx = &blobifier_ctx,
+ };
+
+ ret = read_compressed_wim_resource(first_blob->rdesc, ranges,
+ blob_count, &cb);
+
+ if (ranges_malloced)
+ FREE(ranges);
+
+ if (unlikely(ret && blobifier_ctx.cur_blob_offset != 0)) {
+ ret = call_end_blob(blobifier_ctx.cur_blob, ret,
+ &blobifier_ctx.cbs);
+ }
+ return ret;
+
+oom:
+ ERROR("Too many blobs in one resource!");
+ return WIMLIB_ERR_NOMEM;
+}
+
+/*
+ * Read a list of blobs, each of which may be in any supported location (e.g.
+ * in a WIM or in an external file). This function optimizes the case where
+ * multiple blobs are combined into a single solid compressed WIM resource by
+ * reading the blobs in sequential order, only decompressing the solid resource
+ * one time.
+ *
+ * @blob_list
+ * List of blobs to read.
+ * @list_head_offset
+ * Offset of the `struct list_head' within each `struct blob_descriptor'
+ * that makes up the @blob_list.
+ * @cbs
+ * Callback functions to accept the blob data.
+ * @flags
+ * Bitwise OR of zero or more of the following flags:
+ *
+ * VERIFY_BLOB_HASHES:
+ * For all blobs being read that have already had SHA-1 message
+ * digests computed, calculate the SHA-1 message digest of the read
+ * data and compare it with the previously computed value. If they
+ * do not match, return WIMLIB_ERR_INVALID_RESOURCE_HASH.
+ *
 *	COMPUTE_MISSING_BLOB_HASHES:
+ * For all blobs being read that have not yet had their SHA-1
+ * message digests computed, calculate and save their SHA-1 message
+ * digests.
+ *
 *	BLOB_LIST_ALREADY_SORTED:
+ * @blob_list is already sorted in sequential order for reading.