/*
 * Allocate a new, zeroed blob descriptor, or return NULL on allocation
 * failure.
 *
 * Because CALLOC() zero-initializes the structure, the returned descriptor
 * starts with blob_location == BLOB_NONEXISTENT (asserted below to be 0) and
 * a reference count of 0; callers take references explicitly.
 */
struct blob_descriptor *
new_blob_descriptor(void)
{
	/* The zeroed allocation is only a valid descriptor if the
	 * "nonexistent" location really is the all-zeroes value.  */
	BUILD_BUG_ON(BLOB_NONEXISTENT != 0);

	return CALLOC(1, sizeof(struct blob_descriptor));
}
struct blob_descriptor *
}
break;
#endif
- default:
- break;
}
return new;
/*
 * Decrement the reference count of @blob by 1.  Convenience wrapper around
 * blob_subtract_refcnt() with a count of 1; see that function for what
 * happens when the count reaches 0.
 */
void
blob_decrement_refcnt(struct blob_descriptor *blob, struct blob_table *table)
{
	blob_subtract_refcnt(blob, table, 1);
}
+
+void
+blob_subtract_refcnt(struct blob_descriptor *blob, struct blob_table *table,
+ u32 count)
+{
+ if (unlikely(blob->refcnt < count)) {
+ blob->refcnt = 0; /* See comment above */
return;
+ }
+
+ blob->refcnt -= count;
- if (--blob->refcnt != 0)
+ if (blob->refcnt != 0)
return;
if (blob->unhashed) {
size_t old_capacity, new_capacity;
struct hlist_head *old_array, *new_array;
struct blob_descriptor *blob;
- struct hlist_node *cur, *tmp;
+ struct hlist_node *tmp;
size_t i;
old_capacity = table->capacity;
table->capacity = new_capacity;
for (i = 0; i < old_capacity; i++) {
- hlist_for_each_entry_safe(blob, cur, tmp, &old_array[i], hash_list) {
+ hlist_for_each_entry_safe(blob, tmp, &old_array[i], hash_list) {
hlist_del(&blob->hash_list);
blob_table_insert_raw(table, blob);
}
{
size_t i;
struct blob_descriptor *blob;
- struct hlist_node *pos;
i = load_size_t_unaligned(hash) % table->capacity;
- hlist_for_each_entry(blob, pos, &table->array[i], hash_list)
+ hlist_for_each_entry(blob, &table->array[i], hash_list)
if (hashes_equal(hash, blob->hash))
return blob;
return NULL;
int (*visitor)(struct blob_descriptor *, void *), void *arg)
{
struct blob_descriptor *blob;
- struct hlist_node *pos, *tmp;
+ struct hlist_node *tmp;
int ret;
for (size_t i = 0; i < table->capacity; i++) {
- hlist_for_each_entry_safe(blob, pos, tmp, &table->array[i],
+ hlist_for_each_entry_safe(blob, tmp, &table->array[i],
hash_list)
{
ret = visitor(blob, arg);
/* XXX: This linear search will be slow in the degenerate case where the
* number of solid resources in the run is huge. */
blob->size = reshdr->size_in_wim;
- blob->flags = reshdr->flags;
for (size_t i = 0; i < num_rdescs; i++) {
if (offset + blob->size <= rdescs[i]->uncompressed_size) {
- blob->offset_in_res = offset;
- blob_set_is_located_in_wim_resource(blob, rdescs[i]);
+ blob_set_is_located_in_wim_resource(blob, rdescs[i], offset);
return 0;
}
offset -= rdescs[i]->uncompressed_size;
wim_res_hdr_to_desc(&reshdr, wim, rdesc);
- cur_blob->offset_in_res = 0;
- cur_blob->size = reshdr.uncompressed_size;
- cur_blob->flags = reshdr.flags;
-
- blob_set_is_located_in_wim_resource(cur_blob, rdesc);
+ blob_set_is_located_in_nonsolid_wim_resource(cur_blob, rdesc);
}
/* cur_blob is now a blob bound to a resource. */
if (reshdr.flags & WIM_RESHDR_FLAG_METADATA) {
+ cur_blob->is_metadata = 1;
+
/* Blob table entry for a metadata resource. */
/* Metadata entries with no references must be ignored.
}
/* Note: the list of blob descriptors must be sorted so that all entries for the
 * same solid resource are consecutive.  In addition, blob descriptors for
 * metadata resources must be in the same order as the indices of the underlying
 * images. */
int
write_blob_table_from_blob_list(struct list_head *blob_list,
struct filedes *out_fd,
* compressed blob table, MS software cannot. */
ret = write_wim_resource_from_buffer(table_buf,
table_size,
- WIM_RESHDR_FLAG_METADATA,
+ true,
out_fd,
WIMLIB_COMPRESSION_TYPE_NONE,
0,
struct blob_table *blob_table)
{
	u8 hash[SHA1_HASH_SIZE];
	struct blob_descriptor *blob;
	void *buffer_copy;

	/* Hash the buffer; an existing blob with the same SHA-1 is reused
	 * as-is (reference counting is the caller's responsibility).  */
	sha1_buffer(buffer, size, hash);

	blob = lookup_blob(blob_table, hash);
	if (blob)
		return blob;

	/* No existing blob: create a new descriptor backed by a private copy
	 * of @buffer, then insert it into the blob table.  Returns NULL on
	 * allocation failure, with no blob leaked.  */
	blob = new_blob_descriptor();
	if (!blob)
		return NULL;

	buffer_copy = memdup(buffer, size);
	if (!buffer_copy) {
		free_blob_descriptor(blob);
		return NULL;
	}
	blob_set_is_located_in_attached_buffer(blob, buffer_copy, size);
	copy_hash(blob->hash, hash);
	blob_table_insert(blob_table, blob);
	return blob;
}
+
+struct blob_descriptor *
+after_blob_hashed(struct blob_descriptor *blob,
+ struct blob_descriptor **back_ptr,
+ struct blob_table *blob_table)
+{
+ struct blob_descriptor *duplicate_blob;
+
+ list_del(&blob->unhashed_list);
+ blob->unhashed = 0;
+
+ /* Look for a duplicate blob */
+ duplicate_blob = lookup_blob(blob_table, blob->hash);
+ if (duplicate_blob) {
+ /* We have a duplicate blob. Transfer the reference counts from
+ * this blob to the duplicate and update the reference to this
+ * blob (from a stream) to point to the duplicate. The caller
+ * is responsible for freeing @blob if needed. */
+ wimlib_assert(duplicate_blob->size == blob->size);
+ duplicate_blob->refcnt += blob->refcnt;
+ blob->refcnt = 0;
+ *back_ptr = duplicate_blob;
+ return duplicate_blob;
} else {
- void *buffer_copy;
- blob = new_blob_descriptor();
- if (blob == NULL)
- return NULL;
- buffer_copy = memdup(buffer, size);
- if (buffer_copy == NULL) {
- free_blob_descriptor(blob);
- return NULL;
- }
- blob->blob_location = BLOB_IN_ATTACHED_BUFFER;
- blob->attached_buffer = buffer_copy;
- blob->size = size;
- copy_hash(blob->hash, hash);
+ /* No duplicate blob, so we need to insert this blob into the
+ * blob table and treat it as a hashed blob. */
blob_table_insert(blob_table, blob);
+ return blob;
}
- return blob;
}
/*
hash_unhashed_blob(struct blob_descriptor *blob, struct blob_table *blob_table,
		   struct blob_descriptor **blob_ret)
{
	struct blob_descriptor **back_ptr;
	int ret;

	/* back_ptr must be saved before hashing, because the fields it is
	 * derived from are in union with the SHA-1 message digest and will no
	 * longer be valid once the SHA-1 has been calculated. */
	back_ptr = retrieve_pointer_to_unhashed_blob(blob);

	ret = sha1_blob(blob);
	if (ret)
		return ret;

	/* Duplicate detection and blob table insertion are factored out into
	 * after_blob_hashed(); *blob_ret receives whichever descriptor ends
	 * up in the table (possibly a pre-existing duplicate). */
	*blob_ret = after_blob_hashed(blob, back_ptr, blob_table);
	return 0;
}
wentry->uncompressed_size = blob->size;
if (blob->blob_location == BLOB_IN_WIM) {
+ unsigned res_flags = blob->rdesc->flags;
+
wentry->part_number = blob->rdesc->wim->hdr.part_number;
- if (blob->flags & WIM_RESHDR_FLAG_SOLID) {
+ if (res_flags & WIM_RESHDR_FLAG_SOLID) {
wentry->offset = blob->offset_in_res;
} else {
wentry->compressed_size = blob->rdesc->size_in_wim;
wentry->raw_resource_offset_in_wim = blob->rdesc->offset_in_wim;
wentry->raw_resource_compressed_size = blob->rdesc->size_in_wim;
wentry->raw_resource_uncompressed_size = blob->rdesc->uncompressed_size;
+
+ wentry->is_compressed = (res_flags & WIM_RESHDR_FLAG_COMPRESSED) != 0;
+ wentry->is_free = (res_flags & WIM_RESHDR_FLAG_FREE) != 0;
+ wentry->is_spanned = (res_flags & WIM_RESHDR_FLAG_SPANNED) != 0;
+ wentry->packed = (res_flags & WIM_RESHDR_FLAG_SOLID) != 0;
}
- copy_hash(wentry->sha1_hash, blob->hash);
+ if (!blob->unhashed)
+ copy_hash(wentry->sha1_hash, blob->hash);
wentry->reference_count = blob->refcnt;
- wentry->is_compressed = (blob->flags & WIM_RESHDR_FLAG_COMPRESSED) != 0;
- wentry->is_metadata = (blob->flags & WIM_RESHDR_FLAG_METADATA) != 0;
- wentry->is_free = (blob->flags & WIM_RESHDR_FLAG_FREE) != 0;
- wentry->is_spanned = (blob->flags & WIM_RESHDR_FLAG_SPANNED) != 0;
- wentry->packed = (blob->flags & WIM_RESHDR_FLAG_SOLID) != 0;
+ wentry->is_metadata = blob->is_metadata;
}
struct iterate_blob_context {
if (wim_has_metadata(wim)) {
int ret;
for (int i = 0; i < wim->hdr.image_count; i++) {
- ret = do_iterate_blob(wim->image_metadata[i]->metadata_blob,
- &ctx);
+ struct blob_descriptor *blob;
+ struct wim_image_metadata *imd = wim->image_metadata[i];
+
+ ret = do_iterate_blob(imd->metadata_blob, &ctx);
if (ret)
return ret;
+ image_for_each_unhashed_blob(blob, imd) {
+ ret = do_iterate_blob(blob, &ctx);
+ if (ret)
+ return ret;
+ }
}
}
return for_blob_in_table(wim->blob_table, do_iterate_blob, &ctx);