1 /*
2  * resource.c
3  *
4  * Code for reading streams and resources, including compressed WIM resources.
5  */
6
7 /*
8  * Copyright (C) 2012, 2013 Eric Biggers
9  *
10  * This file is free software; you can redistribute it and/or modify it under
11  * the terms of the GNU Lesser General Public License as published by the Free
12  * Software Foundation; either version 3 of the License, or (at your option) any
13  * later version.
14  *
15  * This file is distributed in the hope that it will be useful, but WITHOUT
16  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17  * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
18  * details.
19  *
20  * You should have received a copy of the GNU Lesser General Public License
21  * along with this file; if not, see http://www.gnu.org/licenses/.
22  */
23
24 #ifdef HAVE_CONFIG_H
25 #  include "config.h"
26 #endif
27
28 #include <errno.h>
29 #include <fcntl.h>
30 #include <unistd.h>
31
32 #include "wimlib/alloca.h"
33 #include "wimlib/assert.h"
34 #include "wimlib/bitops.h"
35 #include "wimlib/endianness.h"
36 #include "wimlib/error.h"
37 #include "wimlib/file_io.h"
38 #include "wimlib/lookup_table.h"
39 #include "wimlib/resource.h"
40 #include "wimlib/sha1.h"
41 #include "wimlib/wim.h"
42
43 #ifdef __WIN32__
44 /* for read_winnt_file_prefix(), read_win32_encrypted_file_prefix() */
45 #  include "wimlib/win32.h"
46 #endif
47
48 #ifdef WITH_NTFS_3G
49 /* for read_ntfs_file_prefix() */
50 #  include "wimlib/ntfs_3g.h"
51 #endif
52
53
54 /*
55  *                         Compressed WIM resources
56  *
57  * A compressed resource in a WIM consists of a number of compressed chunks,
58  * each of which decompresses to a fixed chunk size (given in the WIM header;
59  * usually 32768) except possibly the last, which decompresses to however many
60  * bytes remain.  In addition, immediately before the chunks, a table (the
61  * "chunk table") provides the offset, in bytes relative to the end of the chunk
62  * table, of the start of each compressed chunk, except for the first chunk
63  * which is omitted as it always has an offset of 0.  Therefore, a compressed
64  * resource with N chunks will have a chunk table with N - 1 entries.
65  *
66  * Additional information:
67  *
68  * - Entries in the chunk table are 4 bytes each, except if the uncompressed
69  *   size of the resource is greater than 4 GiB, in which case the entries in
70  *   the chunk table are 8 bytes each.  In either case, the entries are unsigned
71  *   little-endian integers.
72  *
73  * - The chunk table is included in the compressed size of the resource provided
74  *   in the corresponding entry in the WIM's stream lookup table.
75  *
76  * - The compressed size of a chunk is never greater than the uncompressed size.
77  *   From the compressor's point of view, chunks that would have compressed to a
78  *   size greater than or equal to their original size are in fact stored
79  *   uncompressed.  From the decompressor's point of view, chunks with
80  *   compressed size equal to their uncompressed size are in fact uncompressed.
81  *
82  * Furthermore, wimlib supports its own "pipable" WIM format, and for this the
83  * structure of compressed resources was modified to allow piped reading and
84  * writing.  To make sequential writing possible, the chunk table is placed
85  * after the chunks rather than before the chunks, and to make sequential
86  * reading possible, each chunk is prefixed with a 4-byte header giving its
87  * compressed size as a 32-bit, unsigned, little-endian integer.  Otherwise the
88  * details are the same.
89  */
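/*
 * For illustration, consider a hypothetical resource with an uncompressed size
 * of 100000 bytes and a chunk size of 32768: it would be divided into 4 chunks
 * that decompress to 32768, 32768, 32768, and 1696 bytes, respectively.  Since
 * the uncompressed size does not exceed 4 GiB, the chunk table would consist of
 * 3 entries of 4 bytes each, giving the offsets (relative to the end of the
 * chunk table) of chunks 1, 2, and 3; chunk 0 implicitly begins at offset 0.
 */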
90
91
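/* A contiguous range of a resource's uncompressed data, given as a byte offset
 * and a size in bytes.  */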
92 struct data_range {
93         u64 offset;
94         u64 size;
95 };
96
97 /*
98  * read_compressed_wim_resource() -
99  *
100  * Read data from a compressed WIM resource.
101  *
102  * @rspec
103  *      Specification of the compressed WIM resource to read from.
104  * @ranges
105  *      Nonoverlapping, nonempty ranges of the uncompressed resource data to
106  *      read, sorted by increasing offset.
107  * @num_ranges
108  *      Number of ranges in @ranges; must be at least 1.
109  * @cb
110  *      Callback function to feed the data being read.  Each call provides the
111  *      next chunk of the requested data, uncompressed.  Each chunk will be of
112  *      nonzero size and will not cross range boundaries, but otherwise will be
113  *      of unspecified size.
114  * @cb_ctx
115  *      Parameter to pass to @cb.
116  *
117  * Possible return values:
118  *
119  *      WIMLIB_ERR_SUCCESS (0)
120  *      WIMLIB_ERR_READ                   (errno set)
121  *      WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to 0)
122  *      WIMLIB_ERR_NOMEM                  (errno set to ENOMEM)
123  *      WIMLIB_ERR_DECOMPRESSION          (errno set to EINVAL)
124  *
125  *      or other error code returned by the @cb function.
126  */
127 static int
128 read_compressed_wim_resource(const struct wim_resource_spec * const rspec,
129                              const struct data_range * const ranges,
130                              const size_t num_ranges,
131                              const consume_data_callback_t cb,
132                              void * const cb_ctx)
133 {
134         int ret;
135         int errno_save;
136
137         u64 *chunk_offsets = NULL;
138         u8 *ubuf = NULL;
139         void *cbuf = NULL;
140         bool chunk_offsets_malloced = false;
141         bool ubuf_malloced = false;
142         bool cbuf_malloced = false;
143         struct wimlib_decompressor *decompressor = NULL;
144
145         /* Sanity checks  */
146         wimlib_assert(rspec != NULL);
147         wimlib_assert(resource_is_compressed(rspec));
148         wimlib_assert(cb != NULL);
149         wimlib_assert(num_ranges != 0);
150         for (size_t i = 0; i < num_ranges; i++) {
151                 DEBUG("Range %zu/%zu: %"PRIu64"@+%"PRIu64" / %"PRIu64,
152                       i + 1, num_ranges, ranges[i].size, ranges[i].offset,
153                       rspec->uncompressed_size);
154                 wimlib_assert(ranges[i].size != 0);
155                 wimlib_assert(ranges[i].offset + ranges[i].size >= ranges[i].size);
156                 wimlib_assert(ranges[i].offset + ranges[i].size <= rspec->uncompressed_size);
157         }
158         for (size_t i = 0; i < num_ranges - 1; i++)
159                 wimlib_assert(ranges[i].offset + ranges[i].size <= ranges[i + 1].offset);
160
161         /* Get the offsets of the first and last bytes of the read.  */
162         const u64 first_offset = ranges[0].offset;
163         const u64 last_offset = ranges[num_ranges - 1].offset + ranges[num_ranges - 1].size - 1;
164
165         /* Get the file descriptor for the WIM.  */
166         struct filedes * const in_fd = &rspec->wim->in_fd;
167
168         /* Determine if we're reading a pipable resource from a pipe or not.  */
169         const bool is_pipe_read = (rspec->is_pipable && !filedes_is_seekable(in_fd));
170
171         /* Determine if the chunk table is in an alternate format.  */
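        /* (The alternate format, used by solid resources, stores chunk sizes
         * rather than offsets and is preceded by its own header; see below.)  */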
172         const bool alt_chunk_table = (rspec->flags & WIM_RESHDR_FLAG_SOLID)
173                                         && !is_pipe_read;
174
175         /* Get the maximum size of uncompressed chunks in this resource, which
176          * we require to be a power of 2.  */
177         u64 cur_read_offset = rspec->offset_in_wim;
178         int ctype = rspec->compression_type;
179         u32 chunk_size = rspec->chunk_size;
180         if (alt_chunk_table) {
181                 /* Alternate chunk table format.  Its header specifies the chunk
182                  * size and compression format.  Note: it could be read here;
183                  * however, the relevant data was already loaded into @rspec by
184                  * read_wim_lookup_table().  */
185                 cur_read_offset += sizeof(struct alt_chunk_table_header_disk);
186         }
187
188         if (!is_power_of_2(chunk_size)) {
189                 ERROR("Invalid compressed resource: "
190                       "expected power-of-2 chunk size (got %"PRIu32")",
191                       chunk_size);
192                 ret = WIMLIB_ERR_INVALID_CHUNK_SIZE;
193                 errno = EINVAL;
194                 goto out_free_memory;
195         }
196
197         /* Get valid decompressor.  */
198         if (ctype == rspec->wim->decompressor_ctype &&
199             chunk_size == rspec->wim->decompressor_max_block_size)
200         {
201                 /* Cached decompressor.  */
202                 decompressor = rspec->wim->decompressor;
203                 rspec->wim->decompressor_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
204                 rspec->wim->decompressor = NULL;
205         } else {
206                 ret = wimlib_create_decompressor(ctype, chunk_size,
207                                                  &decompressor);
208                 if (ret) {
209                         if (ret != WIMLIB_ERR_NOMEM)
210                                 errno = EINVAL;
211                         goto out_free_memory;
212                 }
213         }
214
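        /* Since chunk_size is a power of 2, chunk_order is its base-2
         * logarithm; chunk indices can then be computed from uncompressed byte
         * offsets with a right shift.  */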
215         const u32 chunk_order = fls32(chunk_size);
216
217         /* Calculate the total number of chunks the resource is divided into.  */
218         const u64 num_chunks = (rspec->uncompressed_size + chunk_size - 1) >> chunk_order;
219
220         /* Calculate the 0-based indices of the first and last chunks containing
221          * data that needs to be passed to the callback.  */
222         const u64 first_needed_chunk = first_offset >> chunk_order;
223         const u64 last_needed_chunk = last_offset >> chunk_order;
224
225         /* Calculate the 0-based index of the first chunk that actually needs to
226          * be read.  This is normally first_needed_chunk, but for pipe reads we
227          * must always start from the 0th chunk.  */
228         const u64 read_start_chunk = (is_pipe_read ? 0 : first_needed_chunk);
229
230         /* Calculate the number of chunk offsets that are needed for the chunks
231          * being read.  */
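        /* That is, the start offset of each chunk from read_start_chunk through
         * last_needed_chunk, plus the start offset of the following chunk (if
         * one exists), which is needed to compute the compressed size of
         * last_needed_chunk.  */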
232         const u64 num_needed_chunk_offsets =
233                 last_needed_chunk - read_start_chunk + 1 +
234                 (last_needed_chunk < num_chunks - 1);
235
236         /* Calculate the number of entries in the chunk table.  Normally, it's
237          * one less than the number of chunks, since the first chunk has no
238          * entry.  But in the alternate chunk table format, the chunk entries
239          * contain chunk sizes, not offsets, and there is one per chunk.  */
240         const u64 num_chunk_entries = (alt_chunk_table ? num_chunks : num_chunks - 1);
241
242         /* Set the size of each chunk table entry based on the resource's
243          * uncompressed size.  */
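        /* Per the format notes above, entries are 4 bytes each unless the
         * uncompressed size exceeds 4 GiB; get_chunk_entry_size() also takes
         * the alternate chunk table format into account.  */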
244         const u64 chunk_entry_size = get_chunk_entry_size(rspec->uncompressed_size,
245                                                           alt_chunk_table);
246
247         /* Calculate the size of the chunk table in bytes.  */
248         const u64 chunk_table_size = num_chunk_entries * chunk_entry_size;
249
250         /* Calculate the full size of the chunk table in bytes, including the
251          * header in the case of the alternate chunk table format.  */
252         const u64 chunk_table_full_size =
253                 (alt_chunk_table) ? chunk_table_size + sizeof(struct alt_chunk_table_header_disk)
254                                   : chunk_table_size;
255
256         if (!is_pipe_read) {
257                 /* Read the needed chunk table entries into memory and use them
258                  * to initialize the chunk_offsets array.  */
259
260                 u64 first_chunk_entry_to_read;
261                 u64 last_chunk_entry_to_read;
262
263                 if (alt_chunk_table) {
264                         /* The alternate chunk table contains chunk sizes, not
265                          * offsets, so we always must read all preceding entries
266                          * in order to determine offsets.  */
267                         first_chunk_entry_to_read = 0;
268                         last_chunk_entry_to_read = last_needed_chunk;
269                 } else {
270                         /* Here we must account for the fact that the first
271                          * chunk has no explicit chunk table entry.  */
272
273                         if (read_start_chunk == 0)
274                                 first_chunk_entry_to_read = 0;
275                         else
276                                 first_chunk_entry_to_read = read_start_chunk - 1;
277
278                         if (last_needed_chunk == 0)
279                                 last_chunk_entry_to_read = 0;
280                         else
281                                 last_chunk_entry_to_read = last_needed_chunk - 1;
282
283                         if (last_needed_chunk < num_chunks - 1)
284                                 last_chunk_entry_to_read++;
285                 }
286
287                 const u64 num_chunk_entries_to_read =
288                         last_chunk_entry_to_read - first_chunk_entry_to_read + 1;
289
290                 const u64 chunk_offsets_alloc_size =
291                         max(num_chunk_entries_to_read,
292                             num_needed_chunk_offsets) * sizeof(chunk_offsets[0]);
293
294                 if ((size_t)chunk_offsets_alloc_size != chunk_offsets_alloc_size)
295                         goto oom;
296
297                 if (chunk_offsets_alloc_size <= STACK_MAX) {
298                         chunk_offsets = alloca(chunk_offsets_alloc_size);
299                 } else {
300                         chunk_offsets = MALLOC(chunk_offsets_alloc_size);
301                         if (chunk_offsets == NULL)
302                                 goto oom;
303                         chunk_offsets_malloced = true;
304                 }
305
306                 const size_t chunk_table_size_to_read =
307                         num_chunk_entries_to_read * chunk_entry_size;
308
309                 const u64 file_offset_of_needed_chunk_entries =
310                         cur_read_offset
311                         + (first_chunk_entry_to_read * chunk_entry_size)
312                         + (rspec->is_pipable ? (rspec->size_in_wim - chunk_table_size) : 0);
313
314                 void * const chunk_table_data =
315                         (u8*)chunk_offsets +
316                         chunk_offsets_alloc_size -
317                         chunk_table_size_to_read;
318
319                 ret = full_pread(in_fd, chunk_table_data, chunk_table_size_to_read,
320                                  file_offset_of_needed_chunk_entries);
321                 if (ret)
322                         goto read_error;
323
324                 /* Now fill in chunk_offsets from the entries we have read in
325                  * chunk_table_data.  We break aliasing rules here to avoid having
326                  * to allocate yet another array.  */
327                 typedef le64 _may_alias_attribute aliased_le64_t;
328                 typedef le32 _may_alias_attribute aliased_le32_t;
329                 u64 * chunk_offsets_p = chunk_offsets;
330
331                 if (alt_chunk_table) {
332                         u64 cur_offset = 0;
333                         aliased_le32_t *raw_entries = chunk_table_data;
334
335                         for (size_t i = 0; i < num_chunk_entries_to_read; i++) {
336                                 u32 entry = le32_to_cpu(raw_entries[i]);
337                                 if (i >= read_start_chunk)
338                                         *chunk_offsets_p++ = cur_offset;
339                                 cur_offset += entry;
340                         }
341                         if (last_needed_chunk < num_chunks - 1)
342                                 *chunk_offsets_p = cur_offset;
343                 } else {
344                         if (read_start_chunk == 0)
345                                 *chunk_offsets_p++ = 0;
346
347                         if (chunk_entry_size == 4) {
348                                 aliased_le32_t *raw_entries = chunk_table_data;
349                                 for (size_t i = 0; i < num_chunk_entries_to_read; i++)
350                                         *chunk_offsets_p++ = le32_to_cpu(raw_entries[i]);
351                         } else {
352                                 aliased_le64_t *raw_entries = chunk_table_data;
353                                 for (size_t i = 0; i < num_chunk_entries_to_read; i++)
354                                         *chunk_offsets_p++ = le64_to_cpu(raw_entries[i]);
355                         }
356                 }
357
358                 /* Set offset to beginning of first chunk to read.  */
359                 cur_read_offset += chunk_offsets[0];
360                 if (rspec->is_pipable)
361                         cur_read_offset += read_start_chunk * sizeof(struct pwm_chunk_hdr);
362                 else
363                         cur_read_offset += chunk_table_size;
364         }
365
366         /* Allocate buffer for holding the uncompressed data of each chunk.  */
367         if (chunk_size <= STACK_MAX) {
368                 ubuf = alloca(chunk_size);
369         } else {
370                 ubuf = MALLOC(chunk_size);
371                 if (ubuf == NULL)
372                         goto oom;
373                 ubuf_malloced = true;
374         }
375
376         /* Allocate a temporary buffer for reading compressed chunks, each of
377          * which can be at most @chunk_size - 1 bytes.  This excludes compressed
378          * chunks that are a full @chunk_size bytes, which are actually stored
379          * uncompressed.  */
380         if (chunk_size - 1 <= STACK_MAX) {
381                 cbuf = alloca(chunk_size - 1);
382         } else {
383                 cbuf = MALLOC(chunk_size - 1);
384                 if (cbuf == NULL)
385                         goto oom;
386                 cbuf_malloced = true;
387         }
388
389         /* Set current data range.  */
390         const struct data_range *cur_range = ranges;
391         const struct data_range * const end_range = &ranges[num_ranges];
392         u64 cur_range_pos = cur_range->offset;
393         u64 cur_range_end = cur_range->offset + cur_range->size;
394
395         /* Read and process each needed chunk.  */
396         for (u64 i = read_start_chunk; i <= last_needed_chunk; i++) {
397
398                 /* Calculate uncompressed size of next chunk.  */
399                 u32 chunk_usize;
400                 if ((i == num_chunks - 1) && (rspec->uncompressed_size & (chunk_size - 1)))
401                         chunk_usize = (rspec->uncompressed_size & (chunk_size - 1));
402                 else
403                         chunk_usize = chunk_size;
404
405                 /* Calculate compressed size of next chunk.  */
406                 u32 chunk_csize;
407                 if (is_pipe_read) {
408                         struct pwm_chunk_hdr chunk_hdr;
409
410                         ret = full_pread(in_fd, &chunk_hdr,
411                                          sizeof(chunk_hdr), cur_read_offset);
412                         if (ret)
413                                 goto read_error;
414                         chunk_csize = le32_to_cpu(chunk_hdr.compressed_size);
415                 } else {
416                         if (i == num_chunks - 1) {
417                                 chunk_csize = rspec->size_in_wim -
418                                               chunk_table_full_size -
419                                               chunk_offsets[i - read_start_chunk];
420                                 if (rspec->is_pipable)
421                                         chunk_csize -= num_chunks * sizeof(struct pwm_chunk_hdr);
422                         } else {
423                                 chunk_csize = chunk_offsets[i + 1 - read_start_chunk] -
424                                               chunk_offsets[i - read_start_chunk];
425                         }
426                 }
427                 if (chunk_csize == 0 || chunk_csize > chunk_usize) {
428                         ERROR("Invalid chunk size in compressed resource!");
429                         errno = EINVAL;
430                         ret = WIMLIB_ERR_DECOMPRESSION;
431                         goto out_free_memory;
432                 }
433                 if (rspec->is_pipable)
434                         cur_read_offset += sizeof(struct pwm_chunk_hdr);
435
436                 /* Offsets in the uncompressed resource at which this chunk
437                  * starts and ends.  */
438                 const u64 chunk_start_offset = i << chunk_order;
439                 const u64 chunk_end_offset = chunk_start_offset + chunk_usize;
440
441                 if (chunk_end_offset <= cur_range_pos) {
442
443                         /* The next range does not require data in this chunk,
444                          * so skip it.  */
445                         cur_read_offset += chunk_csize;
446                         if (is_pipe_read) {
447                                 u8 dummy;
448
449                                 ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
450                                 if (ret)
451                                         goto read_error;
452                         }
453                 } else {
454
455                         /* Read the chunk and feed data to the callback
456                          * function.  */
457                         u8 *read_buf;
458
459                         if (chunk_csize == chunk_usize)
460                                 read_buf = ubuf;
461                         else
462                                 read_buf = cbuf;
463
464                         ret = full_pread(in_fd,
465                                          read_buf,
466                                          chunk_csize,
467                                          cur_read_offset);
468                         if (ret)
469                                 goto read_error;
470
471                         if (read_buf == cbuf) {
472                                 DEBUG("Decompressing chunk %"PRIu64" "
473                                       "(csize=%"PRIu32" usize=%"PRIu32")",
474                                       i, chunk_csize, chunk_usize);
475                                 ret = wimlib_decompress(cbuf,
476                                                         chunk_csize,
477                                                         ubuf,
478                                                         chunk_usize,
479                                                         decompressor);
480                                 if (ret) {
481                                         ERROR("Failed to decompress data!");
482                                         ret = WIMLIB_ERR_DECOMPRESSION;
483                                         errno = EINVAL;
484                                         goto out_free_memory;
485                                 }
486                         }
487                         cur_read_offset += chunk_csize;
488
489                         /* At least one range requires data in this chunk.  */
490                         do {
491                                 size_t start, end, size;
492
493                                 /* Calculate how many bytes of data should be
494                                  * sent to the callback function, taking into
495                                  * account that data sent to the callback
496                                  * function must not overlap range boundaries.
497                                  */
498                                 start = cur_range_pos - chunk_start_offset;
499                                 end = min(cur_range_end, chunk_end_offset) - chunk_start_offset;
500                                 size = end - start;
501
502                                 ret = (*cb)(&ubuf[start], size, cb_ctx);
503
504                                 if (ret)
505                                         goto out_free_memory;
506
507                                 cur_range_pos += size;
508                                 if (cur_range_pos == cur_range_end) {
509                                         /* Advance to next range.  */
510                                         if (++cur_range == end_range) {
511                                                 cur_range_pos = ~0ULL;
512                                         } else {
513                                                 cur_range_pos = cur_range->offset;
514                                                 cur_range_end = cur_range->offset + cur_range->size;
515                                         }
516                                 }
517                         } while (cur_range_pos < chunk_end_offset);
518                 }
519         }
520
521         if (is_pipe_read &&
522             last_offset == rspec->uncompressed_size - 1 &&
523             chunk_table_size)
524         {
525                 u8 dummy;
526                 /* If reading a pipable resource from a pipe and the full data
527                  * was requested, skip the chunk table at the end so that the
528                  * file descriptor is fully clear of the resource after this
529                  * returns.  */
530                 cur_read_offset += chunk_table_size;
531                 ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
532                 if (ret)
533                         goto read_error;
534         }
535         ret = 0;
536
537 out_free_memory:
538         errno_save = errno;
539         if (decompressor) {
540                 wimlib_free_decompressor(rspec->wim->decompressor);
541                 rspec->wim->decompressor = decompressor;
542                 rspec->wim->decompressor_ctype = ctype;
543                 rspec->wim->decompressor_max_block_size = chunk_size;
544         }
545         if (chunk_offsets_malloced)
546                 FREE(chunk_offsets);
547         if (ubuf_malloced)
548                 FREE(ubuf);
549         if (cbuf_malloced)
550                 FREE(cbuf);
551         errno = errno_save;
552         return ret;
553
554 oom:
555         ERROR("Not enough memory available to read size=%"PRIu64" bytes "
556               "from compressed WIM resource!", last_offset - first_offset + 1);
557         errno = ENOMEM;
558         ret = WIMLIB_ERR_NOMEM;
559         goto out_free_memory;
560
561 read_error:
562         ERROR_WITH_ERRNO("Error reading compressed WIM resource!");
563         goto out_free_memory;
564 }
565
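/* Feed @size zero bytes to the callback function @cb, in chunks of at most
 * BUFFER_SIZE bytes each.  */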
566 static int
567 fill_zeroes(u64 size, consume_data_callback_t cb, void *cb_ctx)
568 {
569         if (unlikely(size)) {
570                 u8 buf[min(size, BUFFER_SIZE)];
571
572                 memset(buf, 0, sizeof(buf));
573
574                 do {
575                         size_t len;
576                         int ret;
577
578                         len = min(size, BUFFER_SIZE);
579                         ret = cb(buf, len, cb_ctx);
580                         if (ret)
581                                 return ret;
582                         size -= len;
583                 } while (size);
584         }
585         return 0;
586 }
587
588 /* Read raw data from a file descriptor at the specified offset, feeding the
589  * data in chunks to the specified callback function.  */
590 static int
591 read_raw_file_data(struct filedes *in_fd, u64 offset, u64 size,
592                    consume_data_callback_t cb, void *cb_ctx)
593 {
594         u8 buf[BUFFER_SIZE];
595         size_t bytes_to_read;
596         int ret;
597
598         while (size) {
599                 bytes_to_read = min(sizeof(buf), size);
600                 ret = full_pread(in_fd, buf, bytes_to_read, offset);
601                 if (ret) {
602                         ERROR_WITH_ERRNO("Read error");
603                         return ret;
604                 }
605                 ret = cb(buf, bytes_to_read, cb_ctx);
606                 if (ret)
607                         return ret;
608                 size -= bytes_to_read;
609                 offset += bytes_to_read;
610         }
611         return 0;
612 }
613
614 /* A consume_data_callback_t implementation that simply concatenates all chunks
615  * into a buffer.  */
616 static int
617 bufferer_cb(const void *chunk, size_t size, void *_ctx)
618 {
619         u8 **buf_p = _ctx;
620
621         *buf_p = mempcpy(*buf_p, chunk, size);
622         return 0;
623 }
624
625 /*
626  * read_partial_wim_resource() -
627  *
628  * Read a range of data from an uncompressed or compressed resource in a WIM
629  * file.
630  *
631  * @rspec
632  *      Specification of the WIM resource to read from.
633  * @offset
634  *      Offset within the uncompressed resource at which to start reading.
635  * @size
636  *      Number of bytes to read.
637  * @cb
638  *      Callback function to feed the data being read.  Each call provides the
639  *      next chunk of the requested data, uncompressed.  Each chunk will be of
640  *      nonzero size and will not cross range boundaries, but otherwise will be
641  *      of unspecified size.
642  * @cb_ctx
643  *      Parameter to pass to @cb.
644  *
645  * Return values:
646  *      WIMLIB_ERR_SUCCESS (0)
647  *      WIMLIB_ERR_READ                   (errno set)
648  *      WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to 0)
649  *      WIMLIB_ERR_NOMEM                  (errno set to ENOMEM)
650  *      WIMLIB_ERR_DECOMPRESSION          (errno set to EINVAL)
651  *
652  *      or other error code returned by the @cb function.
653  */
654 static int
655 read_partial_wim_resource(const struct wim_resource_spec *rspec,
656                           u64 offset, u64 size,
657                           consume_data_callback_t cb, void *cb_ctx)
658 {
659         /* Sanity checks.  */
660         wimlib_assert(offset + size >= offset);
661         wimlib_assert(offset + size <= rspec->uncompressed_size);
662
663         DEBUG("Reading %"PRIu64" @ %"PRIu64" from WIM resource  "
664               "%"PRIu64" => %"PRIu64" @ %"PRIu64,
665               size, offset, rspec->uncompressed_size,
666               rspec->size_in_wim, rspec->offset_in_wim);
667
668         /* Trivial case.  */
669         if (size == 0)
670                 return 0;
671
672         if (resource_is_compressed(rspec)) {
673                 struct data_range range = {
674                         .offset = offset,
675                         .size = size,
676                 };
677                 return read_compressed_wim_resource(rspec, &range, 1,
678                                                     cb, cb_ctx);
679         } else {
680                 /* Reading uncompressed resource.  For completeness, handle the
681                  * weird case where size_in_wim < uncompressed_size.  */
682
683                 u64 read_size;
684                 u64 zeroes_size;
685                 int ret;
686
687                 if (likely(offset + size <= rspec->size_in_wim) ||
688                     rspec->is_pipable)
689                 {
690                         read_size = size;
691                         zeroes_size = 0;
692                 } else {
693                         if (offset >= rspec->size_in_wim) {
694                                 read_size = 0;
695                                 zeroes_size = size;
696                         } else {
697                                 read_size = rspec->size_in_wim - offset;
698                                 zeroes_size = offset + size - rspec->size_in_wim;
699                         }
700                 }
701
702                 ret = read_raw_file_data(&rspec->wim->in_fd,
703                                          rspec->offset_in_wim + offset,
704                                          read_size,
705                                          cb,
706                                          cb_ctx);
707                 if (ret)
708                         return ret;
709
710                 return fill_zeroes(zeroes_size, cb, cb_ctx);
711         }
712 }
713
714 /* Read the specified range of uncompressed data from the specified stream,
715  * which must be located in a WIM file, into the specified buffer.  */
716 int
717 read_partial_wim_stream_into_buf(const struct wim_lookup_table_entry *lte,
718                                  size_t size, u64 offset, void *_buf)
719 {
720         u8 *buf = _buf;
721
722         wimlib_assert(lte->resource_location == RESOURCE_IN_WIM);
723
724         return read_partial_wim_resource(lte->rspec,
725                                          lte->offset_in_res + offset,
726                                          size,
727                                          bufferer_cb,
728                                          &buf);
729 }
730
731 /* A consume_data_callback_t implementation that simply ignores the data
732  * received.  */
733 static int
734 skip_chunk_cb(const void *chunk, size_t size, void *_ctx)
735 {
736         return 0;
737 }
738
739 /* Skip over the data of the specified stream, which must correspond to a full
740  * WIM resource.  */
741 int
742 skip_wim_stream(struct wim_lookup_table_entry *lte)
743 {
744         wimlib_assert(lte->resource_location == RESOURCE_IN_WIM);
745         wimlib_assert(!(lte->flags & WIM_RESHDR_FLAG_SOLID));
746         DEBUG("Skipping stream (size=%"PRIu64")", lte->size);
747         return read_partial_wim_resource(lte->rspec,
748                                          0,
749                                          lte->rspec->uncompressed_size,
750                                          skip_chunk_cb,
751                                          NULL);
752 }
753
754 static int
755 read_wim_stream_prefix(const struct wim_lookup_table_entry *lte, u64 size,
756                        consume_data_callback_t cb, void *cb_ctx)
757 {
758         return read_partial_wim_resource(lte->rspec, lte->offset_in_res, size,
759                                          cb, cb_ctx);
760 }
761
762 /* This function handles reading stream data that is located in an external
763  * file, such as a file that has been added to the WIM image through execution
764  * of a wimlib_add_command.
765  *
766  * This assumes the file can be accessed using the standard POSIX open(),
767  * read(), and close().  On Windows this will not necessarily be the case (since
768  * the file may need FILE_FLAG_BACKUP_SEMANTICS to be opened, or the file may be
769  * encrypted), so Windows uses its own code for its equivalent case.  */
770 static int
771 read_file_on_disk_prefix(const struct wim_lookup_table_entry *lte, u64 size,
772                          consume_data_callback_t cb, void *cb_ctx)
773 {
774         int ret;
775         int raw_fd;
776         struct filedes fd;
777
778         wimlib_assert(size <= lte->size);
779
780         DEBUG("Reading %"PRIu64" bytes from \"%"TS"\"", size, lte->file_on_disk);
781
782         raw_fd = topen(lte->file_on_disk, O_BINARY | O_RDONLY);
783         if (raw_fd < 0) {
784                 ERROR_WITH_ERRNO("Can't open \"%"TS"\"", lte->file_on_disk);
785                 return WIMLIB_ERR_OPEN;
786         }
787         filedes_init(&fd, raw_fd);
788         ret = read_raw_file_data(&fd, 0, size, cb, cb_ctx);
789         filedes_close(&fd);
790         return ret;
791 }
792
793 #ifdef WITH_FUSE
794 static int
795 read_staging_file_prefix(const struct wim_lookup_table_entry *lte, u64 size,
796                          consume_data_callback_t cb, void *cb_ctx)
797 {
798         int raw_fd;
799         struct filedes fd;
800         int ret;
801
802         wimlib_assert(size <= lte->size);
803
804         DEBUG("Reading %"PRIu64" bytes from staging file \"%s\"",
805               size, lte->staging_file_name);
806
807         raw_fd = openat(lte->staging_dir_fd, lte->staging_file_name,
808                         O_RDONLY | O_NOFOLLOW);
809         if (raw_fd < 0) {
810                 ERROR_WITH_ERRNO("Can't open staging file \"%s\"",
811                                  lte->staging_file_name);
812                 return WIMLIB_ERR_OPEN;
813         }
814         filedes_init(&fd, raw_fd);
815         ret = read_raw_file_data(&fd, 0, size, cb, cb_ctx);
816         filedes_close(&fd);
817         return ret;
818 }
819 #endif
820
821 /* This function handles the trivial case of reading stream data that is, in
822  * fact, already located in an in-memory buffer.  */
823 static int
824 read_buffer_prefix(const struct wim_lookup_table_entry *lte,
825                    u64 size, consume_data_callback_t cb, void *cb_ctx)
826 {
827         wimlib_assert(size <= lte->size);
828         return (*cb)(lte->attached_buffer, size, cb_ctx);
829 }
830
831 typedef int (*read_stream_prefix_handler_t)(const struct wim_lookup_table_entry *lte,
832                                             u64 size,
833                                             consume_data_callback_t cb,
834                                             void *cb_ctx);
835
836 /*
837  * read_stream_prefix() -
838  *
839  * Reads the first @size bytes from a generic "stream", which may be located in
840  * any one of several locations, such as in a WIM file (compressed or
841  * uncompressed), in an external file, or directly in an in-memory buffer.
842  *
843  * This function feeds the data to a callback function @cb in chunks of
844  * unspecified size.
845  *
846  * Returns 0 on success; nonzero on error.  A nonzero value will be returned if
847  * the stream data cannot be successfully read (for a number of different
848  * reasons, depending on the stream location), or if @cb returned nonzero, in
849  * which case that error code will be returned.
850  */
851 static int
852 read_stream_prefix(const struct wim_lookup_table_entry *lte, u64 size,
853                    consume_data_callback_t cb, void *cb_ctx)
854 {
855         static const read_stream_prefix_handler_t handlers[] = {
856                 [RESOURCE_IN_WIM]             = read_wim_stream_prefix,
857                 [RESOURCE_IN_FILE_ON_DISK]    = read_file_on_disk_prefix,
858                 [RESOURCE_IN_ATTACHED_BUFFER] = read_buffer_prefix,
859         #ifdef WITH_FUSE
860                 [RESOURCE_IN_STAGING_FILE]    = read_staging_file_prefix,
861         #endif
862         #ifdef WITH_NTFS_3G
863                 [RESOURCE_IN_NTFS_VOLUME]     = read_ntfs_file_prefix,
864         #endif
865         #ifdef __WIN32__
866                 [RESOURCE_IN_WINNT_FILE_ON_DISK] = read_winnt_file_prefix,
867                 [RESOURCE_WIN32_ENCRYPTED]    = read_win32_encrypted_file_prefix,
868         #endif
869         };
870         wimlib_assert(lte->resource_location < ARRAY_LEN(handlers)
871                       && handlers[lte->resource_location] != NULL);
872         return handlers[lte->resource_location](lte, size, cb, cb_ctx);
873 }
874
875 /* Read the full uncompressed data of the specified stream into the specified
876  * buffer, which must have space for at least lte->size bytes.  */
877 int
878 read_full_stream_into_buf(const struct wim_lookup_table_entry *lte, void *_buf)
879 {
880         u8 *buf = _buf;
881         return read_stream_prefix(lte, lte->size, bufferer_cb, &buf);
882 }
883
884 /* Retrieve the full uncompressed data of the specified stream.  A buffer large
885  * enough to hold the data is allocated and returned in @buf_ret.  */
886 int
887 read_full_stream_into_alloc_buf(const struct wim_lookup_table_entry *lte,
888                                 void **buf_ret)
889 {
890         int ret;
891         void *buf;
892
893         if ((size_t)lte->size != lte->size) {
894                 ERROR("Can't read %"PRIu64" byte stream into "
895                       "memory", lte->size);
896                 return WIMLIB_ERR_NOMEM;
897         }
898
899         buf = MALLOC(lte->size);
900         if (buf == NULL)
901                 return WIMLIB_ERR_NOMEM;
902
903         ret = read_full_stream_into_buf(lte, buf);
904         if (ret) {
905                 FREE(buf);
906                 return ret;
907         }
908
909         *buf_ret = buf;
910         return 0;
911 }
912
913 /* Retrieve the full uncompressed data of the specified WIM resource.  A buffer
914  * large enough to hold the data is allocated and returned in @buf_ret.  */
915 static int
916 wim_resource_spec_to_data(struct wim_resource_spec *rspec, void **buf_ret)
917 {
918         int ret;
919         struct wim_lookup_table_entry *lte;
920
921         lte = new_lookup_table_entry();
922         if (lte == NULL)
923                 return WIMLIB_ERR_NOMEM;
924
925         lte_bind_wim_resource_spec(lte, rspec);
926         lte->flags = rspec->flags;
927         lte->size = rspec->uncompressed_size;
928         lte->offset_in_res = 0;
929
930         ret = read_full_stream_into_alloc_buf(lte, buf_ret);
931
932         lte_unbind_wim_resource_spec(lte);
933         free_lookup_table_entry(lte);
934         return ret;
935 }
936
937 /* Retrieve the full uncompressed data of a WIM resource specified as a raw
938  * `wim_reshdr' and the corresponding WIM file.  A buffer large enough to hold
939  * the data is allocated and returned in @buf_ret.  */
940 int
941 wim_reshdr_to_data(const struct wim_reshdr *reshdr, WIMStruct *wim, void **buf_ret)
942 {
943         DEBUG("offset_in_wim=%"PRIu64", size_in_wim=%"PRIu64", "
944               "uncompressed_size=%"PRIu64,
945               reshdr->offset_in_wim, reshdr->size_in_wim,
946               reshdr->uncompressed_size);
947
948         struct wim_resource_spec rspec;
949         wim_res_hdr_to_spec(reshdr, wim, &rspec);
950         return wim_resource_spec_to_data(&rspec, buf_ret);
951 }
952
953 int
954 wim_reshdr_to_hash(const struct wim_reshdr *reshdr, WIMStruct *wim,
955                    u8 hash[SHA1_HASH_SIZE])
956 {
957         struct wim_resource_spec rspec;
958         int ret;
959         struct wim_lookup_table_entry *lte;
960
961         wim_res_hdr_to_spec(reshdr, wim, &rspec);
962
963         lte = new_lookup_table_entry();
964         if (lte == NULL)
965                 return WIMLIB_ERR_NOMEM;
966
967         lte_bind_wim_resource_spec(lte, &rspec);
968         lte->flags = rspec.flags;
969         lte->size = rspec.uncompressed_size;
970         lte->offset_in_res = 0;
971         lte->unhashed = 1;
972
973         ret = sha1_stream(lte);
974
975         lte_unbind_wim_resource_spec(lte);
976         copy_hash(hash, lte->hash);
977         free_lookup_table_entry(lte);
978         return ret;
979 }
980
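/* Context for streamifier_cb(): tracks the stream currently being reconstituted
 * from raw resource data, along with the remaining streams in the list.  */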
981 struct streamifier_context {
982         struct read_stream_list_callbacks cbs;
983         struct wim_lookup_table_entry *cur_stream;
984         struct wim_lookup_table_entry *next_stream;
985         u64 cur_stream_offset;
986         struct wim_lookup_table_entry *final_stream;
987         size_t list_head_offset;
988 };
989
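/* Given a stream and the offset of the `struct list_head' that links the
 * streams being read, return the next stream in the list.  */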
990 static struct wim_lookup_table_entry *
991 next_stream(struct wim_lookup_table_entry *lte, size_t list_head_offset)
992 {
993         struct list_head *cur;
994
995         cur = (struct list_head*)((u8*)lte + list_head_offset);
996
997         return (struct wim_lookup_table_entry*)((u8*)cur->next - list_head_offset);
998 }
999
1000 /* A consume_data_callback_t implementation that translates raw resource data
1001  * into streams, calling the begin_stream, consume_chunk, and end_stream
1002  * callback functions as appropriate.  */
1003 static int
1004 streamifier_cb(const void *chunk, size_t size, void *_ctx)
1005 {
1006         struct streamifier_context *ctx = _ctx;
1007         int ret;
1008
1009         DEBUG("%zu bytes passed to streamifier", size);
1010
1011         wimlib_assert(ctx->cur_stream != NULL);
1012         wimlib_assert(size <= ctx->cur_stream->size - ctx->cur_stream_offset);
1013
1014         if (ctx->cur_stream_offset == 0) {
1015
1016                 /* Starting a new stream.  */
1017                 DEBUG("Begin new stream (size=%"PRIu64").",
1018                       ctx->cur_stream->size);
1019
1020                 ret = (*ctx->cbs.begin_stream)(ctx->cur_stream,
1021                                                ctx->cbs.begin_stream_ctx);
1022                 if (ret)
1023                         return ret;
1024         }
1025
1026         /* Consume the chunk.  */
1027         ret = (*ctx->cbs.consume_chunk)(chunk, size,
1028                                         ctx->cbs.consume_chunk_ctx);
1029         ctx->cur_stream_offset += size;
1030         if (ret)
1031                 return ret;
1032
1033         if (ctx->cur_stream_offset == ctx->cur_stream->size) {
1034                 /* Finished reading all the data for a stream.  */
1035
1036                 ctx->cur_stream_offset = 0;
1037
1038                 DEBUG("End stream (size=%"PRIu64").", ctx->cur_stream->size);
1039                 ret = (*ctx->cbs.end_stream)(ctx->cur_stream, 0,
1040                                              ctx->cbs.end_stream_ctx);
1041                 if (ret)
1042                         return ret;
1043
1044                 /* Advance to next stream.  */
1045                 ctx->cur_stream = ctx->next_stream;
1046                 if (ctx->cur_stream != NULL) {
1047                         if (ctx->cur_stream != ctx->final_stream)
1048                                 ctx->next_stream = next_stream(ctx->cur_stream,
1049                                                                ctx->list_head_offset);
1050                         else
1051                                 ctx->next_stream = NULL;
1052                 }
1053         }
1054         return 0;
1055 }
1056
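/* Context for the hasher_*() callbacks, which compute the SHA1 message digest
 * of each stream read while optionally passing the data on to another set of
 * callbacks.  */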
1057 struct hasher_context {
1058         SHA_CTX sha_ctx;
1059         int flags;
1060         struct read_stream_list_callbacks cbs;
1061 };
1062
1063 /* Callback for starting to read a stream while calculating its SHA1 message
1064  * digest.  */
1065 static int
1066 hasher_begin_stream(struct wim_lookup_table_entry *lte, void *_ctx)
1067 {
1068         struct hasher_context *ctx = _ctx;
1069
1070         sha1_init(&ctx->sha_ctx);
1071
1072         if (ctx->cbs.begin_stream == NULL)
1073                 return 0;
1074         else
1075                 return (*ctx->cbs.begin_stream)(lte, ctx->cbs.begin_stream_ctx);
1076 }
1077
1078 /* A consume_data_callback_t implementation that continues calculating the SHA1
1079  * message digest of the stream being read, then optionally passes the data on
1080  * to another consume_data_callback_t implementation.  This allows checking the
1081  * SHA1 message digest of a stream being extracted, for example.  */
1082 static int
1083 hasher_consume_chunk(const void *chunk, size_t size, void *_ctx)
1084 {
1085         struct hasher_context *ctx = _ctx;
1086
1087         sha1_update(&ctx->sha_ctx, chunk, size);
1088         if (ctx->cbs.consume_chunk == NULL)
1089                 return 0;
1090         else
1091                 return (*ctx->cbs.consume_chunk)(chunk, size, ctx->cbs.consume_chunk_ctx);
1092 }
1093
1094 /* Callback for finishing reading a stream while calculating its SHA1 message
1095  * digest.  */
1096 static int
1097 hasher_end_stream(struct wim_lookup_table_entry *lte, int status, void *_ctx)
1098 {
1099         struct hasher_context *ctx = _ctx;
1100         u8 hash[SHA1_HASH_SIZE];
1101         int ret;
1102
1103         if (status) {
1104                 /* Error occurred; the full stream may not have been read.  */
1105                 ret = status;
1106                 goto out_next_cb;
1107         }
1108
1109         /* Retrieve the final SHA1 message digest.  */
1110         sha1_final(hash, &ctx->sha_ctx);
1111
1112         if (lte->unhashed) {
1113                 if (ctx->flags & COMPUTE_MISSING_STREAM_HASHES) {
1114                         /* No SHA1 message digest was previously present for the
1115                          * stream.  Set it to the one just calculated.  */
1116                         DEBUG("Set SHA1 message digest for stream "
1117                               "(size=%"PRIu64").", lte->size);
1118                         copy_hash(lte->hash, hash);
1119                 }
1120         } else {
1121                 if (ctx->flags & VERIFY_STREAM_HASHES) {
1122                         /* The stream already had a SHA1 message digest present.  Verify
1123                          * that it is the same as the calculated value.  */
1124                         if (!hashes_equal(hash, lte->hash)) {
1125                                 if (wimlib_print_errors) {
1126                                         tchar expected_hashstr[SHA1_HASH_SIZE * 2 + 1];
1127                                         tchar actual_hashstr[SHA1_HASH_SIZE * 2 + 1];
1128                                         sprint_hash(lte->hash, expected_hashstr);
1129                                         sprint_hash(hash, actual_hashstr);
1130                                         ERROR("The stream is corrupted!\n"
1131                                               "        (Expected SHA1=%"TS",\n"
1132                                               "              got SHA1=%"TS")",
1133                                               expected_hashstr, actual_hashstr);
1134                                 }
1135                                 ret = WIMLIB_ERR_INVALID_RESOURCE_HASH;
1136                                 errno = EINVAL;
1137                                 goto out_next_cb;
1138                         }
1139                         DEBUG("SHA1 message digest okay for "
1140                               "stream (size=%"PRIu64").", lte->size);
1141                 }
1142         }
1143         ret = 0;
1144 out_next_cb:
1145         if (ctx->cbs.end_stream == NULL)
1146                 return ret;
1147         else
1148                 return (*ctx->cbs.end_stream)(lte, ret, ctx->cbs.end_stream_ctx);
1149 }
1150
1151 static int
1152 read_full_stream_with_cbs(struct wim_lookup_table_entry *lte,
1153                           const struct read_stream_list_callbacks *cbs)
1154 {
1155         int ret;
1156
1157         ret = (*cbs->begin_stream)(lte, cbs->begin_stream_ctx);
1158         if (ret)
1159                 return ret;
1160
1161         ret = read_stream_prefix(lte, lte->size, cbs->consume_chunk,
1162                                  cbs->consume_chunk_ctx);
1163
1164         return (*cbs->end_stream)(lte, ret, cbs->end_stream_ctx);
1165 }
1166
1167 /* Read the full data of the specified stream, passing the data into the
1168  * specified callbacks (all of which are optional) and either checking or
1169  * computing the SHA1 message digest of the stream.  */
1170 static int
1171 read_full_stream_with_sha1(struct wim_lookup_table_entry *lte,
1172                            const struct read_stream_list_callbacks *cbs)
1173 {
1174         struct hasher_context hasher_ctx = {
1175                 .flags = VERIFY_STREAM_HASHES | COMPUTE_MISSING_STREAM_HASHES,
1176                 .cbs = *cbs,
1177         };
1178         struct read_stream_list_callbacks hasher_cbs = {
1179                 .begin_stream           = hasher_begin_stream,
1180                 .begin_stream_ctx       = &hasher_ctx,
1181                 .consume_chunk          = hasher_consume_chunk,
1182                 .consume_chunk_ctx      = &hasher_ctx,
1183                 .end_stream             = hasher_end_stream,
1184                 .end_stream_ctx         = &hasher_ctx,
1185         };
1186         return read_full_stream_with_cbs(lte, &hasher_cbs);
1187 }
1188
1189 static int
1190 read_streams_in_solid_resource(struct wim_lookup_table_entry *first_stream,
1191                                struct wim_lookup_table_entry *last_stream,
1192                                u64 stream_count,
1193                                size_t list_head_offset,
1194                                const struct read_stream_list_callbacks *sink_cbs)
1195 {
1196         struct data_range *ranges;
1197         bool ranges_malloced;
1198         struct wim_lookup_table_entry *cur_stream;
1199         size_t i;
1200         int ret;
1201         u64 ranges_alloc_size;
1202
1203         DEBUG("Reading %"PRIu64" streams combined in same WIM resource",
1204               stream_count);
1205
1206         /* Set up the data ranges array (one range per stream to read); this way
1207          * read_compressed_wim_resource() does not need to be aware of streams.
1208          */
1209
1210         ranges_alloc_size = stream_count * sizeof(ranges[0]);
1211
1212         if (unlikely((size_t)ranges_alloc_size != ranges_alloc_size)) {
1213                 ERROR("Too many streams in one resource!");
1214                 return WIMLIB_ERR_NOMEM;
1215         }
1216         if (likely(ranges_alloc_size <= STACK_MAX)) {
1217                 ranges = alloca(ranges_alloc_size);
1218                 ranges_malloced = false;
1219         } else {
1220                 ranges = MALLOC(ranges_alloc_size);
1221                 if (ranges == NULL) {
1222                         ERROR("Too many streams in one resource!");
1223                         return WIMLIB_ERR_NOMEM;
1224                 }
1225                 ranges_malloced = true;
1226         }
1227
1228         for (i = 0, cur_stream = first_stream;
1229              i < stream_count;
1230              i++, cur_stream = next_stream(cur_stream, list_head_offset))
1231         {
1232                 ranges[i].offset = cur_stream->offset_in_res;
1233                 ranges[i].size = cur_stream->size;
1234         }
1235
1236         struct streamifier_context streamifier_ctx = {
1237                 .cbs                    = *sink_cbs,
1238                 .cur_stream             = first_stream,
1239                 .next_stream            = next_stream(first_stream, list_head_offset),
1240                 .cur_stream_offset      = 0,
1241                 .final_stream           = last_stream,
1242                 .list_head_offset       = list_head_offset,
1243         };
1244
1245         ret = read_compressed_wim_resource(first_stream->rspec,
1246                                            ranges,
1247                                            stream_count,
1248                                            streamifier_cb,
1249                                            &streamifier_ctx);
1250
1251         if (ranges_malloced)
1252                 FREE(ranges);
1253
1254         if (ret) {
1255                 if (streamifier_ctx.cur_stream_offset != 0) {
1256                         ret = (*streamifier_ctx.cbs.end_stream)
1257                                 (streamifier_ctx.cur_stream,
1258                                  ret,
1259                                  streamifier_ctx.cbs.end_stream_ctx);
1260                 }
1261         }
1262         return ret;
1263 }
1264
1265 /*
1266  * Read a list of streams, each of which may be in any supported location (e.g.
1267  * in a WIM or in an external file).  Unlike read_stream_prefix() or the
1268  * functions which call it, this function optimizes the case where multiple
1269  * streams are combined into a single solid compressed WIM resource and reads
1270  * them all consecutively, decompressing the data only once.
1271  *
1272  * @stream_list
1273  *      List of streams (represented as `struct wim_lookup_table_entry's) to
1274  *      read.
1275  * @list_head_offset
1276  *      Offset of the `struct list_head' within each `struct
1277  *      wim_lookup_table_entry' that makes up the @stream_list.
1278  * @cbs
1279  *      Callback functions to accept the stream data.
1280  * @flags
1281  *      Bitwise OR of zero or more of the following flags:
1282  *
1283  *      VERIFY_STREAM_HASHES:
1284  *              For all streams being read that have already had SHA1 message
1285  *              digests computed, calculate the SHA1 message digest of the read
1286  *              data and compare it with the previously computed value.  If they
1287  *              do not match, return WIMLIB_ERR_INVALID_RESOURCE_HASH.
1288  *
1289  *      COMPUTE_MISSING_STREAM_HASHES:
1290  *              For all streams being read that have not yet had their SHA1
1291  *              message digests computed, calculate and save their SHA1 message
1292  *              digests.
1293  *
1294  *      STREAM_LIST_ALREADY_SORTED:
1295  *              @stream_list is already sorted in sequential order for reading.
1296  *
1297  * The callback functions are allowed to delete the current stream from the list
1298  * if necessary.
1299  *
1300  * Returns 0 on success; a nonzero error code on failure.  Failure can occur due
1301  * to an error reading the data or due to an error status being returned by any
1302  * of the callback functions.  (A usage sketch follows this function.)
1303  */
1304 int
1305 read_stream_list(struct list_head *stream_list,
1306                  size_t list_head_offset,
1307                  const struct read_stream_list_callbacks *cbs,
1308                  int flags)
1309 {
1310         int ret;
1311         struct list_head *cur, *next;
1312         struct wim_lookup_table_entry *lte;
1313         struct hasher_context *hasher_ctx;
1314         struct read_stream_list_callbacks *sink_cbs;
1315
1316         if (!(flags & STREAM_LIST_ALREADY_SORTED)) {
1317                 ret = sort_stream_list_by_sequential_order(stream_list, list_head_offset);
1318                 if (ret)
1319                         return ret;
1320         }
1321
1322         if (flags & (VERIFY_STREAM_HASHES | COMPUTE_MISSING_STREAM_HASHES)) {
1323                 hasher_ctx = alloca(sizeof(*hasher_ctx));
1324                 *hasher_ctx = (struct hasher_context) {
1325                         .flags  = flags,
1326                         .cbs    = *cbs,
1327                 };
1328                 sink_cbs = alloca(sizeof(*sink_cbs));
1329                 *sink_cbs = (struct read_stream_list_callbacks) {
1330                         .begin_stream           = hasher_begin_stream,
1331                         .begin_stream_ctx       = hasher_ctx,
1332                         .consume_chunk          = hasher_consume_chunk,
1333                         .consume_chunk_ctx      = hasher_ctx,
1334                         .end_stream             = hasher_end_stream,
1335                         .end_stream_ctx         = hasher_ctx,
1336                 };
1337         } else {
1338                 sink_cbs = (struct read_stream_list_callbacks*)cbs;
1339         }
1340
1341         for (cur = stream_list->next, next = cur->next;
1342              cur != stream_list;
1343              cur = next, next = cur->next)
1344         {
1345                 lte = (struct wim_lookup_table_entry*)((u8*)cur - list_head_offset);
1346
1347                 if (lte->flags & WIM_RESHDR_FLAG_SOLID &&
1348                     lte->size != lte->rspec->uncompressed_size)
1349                 {
1350
1351                         struct wim_lookup_table_entry *lte_next, *lte_last;
1352                         struct list_head *next2;
1353                         u64 stream_count;
1354
1355                         /* This stream is a proper sub-sequence of a solid
1356                          * WIM resource.  See if there are other streams in
1357                          * the same resource that need to be read.  Since
1358                          * sort_stream_list_by_sequential_order() sorted the
1359                          * streams by offset in the WIM, this can be determined
1360                          * by simply scanning forward in the list.  */
1361
1362                         lte_last = lte;
1363                         stream_count = 1;
1364                         for (next2 = next;
1365                              next2 != stream_list
1366                              && (lte_next = (struct wim_lookup_table_entry*)
1367                                                 ((u8*)next2 - list_head_offset),
1368                                  lte_next->resource_location == RESOURCE_IN_WIM
1369                                  && lte_next->rspec == lte->rspec);
1370                              next2 = next2->next)
1371                         {
1372                                 lte_last = lte_next;
1373                                 stream_count++;
1374                         }
1375                         if (stream_count > 1) {
1376                                 /* Reading multiple streams combined into a
1377                                  * single solid WIM resource.  They are in the
1378                                  * stream list, sorted by offset; @lte specifies
1379                                  * the first stream in the resource that needs to
1380                                  * be read and @lte_last specifies the last
1381                                  * stream in the resource that needs to be read.  */
1382                                 next = next2;
1383                                 ret = read_streams_in_solid_resource(lte, lte_last,
1384                                                                      stream_count,
1385                                                                      list_head_offset,
1386                                                                      sink_cbs);
1387                                 if (ret)
1388                                         return ret;
1389                                 continue;
1390                         }
1391                 }
1392
1393                 ret = read_full_stream_with_cbs(lte, sink_cbs);
1394                 if (ret && ret != BEGIN_STREAM_STATUS_SKIP_STREAM)
1395                         return ret;
1396         }
1397         return 0;
1398 }
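
/*
 * Illustrative usage sketch for read_stream_list() (not part of the library).
 * The callback functions my_begin_stream(), my_consume_chunk(), and
 * my_end_stream(), the context @my_ctx, and the list member name
 * `my_list_node' are hypothetical names used only for this example:
 *
 *      struct read_stream_list_callbacks cbs = {
 *              .begin_stream           = my_begin_stream,
 *              .begin_stream_ctx       = &my_ctx,
 *              .consume_chunk          = my_consume_chunk,
 *              .consume_chunk_ctx      = &my_ctx,
 *              .end_stream             = my_end_stream,
 *              .end_stream_ctx         = &my_ctx,
 *      };
 *
 *      ret = read_stream_list(&my_stream_list,
 *                             offsetof(struct wim_lookup_table_entry,
 *                                      my_list_node),
 *                             &cbs, VERIFY_STREAM_HASHES);
 */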
1399
1400 /* Extract the first @size bytes of the specified stream.
1401  *
1402  * If @size specifies the full uncompressed size of the stream, then the SHA1
1403  * message digest of the uncompressed stream is checked while being extracted.
1404  *
1405  * The uncompressed data of the resource is passed in chunks of unspecified size
1406  * to the @extract_chunk function, passing it @extract_chunk_arg.  */
1407 int
1408 extract_stream(struct wim_lookup_table_entry *lte, u64 size,
1409                consume_data_callback_t extract_chunk, void *extract_chunk_arg)
1410 {
1411         wimlib_assert(size <= lte->size);
1412         if (size == lte->size) {
1413                 /* Full-size read: also check the SHA1 message digest.  */
1414                 struct read_stream_list_callbacks cbs = {
1415                         .consume_chunk          = extract_chunk,
1416                         .consume_chunk_ctx      = extract_chunk_arg,
1417                 };
1418                 return read_full_stream_with_sha1(lte, &cbs);
1419         } else {
1420                 /* Partial read: the SHA1 message digest cannot be checked.  */
1421                 return read_stream_prefix(lte, size, extract_chunk,
1422                                           extract_chunk_arg);
1423         }
1424 }
1425
1426 /* A consume_data_callback_t implementation that writes the chunk of data to a
1427  * file descriptor.  */
1428 static int
1429 extract_chunk_to_fd(const void *chunk, size_t size, void *_fd_p)
1430 {
1431         struct filedes *fd = _fd_p;
1432
1433         int ret = full_write(fd, chunk, size);
1434         if (ret) {
1435                 ERROR_WITH_ERRNO("Error writing to file descriptor");
1436                 return ret;
1437         }
1438         return 0;
1439 }
1440
1441 /* Extract the first @size bytes of the specified stream to the specified file
1442  * descriptor.  */
1443 int
1444 extract_stream_to_fd(struct wim_lookup_table_entry *lte,
1445                      struct filedes *fd, u64 size)
1446 {
1447         return extract_stream(lte, size, extract_chunk_to_fd, fd);
1448 }
1449
1450 /* Extract the full uncompressed contents of the specified stream to the
1451  * specified file descriptor.  */
1452 int
1453 extract_full_stream_to_fd(struct wim_lookup_table_entry *lte,
1454                           struct filedes *fd)
1455 {
1456         return extract_stream_to_fd(lte, fd, lte->size);
1457 }
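
/*
 * Illustrative usage sketch (not part of the library): extracting a stream's
 * full contents to a newly opened file.  The open()/filedes_init()/
 * filedes_close() sequence shown here is an assumption made for this example;
 * extract_full_stream_to_fd() itself only requires an initialized
 * `struct filedes'.
 *
 *      struct filedes fd;
 *      int raw_fd = open("stream.out", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *
 *      if (raw_fd < 0)
 *              return WIMLIB_ERR_OPEN;
 *      filedes_init(&fd, raw_fd);
 *      ret = extract_full_stream_to_fd(lte, &fd);
 *      filedes_close(&fd);
 */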
1458
1459 /* Calculate the SHA1 message digest of a stream and store it in @lte->hash.  */
1460 int
1461 sha1_stream(struct wim_lookup_table_entry *lte)
1462 {
1463         wimlib_assert(lte->unhashed);
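        /* An empty callback set suffices here: the stream data only needs to
         * be read and hashed, and read_full_stream_with_sha1() takes care of
         * computing the digest and saving it in @lte->hash.  */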
1464         struct read_stream_list_callbacks cbs = {
1465         };
1466         return read_full_stream_with_sha1(lte, &cbs);
1467 }
1468
1469 /* Convert a short WIM resource header to a stand-alone WIM resource
1470  * specification.
1471  *
1472  * Note: for solid resources, some fields (e.g. the compression type and
 * chunk size) still need to be overridden.
1473  */
1474 void
1475 wim_res_hdr_to_spec(const struct wim_reshdr *reshdr, WIMStruct *wim,
1476                     struct wim_resource_spec *rspec)
1477 {
1478         rspec->wim = wim;
1479         rspec->offset_in_wim = reshdr->offset_in_wim;
1480         rspec->size_in_wim = reshdr->size_in_wim;
1481         rspec->uncompressed_size = reshdr->uncompressed_size;
1482         INIT_LIST_HEAD(&rspec->stream_list);
1483         rspec->flags = reshdr->flags;
1484         rspec->is_pipable = wim_is_pipable(wim);
1485         if (rspec->flags & WIM_RESHDR_FLAG_COMPRESSED) {
1486                 rspec->compression_type = wim->compression_type;
1487                 rspec->chunk_size = wim->chunk_size;
1488         } else {
1489                 rspec->compression_type = WIMLIB_COMPRESSION_TYPE_NONE;
1490                 rspec->chunk_size = 0;
1491         }
1492 }
1493
1494 /* Convert a stand-alone resource specification to a WIM resource header.  */
1495 void
1496 wim_res_spec_to_hdr(const struct wim_resource_spec *rspec,
1497                     struct wim_reshdr *reshdr)
1498 {
1499         reshdr->offset_in_wim     = rspec->offset_in_wim;
1500         reshdr->size_in_wim       = rspec->size_in_wim;
1501         reshdr->flags             = rspec->flags;
1502         reshdr->uncompressed_size = rspec->uncompressed_size;
1503 }
1504
1505 /* Translates a WIM resource header from the on-disk format into an in-memory
1506  * format.  */
1507 void
1508 get_wim_reshdr(const struct wim_reshdr_disk *disk_reshdr,
1509                struct wim_reshdr *reshdr)
1510 {
1511         reshdr->offset_in_wim = le64_to_cpu(disk_reshdr->offset_in_wim);
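        /* The compressed size is stored on disk as a 56-bit little-endian
         * integer packed into 7 bytes, sharing an 8-byte field with the 1-byte
         * flags; reassemble it one byte at a time.  */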
1512         reshdr->size_in_wim = (((u64)disk_reshdr->size_in_wim[0] <<  0) |
1513                                ((u64)disk_reshdr->size_in_wim[1] <<  8) |
1514                                ((u64)disk_reshdr->size_in_wim[2] << 16) |
1515                                ((u64)disk_reshdr->size_in_wim[3] << 24) |
1516                                ((u64)disk_reshdr->size_in_wim[4] << 32) |
1517                                ((u64)disk_reshdr->size_in_wim[5] << 40) |
1518                                ((u64)disk_reshdr->size_in_wim[6] << 48));
1519         reshdr->uncompressed_size = le64_to_cpu(disk_reshdr->uncompressed_size);
1520         reshdr->flags = disk_reshdr->flags;
1521 }
1522
1523 /* Translates a WIM resource header from an in-memory format into the on-disk
1524  * format.  */
1525 void
1526 put_wim_reshdr(const struct wim_reshdr *reshdr,
1527                struct wim_reshdr_disk *disk_reshdr)
1528 {
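        /* Pack the 56-bit compressed size back into its 7 on-disk bytes
         * (little-endian); the adjacent byte holds the flags.  */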
1529         disk_reshdr->size_in_wim[0] = reshdr->size_in_wim  >>  0;
1530         disk_reshdr->size_in_wim[1] = reshdr->size_in_wim  >>  8;
1531         disk_reshdr->size_in_wim[2] = reshdr->size_in_wim  >> 16;
1532         disk_reshdr->size_in_wim[3] = reshdr->size_in_wim  >> 24;
1533         disk_reshdr->size_in_wim[4] = reshdr->size_in_wim  >> 32;
1534         disk_reshdr->size_in_wim[5] = reshdr->size_in_wim  >> 40;
1535         disk_reshdr->size_in_wim[6] = reshdr->size_in_wim  >> 48;
1536         disk_reshdr->flags = reshdr->flags;
1537         disk_reshdr->offset_in_wim = cpu_to_le64(reshdr->offset_in_wim);
1538         disk_reshdr->uncompressed_size = cpu_to_le64(reshdr->uncompressed_size);
1539 }