1 /*
2  * resource.c
3  *
4  * Code for reading streams and resources, including compressed WIM resources.
5  */
6
7 /*
8  * Copyright (C) 2012, 2013 Eric Biggers
9  *
10  * This file is part of wimlib, a library for working with WIM files.
11  *
12  * wimlib is free software; you can redistribute it and/or modify it under the
13  * terms of the GNU General Public License as published by the Free Software
14  * Foundation; either version 3 of the License, or (at your option) any later
15  * version.
16  *
17  * wimlib is distributed in the hope that it will be useful, but WITHOUT ANY
18  * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
19  * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License along with
22  * wimlib; if not, see http://www.gnu.org/licenses/.
23  */
24
25 #ifdef HAVE_CONFIG_H
26 #  include "config.h"
27 #endif
28
29 #include "wimlib.h"
30 #include "wimlib/endianness.h"
31 #include "wimlib/error.h"
32 #include "wimlib/file_io.h"
33 #include "wimlib/lookup_table.h"
34 #include "wimlib/resource.h"
35 #include "wimlib/sha1.h"
36
37 #ifdef __WIN32__
38 /* for read_win32_file_prefix(), read_win32_encrypted_file_prefix() */
39 #  include "wimlib/win32.h"
40 #endif
41
42 #ifdef WITH_NTFS_3G
43 /* for read_ntfs_file_prefix() */
44 #  include "wimlib/ntfs_3g.h"
45 #endif
46
47 #ifdef HAVE_ALLOCA_H
48 #  include <alloca.h>
49 #endif
50 #include <errno.h>
51 #include <fcntl.h>
52 #include <stdlib.h>
53 #include <unistd.h>
54
55 /*
56  *                         Compressed WIM resources
57  *
58  * A compressed resource in a WIM consists of a number of compressed chunks,
59  * each of which decompresses to a fixed chunk size (given in the WIM header;
60  * usually 32768) except possibly the last, which decompresses to whatever
61  * bytes remain.  In addition, immediately before the chunks, a table (the
62  * "chunk table") provides the offset, in bytes relative to the end of the chunk
63  * table, of the start of each compressed chunk, except for the first chunk
64  * which is omitted as it always has an offset of 0.  Therefore, a compressed
65  * resource with N chunks will have a chunk table with N - 1 entries.
66  *
67  * Additional information:
68  *
69  * - Entries in the chunk table are 4 bytes each, except if the uncompressed
70  *   size of the resource is greater than 4 GiB, in which case the entries in
71  *   the chunk table are 8 bytes each.  In either case, the entries are unsigned
72  *   little-endian integers.
73  *
74  * - The chunk table is included in the compressed size of the resource provided
75  *   in the corresponding entry in the WIM's stream lookup table.
76  *
77  * - The compressed size of a chunk is never greater than the uncompressed size.
78  *   From the compressor's point of view, chunks that would have compressed to a
79  *   size greater than or equal to their original size are in fact stored
80  *   uncompressed.  From the decompresser's point of view, chunks with
81  *   uncompressed.  From the decompressor's point of view, chunks with
82  *
83  * Furthermore, wimlib supports its own "pipable" WIM format, and for this the
84  * structure of compressed resources was modified to allow piped reading and
85  * writing.  To make sequential writing possible, the chunk table is placed
86  * after the chunks rather than before the chunks, and to make sequential
87  * reading possible, each chunk is prefixed with a 4-byte header giving its
88  * compressed size as a 32-bit, unsigned, little-endian integer.  Otherwise the
89  * details are the same.
90  */
91
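/*
 * Illustrative sketch, not part of the original file: given the layout
 * described above, this is roughly how the chunk count and chunk table size
 * of an ordinary (non-packed) compressed resource follow from its
 * uncompressed size.  The function and parameter names are hypothetical.
 */
static inline u64
example_chunk_table_size(u64 uncompressed_size, u32 chunk_size)
{
        /* Number of chunks; only the last one may be shorter than chunk_size.  */
        u64 num_chunks = (uncompressed_size + chunk_size - 1) / chunk_size;

        /* One chunk table entry per chunk except the first, which is omitted
         * because its offset is always 0.  */
        u64 num_entries = (num_chunks > 0) ? num_chunks - 1 : 0;

        /* Entries are 8 bytes if the uncompressed size exceeds 4 GiB,
         * otherwise 4 bytes; either way they are little-endian integers.  */
        u64 entry_size = (uncompressed_size > ((u64)1 << 32)) ? 8 : 4;

        return num_entries * entry_size;
}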
92
93 struct data_range {
94         u64 offset;
95         u64 size;
96 };
97
98 /*
99  * read_compressed_wim_resource() -
100  *
101  * Read data from a compressed WIM resource.
102  *
103  * @rspec
104  *      Specification of the compressed WIM resource to read from.
105  * @ranges
106  *      Nonoverlapping, nonempty ranges of the uncompressed resource data to
107  *      read, sorted by increasing offset.
108  * @num_ranges
109  *      Number of ranges in @ranges; must be at least 1.
110  * @cb
111  *      Callback function to feed the data being read.  Each call provides the
112  *      next chunk of the requested data, uncompressed.  Each chunk will be of
113  *      nonzero size and will not cross range boundaries, but otherwise will be
114  *      of unspecified size.
115  * @cb_ctx
116  *      Parameter to pass to @cb.
117  *
118  * Possible return values:
119  *
120  *      WIMLIB_ERR_SUCCESS (0)
121  *      WIMLIB_ERR_READ                   (errno set)
122  *      WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to 0)
123  *      WIMLIB_ERR_NOMEM                  (errno set to ENOMEM)
124  *      WIMLIB_ERR_DECOMPRESSION          (errno set to EINVAL)
125  *
126  *      or other error code returned by the @cb function.
127  */
128 static int
129 read_compressed_wim_resource(const struct wim_resource_spec * const rspec,
130                              const struct data_range * const ranges,
131                              const size_t num_ranges,
132                              const consume_data_callback_t cb,
133                              void * const cb_ctx)
134 {
135         int ret;
136         int errno_save;
137
138         u64 *chunk_offsets = NULL;
139         u8 *ubuf = NULL;
140         void *cbuf = NULL;
141         bool chunk_offsets_malloced = false;
142         bool ubuf_malloced = false;
143         bool cbuf_malloced = false;
144         struct wimlib_decompressor *decompressor = NULL;
145
146         /* Sanity checks  */
147         wimlib_assert(rspec != NULL);
148         wimlib_assert(resource_is_compressed(rspec));
149         wimlib_assert(cb != NULL);
150         wimlib_assert(num_ranges != 0);
151         for (size_t i = 0; i < num_ranges; i++) {
152                 DEBUG("Range %zu/%zu: %"PRIu64"@+%"PRIu64" / %"PRIu64,
153                       i + 1, num_ranges, ranges[i].size, ranges[i].offset,
154                       rspec->uncompressed_size);
155                 wimlib_assert(ranges[i].size != 0);
156                 wimlib_assert(ranges[i].offset + ranges[i].size >= ranges[i].size);
157                 wimlib_assert(ranges[i].offset + ranges[i].size <= rspec->uncompressed_size);
158         }
159         for (size_t i = 0; i < num_ranges - 1; i++)
160                 wimlib_assert(ranges[i].offset + ranges[i].size <= ranges[i + 1].offset);
161
162         /* Get the offsets of the first and last bytes of the read.  */
163         const u64 first_offset = ranges[0].offset;
164         const u64 last_offset = ranges[num_ranges - 1].offset + ranges[num_ranges - 1].size - 1;
165
166         /* Get the file descriptor for the WIM.  */
167         struct filedes * const in_fd = &rspec->wim->in_fd;
168
169         /* Determine if we're reading a pipable resource from a pipe or not.  */
170         const bool is_pipe_read = !filedes_is_seekable(in_fd);
171
172         /* Determine if the chunk table is in an alternate format.  */
173         const bool alt_chunk_table = (rspec->flags & WIM_RESHDR_FLAG_PACKED_STREAMS)
174                                         && !is_pipe_read;
175
176         /* Get the maximum size of uncompressed chunks in this resource, which
177          * we require to be a power of 2.  */
178         u32 chunk_size = 0;
179         u64 cur_read_offset = rspec->offset_in_wim;
180         int ctype = WIMLIB_COMPRESSION_TYPE_NONE;
181         if (alt_chunk_table) {
182                 /* Alternate chunk table format.  Its header specifies the chunk
183                  * size and compression format.  */
184                 struct alt_chunk_table_header_disk hdr;
185
186                 ret = full_pread(in_fd, &hdr, sizeof(hdr), cur_read_offset);
187                 if (ret)
188                         goto read_error;
189                 cur_read_offset += sizeof(hdr);
190
191                 chunk_size = le32_to_cpu(hdr.chunk_size);
192                 ctype = le32_to_cpu(hdr.compression_format);
193
194                 /* Format numbers must be the same as in WIMGAPI to be
195                  * compatible.  */
196                 BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_NONE != 0);
197                 BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZX != 1);
198                 BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_XPRESS != 2);
199                 BUILD_BUG_ON(WIMLIB_COMPRESSION_TYPE_LZMS != 3);
200         } else {
201                 /* "Normal" format: the maximum uncompressed chunk size and the
202                  * compression format default to those of the WIM itself.  */
203                 chunk_size = rspec->wim->chunk_size;
204                 ctype = rspec->wim->compression_type;
205         }
206         if (!is_power_of_2(chunk_size)) {
207                 ERROR("Invalid compressed resource: "
208                       "expected power-of-2 chunk size (got %u)", chunk_size);
209                 ret = WIMLIB_ERR_INVALID_CHUNK_SIZE;
210                 goto out_free_memory;
211         }
212
213         /* Get valid decompressor.  */
214         if (ctype == rspec->wim->decompressor_ctype &&
215             chunk_size == rspec->wim->decompressor_max_block_size)
216         {
217                 /* Cached decompressor.  */
218                 decompressor = rspec->wim->decompressor;
219                 rspec->wim->decompressor_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
220                 rspec->wim->decompressor = NULL;
221         } else {
222                 ret = wimlib_create_decompressor(ctype, chunk_size, NULL,
223                                                  &decompressor);
224                 if (ret)
225                         goto out_free_memory;
226         }
227
228         const u32 chunk_order = bsr32(chunk_size);
229
230         /* Calculate the total number of chunks the resource is divided into.  */
231         const u64 num_chunks = (rspec->uncompressed_size + chunk_size - 1) >> chunk_order;
232
233         /* Calculate the 0-based indices of the first and last chunks containing
234          * data that needs to be passed to the callback.  */
235         const u64 first_needed_chunk = first_offset >> chunk_order;
236         const u64 last_needed_chunk = last_offset >> chunk_order;
237
238         /* Calculate the 0-based index of the first chunk that actually needs to
239          * be read.  This is normally first_needed_chunk, but for pipe reads we
240          * must always start from the 0th chunk.  */
241         const u64 read_start_chunk = (is_pipe_read ? 0 : first_needed_chunk);
242
243         /* Calculate the number of chunk offsets that are needed for the chunks
244          * being read.  */
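        /* (One extra offset is needed when the last needed chunk is not the
         * final chunk of the resource, so that its compressed size can be
         * computed as the difference of consecutive offsets.)  */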
245         const u64 num_needed_chunk_offsets =
246                 last_needed_chunk - read_start_chunk + 1 +
247                 (last_needed_chunk < num_chunks - 1);
248
249         /* Calculate the number of entries in the chunk table.  Normally, it's
250          * one less than the number of chunks, since the first chunk has no
251          * entry.  But in the alternate chunk table format, the chunk entries
252          * contain chunk sizes, not offsets, and there is one per chunk.  */
253         const u64 num_chunk_entries = (alt_chunk_table ? num_chunks : num_chunks - 1);
254
255         /* Set the size of each chunk table entry based on the resource's
256          * uncompressed size.  XXX:  Does the alternate chunk table really
257          * always have 4-byte entries?  */
258         const u64 chunk_entry_size =
259                 (rspec->uncompressed_size > (1ULL << 32) && !alt_chunk_table)
260                         ? 8 : 4;
261
262         /* Calculate the size of the chunk table in bytes.  */
263         const u64 chunk_table_size = num_chunk_entries * chunk_entry_size;
264
265         /* Calculate the size of the chunk table in bytes, including the header
266          * in the case of the alternate chunk table format.  */
267         const u64 chunk_table_full_size =
268                 (alt_chunk_table) ? chunk_table_size + sizeof(struct alt_chunk_table_header_disk)
269                                   : chunk_table_size;
270
271         if (!is_pipe_read) {
272                 /* Read the needed chunk table entries into memory and use them
273                  * to initialize the chunk_offsets array.  */
274
275                 u64 first_chunk_entry_to_read;
276                 u64 last_chunk_entry_to_read;
277
278                 if (alt_chunk_table) {
279                         /* The alternate chunk table contains chunk sizes, not
280          * offsets, so we must always read all preceding entries
281                          * in order to determine offsets.  */
282                         first_chunk_entry_to_read = 0;
283                         last_chunk_entry_to_read = last_needed_chunk;
284                 } else {
285                         /* Here we must account for the fact that the first
286                          * chunk has no explicit chunk table entry.  */
287
288                         if (read_start_chunk == 0)
289                                 first_chunk_entry_to_read = 0;
290                         else
291                                 first_chunk_entry_to_read = read_start_chunk - 1;
292
293                         if (last_needed_chunk == 0)
294                                 last_chunk_entry_to_read = 0;
295                         else
296                                 last_chunk_entry_to_read = last_needed_chunk - 1;
297
298                         if (last_needed_chunk < num_chunks - 1)
299                                 last_chunk_entry_to_read++;
300                 }
301
302                 const u64 num_chunk_entries_to_read =
303                         last_chunk_entry_to_read - first_chunk_entry_to_read + 1;
304
305                 const u64 chunk_offsets_alloc_size =
306                         max(num_chunk_entries_to_read,
307                             num_needed_chunk_offsets) * sizeof(chunk_offsets[0]);
308
309                 if ((size_t)chunk_offsets_alloc_size != chunk_offsets_alloc_size)
310                         goto oom;
311
312                 if (chunk_offsets_alloc_size <= STACK_MAX) {
313                         chunk_offsets = alloca(chunk_offsets_alloc_size);
314                 } else {
315                         chunk_offsets = MALLOC(chunk_offsets_alloc_size);
316                         if (chunk_offsets == NULL)
317                                 goto oom;
318                         chunk_offsets_malloced = true;
319                 }
320
321                 const size_t chunk_table_size_to_read =
322                         num_chunk_entries_to_read * chunk_entry_size;
323
324                 const u64 file_offset_of_needed_chunk_entries =
325                         cur_read_offset
326                         + (first_chunk_entry_to_read * chunk_entry_size)
327                         + (rspec->is_pipable ? (rspec->size_in_wim - chunk_table_size) : 0);
328
329                 void * const chunk_table_data =
330                         (u8*)chunk_offsets +
331                         chunk_offsets_alloc_size -
332                         chunk_table_size_to_read;
333
334                 ret = full_pread(in_fd, chunk_table_data, chunk_table_size_to_read,
335                                  file_offset_of_needed_chunk_entries);
336                 if (ret)
337                         goto read_error;
338
339                 /* Now fill in chunk_offsets from the entries we have read in
340                  * chunk_table_data.  We break aliasing rules here to avoid having
341                  * to allocate yet another array.  */
342                 typedef le64 __attribute__((may_alias)) aliased_le64_t;
343                 typedef le32 __attribute__((may_alias)) aliased_le32_t;
344                 u64 * chunk_offsets_p = chunk_offsets;
345
346                 if (alt_chunk_table) {
347                         u64 cur_offset = 0;
348                         aliased_le32_t *raw_entries = chunk_table_data;
349
350                         for (size_t i = 0; i < num_chunk_entries_to_read; i++) {
351                                 u32 entry = le32_to_cpu(raw_entries[i]);
352                                 if (i >= read_start_chunk)
353                                         *chunk_offsets_p++ = cur_offset;
354                                 cur_offset += entry;
355                         }
356                         if (last_needed_chunk < num_chunks - 1)
357                                 *chunk_offsets_p = cur_offset;
358                 } else {
359                         if (read_start_chunk == 0)
360                                 *chunk_offsets_p++ = 0;
361
362                         if (chunk_entry_size == 4) {
363                                 aliased_le32_t *raw_entries = chunk_table_data;
364                                 for (size_t i = 0; i < num_chunk_entries_to_read; i++)
365                                         *chunk_offsets_p++ = le32_to_cpu(raw_entries[i]);
366                         } else {
367                                 aliased_le64_t *raw_entries = chunk_table_data;
368                                 for (size_t i = 0; i < num_chunk_entries_to_read; i++)
369                                         *chunk_offsets_p++ = le64_to_cpu(raw_entries[i]);
370                         }
371                 }
372
373                 /* Set offset to beginning of first chunk to read.  */
374                 cur_read_offset += chunk_offsets[0];
375                 if (rspec->is_pipable)
376                         cur_read_offset += read_start_chunk * sizeof(struct pwm_chunk_hdr);
377                 else
378                         cur_read_offset += chunk_table_size;
379         }
380
381         /* Allocate buffer for holding the uncompressed data of each chunk.  */
382         if (chunk_size <= STACK_MAX) {
383                 ubuf = alloca(chunk_size);
384         } else {
385                 ubuf = MALLOC(chunk_size);
386                 if (ubuf == NULL)
387                         goto oom;
388                 ubuf_malloced = true;
389         }
390
391         /* Allocate a temporary buffer for reading compressed chunks, each of
392          * which can be at most @chunk_size - 1 bytes.  This excludes compressed
393          * chunks that are a full @chunk_size bytes, which are actually stored
394          * uncompressed.  */
395         if (chunk_size - 1 <= STACK_MAX) {
396                 cbuf = alloca(chunk_size - 1);
397         } else {
398                 cbuf = MALLOC(chunk_size - 1);
399                 if (cbuf == NULL)
400                         goto oom;
401                 cbuf_malloced = true;
402         }
403
404         /* Set current data range.  */
405         const struct data_range *cur_range = ranges;
406         const struct data_range * const end_range = &ranges[num_ranges];
407         u64 cur_range_pos = cur_range->offset;
408         u64 cur_range_end = cur_range->offset + cur_range->size;
409
410         /* Read and process each needed chunk.  */
411         for (u64 i = read_start_chunk; i <= last_needed_chunk; i++) {
412
413                 /* Calculate uncompressed size of next chunk.  */
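                /* (chunk_size is a power of 2, so ANDing with chunk_size - 1
                 * yields the remainder of the uncompressed size modulo
                 * chunk_size.)  */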
414                 u32 chunk_usize;
415                 if ((i == num_chunks - 1) && (rspec->uncompressed_size & (chunk_size - 1)))
416                         chunk_usize = (rspec->uncompressed_size & (chunk_size - 1));
417                 else
418                         chunk_usize = chunk_size;
419
420                 /* Calculate compressed size of next chunk.  */
421                 u32 chunk_csize;
422                 if (is_pipe_read) {
423                         struct pwm_chunk_hdr chunk_hdr;
424
425                         ret = full_pread(in_fd, &chunk_hdr,
426                                          sizeof(chunk_hdr), cur_read_offset);
427                         if (ret)
428                                 goto read_error;
429                         chunk_csize = le32_to_cpu(chunk_hdr.compressed_size);
430                 } else {
431                         if (i == num_chunks - 1) {
432                                 chunk_csize = rspec->size_in_wim -
433                                               chunk_table_full_size -
434                                               chunk_offsets[i - read_start_chunk];
435                                 if (rspec->is_pipable)
436                                         chunk_csize -= num_chunks * sizeof(struct pwm_chunk_hdr);
437                         } else {
438                                 chunk_csize = chunk_offsets[i + 1 - read_start_chunk] -
439                                               chunk_offsets[i - read_start_chunk];
440                         }
441                 }
442                 if (chunk_csize == 0 || chunk_csize > chunk_usize) {
443                         ERROR("Invalid chunk size in compressed resource!");
444                         errno = EINVAL;
445                         ret = WIMLIB_ERR_DECOMPRESSION;
446                         goto out_free_memory;
447                 }
448                 if (rspec->is_pipable)
449                         cur_read_offset += sizeof(struct pwm_chunk_hdr);
450
451                 /* Offsets in the uncompressed resource at which this chunk
452                  * starts and ends.  */
453                 const u64 chunk_start_offset = i << chunk_order;
454                 const u64 chunk_end_offset = chunk_start_offset + chunk_usize;
455
456                 if (chunk_end_offset <= cur_range_pos) {
457
458                         /* The next range does not require data in this chunk,
459                          * so skip it.  */
460                         cur_read_offset += chunk_csize;
461                         if (is_pipe_read) {
462                                 u8 dummy;
463
464                                 ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
465                                 if (ret)
466                                         goto read_error;
467                         }
468                 } else {
469
470                         /* Read the chunk and feed data to the callback
471                          * function.  */
472                         u8 *read_buf;
473
474                         if (chunk_csize == chunk_usize)
475                                 read_buf = ubuf;
476                         else
477                                 read_buf = cbuf;
478
479                         ret = full_pread(in_fd,
480                                          read_buf,
481                                          chunk_csize,
482                                          cur_read_offset);
483                         if (ret)
484                                 goto read_error;
485
486                         if (read_buf == cbuf) {
487                                 DEBUG("Decompressing chunk %"PRIu64" "
488                                       "(csize=%"PRIu32" usize=%"PRIu32")",
489                                       i, chunk_csize, chunk_usize);
490                                 ret = wimlib_decompress(cbuf,
491                                                         chunk_csize,
492                                                         ubuf,
493                                                         chunk_usize,
494                                                         decompressor);
495                                 if (ret) {
496                                         ERROR("Failed to decompress data!");
497                                         ret = WIMLIB_ERR_DECOMPRESSION;
498                                         errno = EINVAL;
499                                         goto out_free_memory;
500                                 }
501                         }
502                         cur_read_offset += chunk_csize;
503
504                         /* At least one range requires data in this chunk.  */
505                         do {
506                                 size_t start, end, size;
507
508                                 /* Calculate how many bytes of data should be
509                                  * sent to the callback function, taking into
510                                  * account that data sent to the callback
511                                  * function must not overlap range boundaries.
512                                  */
513                                 start = cur_range_pos - chunk_start_offset;
514                                 end = min(cur_range_end, chunk_end_offset) - chunk_start_offset;
515                                 size = end - start;
516
517                                 ret = (*cb)(&ubuf[start], size, cb_ctx);
518
519                                 if (ret)
520                                         goto out_free_memory;
521
522                                 cur_range_pos += size;
523                                 if (cur_range_pos == cur_range_end) {
524                                         /* Advance to next range.  */
525                                         if (++cur_range == end_range) {
526                                                 cur_range_pos = ~0ULL;
527                                         } else {
528                                                 cur_range_pos = cur_range->offset;
529                                                 cur_range_end = cur_range->offset + cur_range->size;
530                                         }
531                                 }
532                         } while (cur_range_pos < chunk_end_offset);
533                 }
534         }
535
536         if (is_pipe_read &&
537             last_offset == rspec->uncompressed_size - 1 &&
538             chunk_table_size)
539         {
540                 u8 dummy;
541                 /* If reading a pipable resource from a pipe and the full data
542                  * was requested, skip the chunk table at the end so that the
543                  * file descriptor is fully clear of the resource after this
544                  * returns.  */
545                 cur_read_offset += chunk_table_size;
546                 ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
547                 if (ret)
548                         goto read_error;
549         }
550         ret = 0;
551
552 out_free_memory:
553         errno_save = errno;
554         if (decompressor) {
555                 wimlib_free_decompressor(rspec->wim->decompressor);
556                 rspec->wim->decompressor = decompressor;
557                 rspec->wim->decompressor_ctype = ctype;
558                 rspec->wim->decompressor_max_block_size = chunk_size;
559         }
560         if (chunk_offsets_malloced)
561                 FREE(chunk_offsets);
562         if (ubuf_malloced)
563                 FREE(ubuf);
564         if (cbuf_malloced)
565                 FREE(cbuf);
566         errno = errno_save;
567         return ret;
568
569 oom:
570         ERROR("Not enough memory available to read size=%"PRIu64" bytes "
571               "from compressed WIM resource!", last_offset - first_offset + 1);
572         errno = ENOMEM;
573         ret = WIMLIB_ERR_NOMEM;
574         goto out_free_memory;
575
576 read_error:
577         ERROR_WITH_ERRNO("Error reading compressed WIM resource!");
578         goto out_free_memory;
579 }
580
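/*
 * Example only, not part of the original file: a minimal
 * consume_data_callback_t implementation that simply counts the bytes it is
 * fed.  It illustrates the callback contract documented above for
 * read_compressed_wim_resource(): each call receives one nonzero-size chunk
 * of uncompressed data that does not cross range boundaries, and a nonzero
 * return value aborts the read and is propagated to the caller.  The names
 * below are hypothetical.
 */
static inline int
example_count_bytes_cb(const void *chunk, size_t size, void *_ctx)
{
        u64 *total = _ctx;

        *total += size;         /* accumulate the number of bytes received */
        return 0;               /* returning nonzero would abort the read */
}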
581 /* Read raw data from a file descriptor at the specified offset, feeding the
582  * data in chunks to the specified callback function.  */
583 static int
584 read_raw_file_data(struct filedes *in_fd, u64 offset, u64 size,
585                    consume_data_callback_t cb, void *cb_ctx)
586 {
587         u8 buf[BUFFER_SIZE];
588         size_t bytes_to_read;
589         int ret;
590
591         while (size) {
592                 bytes_to_read = min(sizeof(buf), size);
593                 ret = full_pread(in_fd, buf, bytes_to_read, offset);
594                 if (ret) {
595                         ERROR_WITH_ERRNO("Read error");
596                         return ret;
597                 }
598                 ret = cb(buf, bytes_to_read, cb_ctx);
599                 if (ret)
600                         return ret;
601                 size -= bytes_to_read;
602                 offset += bytes_to_read;
603         }
604         return 0;
605 }
606
607 /* A consume_data_callback_t implementation that simply concatenates all chunks
608  * into a buffer.  */
609 static int
610 bufferer_cb(const void *chunk, size_t size, void *_ctx)
611 {
612         u8 **buf_p = _ctx;
613
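        /* mempcpy() returns a pointer just past the copied bytes, so this
         * advances the caller's buffer pointer by @size.  */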
614         *buf_p = mempcpy(*buf_p, chunk, size);
615         return 0;
616 }
617
618 /*
619  * read_partial_wim_resource() -
620  *
621  * Read a range of data from an uncompressed or compressed resource in a WIM
622  * file.
623  *
624  * @rspec
625  *      Specification of the WIM resource to read from.
626  * @offset
627  *      Offset within the uncompressed resource at which to start reading.
628  * @size
629  *      Number of bytes to read.
630  * @cb
631  *      Callback function to feed the data being read.  Each call provides the
632  *      next chunk of the requested data, uncompressed.  Each chunk will be of
633  *      nonzero size and will not cross range boundaries, but otherwise will be
634  *      of unspecified size.
635  * @cb_ctx
636  *      Parameter to pass to @cb.
637  *
638  * Return values:
639  *      WIMLIB_ERR_SUCCESS (0)
640  *      WIMLIB_ERR_READ                   (errno set)
641  *      WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to 0)
642  *      WIMLIB_ERR_NOMEM                  (errno set to ENOMEM)
643  *      WIMLIB_ERR_DECOMPRESSION          (errno set to EINVAL)
644  *
645  *      or other error code returned by the @cb function.
646  */
647 static int
648 read_partial_wim_resource(const struct wim_resource_spec *rspec,
649                           u64 offset, u64 size,
650                           consume_data_callback_t cb, void *cb_ctx)
651 {
652         /* Sanity checks.  */
653         wimlib_assert(offset + size >= offset);
654         wimlib_assert(offset + size <= rspec->uncompressed_size);
655
656         DEBUG("Reading %"PRIu64" @ %"PRIu64" from WIM resource  "
657               "%"PRIu64" => %"PRIu64" @ %"PRIu64,
658               size, offset, rspec->uncompressed_size,
659               rspec->size_in_wim, rspec->offset_in_wim);
660
661         /* Trivial case.  */
662         if (size == 0)
663                 return 0;
664
665         if (resource_is_compressed(rspec)) {
666                 struct data_range range = {
667                         .offset = offset,
668                         .size = size,
669                 };
670                 return read_compressed_wim_resource(rspec, &range, 1,
671                                                     cb, cb_ctx);
672         } else {
673                 return read_raw_file_data(&rspec->wim->in_fd,
674                                           rspec->offset_in_wim + offset,
675                                           size,
676                                           cb,
677                                           cb_ctx);
678         }
679 }
680
681 /* Read the specified range of uncompressed data from the specified stream,
682  * which must be located in a WIM file, into the specified buffer.  */
683 int
684 read_partial_wim_stream_into_buf(const struct wim_lookup_table_entry *lte,
685                                  size_t size, u64 offset, void *_buf)
686 {
687         u8 *buf = _buf;
688
689         wimlib_assert(lte->resource_location == RESOURCE_IN_WIM);
690
691         return read_partial_wim_resource(lte->rspec,
692                                          lte->offset_in_res + offset,
693                                          size,
694                                          bufferer_cb,
695                                          &buf);
696 }
697
698 /* A consume_data_callback_t implementation that simply ignores the data
699  * received.  */
700 static int
701 skip_chunk_cb(const void *chunk, size_t size, void *_ctx)
702 {
703         return 0;
704 }
705
706 /* Skip over the data of the specified stream, which must correspond to a full
707  * WIM resource.  */
708 int
709 skip_wim_stream(struct wim_lookup_table_entry *lte)
710 {
711         wimlib_assert(lte->resource_location == RESOURCE_IN_WIM);
712         wimlib_assert(!(lte->flags & WIM_RESHDR_FLAG_PACKED_STREAMS));
713         DEBUG("Skipping stream (size=%"PRIu64")", lte->size);
714         return read_partial_wim_resource(lte->rspec,
715                                          0,
716                                          lte->rspec->uncompressed_size,
717                                          skip_chunk_cb,
718                                          NULL);
719 }
720
721 static int
722 read_wim_stream_prefix(const struct wim_lookup_table_entry *lte, u64 size,
723                        consume_data_callback_t cb, void *cb_ctx)
724 {
725         return read_partial_wim_resource(lte->rspec, lte->offset_in_res, size,
726                                          cb, cb_ctx);
727 }
728
729 #ifndef __WIN32__
730 /* This function handles reading stream data that is located in an external
731  * file, such as a file that has been added to the WIM image through execution
732  * of a wimlib_add_command.
733  *
734  * This assumes the file can be accessed using the standard POSIX open(),
735  * read(), and close().  On Windows this will not necessarily be the case (since
736  * the file may need FILE_FLAG_BACKUP_SEMANTICS to be opened, or the file may be
737  * encrypted), so Windows uses its own code for its equivalent case.  */
738 static int
739 read_file_on_disk_prefix(const struct wim_lookup_table_entry *lte, u64 size,
740                          consume_data_callback_t cb, void *cb_ctx)
741 {
742         int ret;
743         int raw_fd;
744         struct filedes fd;
745
746         wimlib_assert(size <= lte->size);
747
748         DEBUG("Reading %"PRIu64" bytes from \"%"TS"\"", size, lte->file_on_disk);
749
750         raw_fd = open(lte->file_on_disk, O_BINARY | O_RDONLY);
751         if (raw_fd < 0) {
752                 ERROR_WITH_ERRNO("Can't open \"%"TS"\"", lte->file_on_disk);
753                 return WIMLIB_ERR_OPEN;
754         }
755         filedes_init(&fd, raw_fd);
756         ret = read_raw_file_data(&fd, 0, size, cb, cb_ctx);
757         filedes_close(&fd);
758         return ret;
759 }
760 #endif /* !__WIN32__ */
761
762 /* This function handles the trivial case of reading stream data that is, in
763  * fact, already located in an in-memory buffer.  */
764 static int
765 read_buffer_prefix(const struct wim_lookup_table_entry *lte,
766                    u64 size, consume_data_callback_t cb, void *cb_ctx)
767 {
768         wimlib_assert(size <= lte->size);
769         return (*cb)(lte->attached_buffer, size, cb_ctx);
770 }
771
772 typedef int (*read_stream_prefix_handler_t)(const struct wim_lookup_table_entry *lte,
773                                             u64 size,
774                                             consume_data_callback_t cb,
775                                             void *cb_ctx);
776
777 /*
778  * read_stream_prefix() -
779  *
780  * Reads the first @size bytes from a generic "stream", which may be located in
781  * any one of several locations, such as in a WIM file (compressed or
782  * uncompressed), in an external file, or directly in an in-memory buffer.
783  *
784  * This function feeds the data to a callback function @cb in chunks of
785  * unspecified size.
786  *
787  * Returns 0 on success; nonzero on error.  A nonzero value will be returned if
788  * the stream data cannot be successfully read (for a number of different
789  * reasons, depending on the stream location), or if @cb returned nonzero, in
790  * which case that error code will be returned.
791  */
792 static int
793 read_stream_prefix(const struct wim_lookup_table_entry *lte, u64 size,
794                    consume_data_callback_t cb, void *cb_ctx)
795 {
796         static const read_stream_prefix_handler_t handlers[] = {
797                 [RESOURCE_IN_WIM]             = read_wim_stream_prefix,
798         #ifdef __WIN32__
799                 [RESOURCE_IN_FILE_ON_DISK]    = read_win32_file_prefix,
800         #else
801                 [RESOURCE_IN_FILE_ON_DISK]    = read_file_on_disk_prefix,
802         #endif
803                 [RESOURCE_IN_ATTACHED_BUFFER] = read_buffer_prefix,
804         #ifdef WITH_FUSE
805                 [RESOURCE_IN_STAGING_FILE]    = read_file_on_disk_prefix,
806         #endif
807         #ifdef WITH_NTFS_3G
808                 [RESOURCE_IN_NTFS_VOLUME]     = read_ntfs_file_prefix,
809         #endif
810         #ifdef __WIN32__
811                 [RESOURCE_WIN32_ENCRYPTED]    = read_win32_encrypted_file_prefix,
812         #endif
813         };
814         wimlib_assert(lte->resource_location < ARRAY_LEN(handlers)
815                       && handlers[lte->resource_location] != NULL);
816         return handlers[lte->resource_location](lte, size, cb, cb_ctx);
817 }
818
819 /* Read the full uncompressed data of the specified stream into the specified
820  * buffer, which must have space for at least lte->size bytes.  */
821 int
822 read_full_stream_into_buf(const struct wim_lookup_table_entry *lte, void *_buf)
823 {
824         u8 *buf = _buf;
825         return read_stream_prefix(lte, lte->size, bufferer_cb, &buf);
826 }
827
828 /* Retrieve the full uncompressed data of the specified stream.  A buffer large
829  * enough to hold the data is allocated and returned in @buf_ret.  */
830 int
831 read_full_stream_into_alloc_buf(const struct wim_lookup_table_entry *lte,
832                                 void **buf_ret)
833 {
834         int ret;
835         void *buf;
836
837         if ((size_t)lte->size != lte->size) {
838                 ERROR("Can't read %"PRIu64" byte stream into "
839                       "memory", lte->size);
840                 return WIMLIB_ERR_NOMEM;
841         }
842
843         buf = MALLOC(lte->size);
844         if (buf == NULL)
845                 return WIMLIB_ERR_NOMEM;
846
847         ret = read_full_stream_into_buf(lte, buf);
848         if (ret) {
849                 FREE(buf);
850                 return ret;
851         }
852
853         *buf_ret = buf;
854         return 0;
855 }
856
857 /* Retrieve the full uncompressed data of the specified WIM resource.  A buffer
858  * large enough to hold the data is allocated and returned in @buf_ret.  */
859 static int
860 wim_resource_spec_to_data(struct wim_resource_spec *rspec, void **buf_ret)
861 {
862         int ret;
863         struct wim_lookup_table_entry *lte;
864
865         lte = new_lookup_table_entry();
866         if (lte == NULL)
867                 return WIMLIB_ERR_NOMEM;
868
869         lte_bind_wim_resource_spec(lte, rspec);
870         lte->flags = rspec->flags;
871         lte->size = rspec->uncompressed_size;
872         lte->offset_in_res = 0;
873
874         ret = read_full_stream_into_alloc_buf(lte, buf_ret);
875
876         lte_unbind_wim_resource_spec(lte);
877         free_lookup_table_entry(lte);
878         return ret;
879 }
880
881 /* Retrieve the full uncompressed data of a WIM resource specified as a raw
882  * `wim_reshdr' and the corresponding WIM file.  A buffer large enough to hold
883  * the data is allocated and returned in @buf_ret.  */
884 int
885 wim_reshdr_to_data(const struct wim_reshdr *reshdr, WIMStruct *wim, void **buf_ret)
886 {
887         DEBUG("offset_in_wim=%"PRIu64", size_in_wim=%"PRIu64", "
888               "uncompressed_size=%"PRIu64,
889               reshdr->offset_in_wim, reshdr->size_in_wim,
890               reshdr->uncompressed_size);
891
892         struct wim_resource_spec rspec;
893         wim_res_hdr_to_spec(reshdr, wim, &rspec);
894         return wim_resource_spec_to_data(&rspec, buf_ret);
895 }
896
897 struct streamifier_context {
898         struct read_stream_list_callbacks cbs;
899         struct wim_lookup_table_entry *cur_stream;
900         struct wim_lookup_table_entry *next_stream;
901         u64 cur_stream_offset;
902         struct wim_lookup_table_entry *final_stream;
903         size_t list_head_offset;
904 };
905
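/* Return the stream that follows @lte in the list linked through the `struct
 * list_head' located @list_head_offset bytes into each stream structure.
 * This is effectively a manual container_of(): step forward to the embedded
 * list_head, follow its `next' pointer, then step back to the start of the
 * containing `struct wim_lookup_table_entry'.  */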
906 static struct wim_lookup_table_entry *
907 next_stream(struct wim_lookup_table_entry *lte, size_t list_head_offset)
908 {
909         struct list_head *cur;
910
911         cur = (struct list_head*)((u8*)lte + list_head_offset);
912
913         return (struct wim_lookup_table_entry*)((u8*)cur->next - list_head_offset);
914 }
915
916 /* A consume_data_callback_t implementation that translates raw resource data
917  * into streams, calling the begin_stream, consume_chunk, and end_stream
918  * callback functions as appropriate.  */
919 static int
920 streamifier_cb(const void *chunk, size_t size, void *_ctx)
921 {
922         struct streamifier_context *ctx = _ctx;
923         int ret;
924
925         DEBUG("%zu bytes passed to streamifier", size);
926
927         wimlib_assert(ctx->cur_stream != NULL);
928         wimlib_assert(size <= ctx->cur_stream->size - ctx->cur_stream_offset);
929
930         if (ctx->cur_stream_offset == 0) {
931                 /* Starting a new stream.  */
932                 DEBUG("Begin new stream (size=%"PRIu64").",
933                       ctx->cur_stream->size);
934                 ret = (*ctx->cbs.begin_stream)(ctx->cur_stream, true,
935                                                ctx->cbs.begin_stream_ctx);
936                 if (ret)
937                         return ret;
938         }
939
940         /* Consume the chunk.  */
941         ret = (*ctx->cbs.consume_chunk)(chunk, size,
942                                         ctx->cbs.consume_chunk_ctx);
943         if (ret)
944                 return ret;
945         ctx->cur_stream_offset += size;
946
947         if (ctx->cur_stream_offset == ctx->cur_stream->size) {
948                 /* Finished reading all the data for a stream.  */
949
950                 ctx->cur_stream_offset = 0;
951
952                 DEBUG("End stream (size=%"PRIu64").", ctx->cur_stream->size);
953                 ret = (*ctx->cbs.end_stream)(ctx->cur_stream, 0,
954                                              ctx->cbs.end_stream_ctx);
955                 if (ret)
956                         return ret;
957
958                 /* Advance to next stream.  */
959                 ctx->cur_stream = ctx->next_stream;
960                 if (ctx->cur_stream != NULL) {
961                         if (ctx->cur_stream != ctx->final_stream)
962                                 ctx->next_stream = next_stream(ctx->cur_stream,
963                                                                ctx->list_head_offset);
964                         else
965                                 ctx->next_stream = NULL;
966                 }
967         }
968         return 0;
969 }
970
971 struct hasher_context {
972         SHA_CTX sha_ctx;
973         int flags;
974         struct read_stream_list_callbacks cbs;
975 };
976
977 /* Callback for starting to read a stream while calculating its SHA1 message
978  * digest.  */
979 static int
980 hasher_begin_stream(struct wim_lookup_table_entry *lte, bool is_partial_res,
981                     void *_ctx)
982 {
983         struct hasher_context *ctx = _ctx;
984
985         sha1_init(&ctx->sha_ctx);
986
987         if (ctx->cbs.begin_stream == NULL)
988                 return 0;
989         else
990                 return (*ctx->cbs.begin_stream)(lte, is_partial_res,
991                                                 ctx->cbs.begin_stream_ctx);
992 }
993
994 /* A consume_data_callback_t implementation that continues calculating the SHA1
995  * message digest of the stream being read, then optionally passes the data on
996  * to another consume_data_callback_t implementation.  This allows checking the
997  * SHA1 message digest of a stream being extracted, for example.  */
998 static int
999 hasher_consume_chunk(const void *chunk, size_t size, void *_ctx)
1000 {
1001         struct hasher_context *ctx = _ctx;
1002
1003         sha1_update(&ctx->sha_ctx, chunk, size);
1004         if (ctx->cbs.consume_chunk == NULL)
1005                 return 0;
1006         else
1007                 return (*ctx->cbs.consume_chunk)(chunk, size, ctx->cbs.consume_chunk_ctx);
1008 }
1009
1010 /* Callback for finishing reading a stream while calculating its SHA1 message
1011  * digest.  */
1012 static int
1013 hasher_end_stream(struct wim_lookup_table_entry *lte, int status, void *_ctx)
1014 {
1015         struct hasher_context *ctx = _ctx;
1016         u8 hash[SHA1_HASH_SIZE];
1017         int ret;
1018
1019         if (status) {
1020                 /* Error occurred; the full stream may not have been read.  */
1021                 ret = status;
1022                 goto out_next_cb;
1023         }
1024
1025         /* Retrieve the final SHA1 message digest.  */
1026         sha1_final(hash, &ctx->sha_ctx);
1027
1028         if (lte->unhashed) {
1029                 if (ctx->flags & COMPUTE_MISSING_STREAM_HASHES) {
1030                         /* No SHA1 message digest was previously present for the
1031                          * stream.  Set it to the one just calculated.  */
1032                         DEBUG("Set SHA1 message digest for stream "
1033                               "(size=%"PRIu64").", lte->size);
1034                         copy_hash(lte->hash, hash);
1035                 }
1036         } else {
1037                 if (ctx->flags & VERIFY_STREAM_HASHES) {
1038                         /* The stream already had a SHA1 message digest present.  Verify
1039                          * that it is the same as the calculated value.  */
1040                         if (!hashes_equal(hash, lte->hash)) {
1041                                 if (wimlib_print_errors) {
1042                                         ERROR("Invalid SHA1 message digest "
1043                                               "on the following WIM stream:");
1044                                         print_lookup_table_entry(lte, stderr);
1045                                 }
1046                                 ret = WIMLIB_ERR_INVALID_RESOURCE_HASH;
1047                                 errno = EINVAL;
1048                                 goto out_next_cb;
1049                         }
1050                         DEBUG("SHA1 message digest okay for "
1051                               "stream (size=%"PRIu64").", lte->size);
1052                 }
1053         }
1054         ret = 0;
1055 out_next_cb:
1056         if (ctx->cbs.end_stream == NULL)
1057                 return ret;
1058         else
1059                 return (*ctx->cbs.end_stream)(lte, ret, ctx->cbs.end_stream_ctx);
1060 }
1061
1062 static int
1063 read_full_stream_with_cbs(struct wim_lookup_table_entry *lte,
1064                           const struct read_stream_list_callbacks *cbs)
1065 {
1066         int ret;
1067
1068         ret = (*cbs->begin_stream)(lte, false, cbs->begin_stream_ctx);
1069         if (ret)
1070                 return ret;
1071
1072         ret = read_stream_prefix(lte, lte->size, cbs->consume_chunk,
1073                                  cbs->consume_chunk_ctx);
1074
1075         return (*cbs->end_stream)(lte, ret, cbs->end_stream_ctx);
1076 }
1077
1078 /* Read the full data of the specified stream, passing the data into the
1079  * specified callbacks (all of which are optional) and either checking or
1080  * computing the SHA1 message digest of the stream.  */
1081 static int
1082 read_full_stream_with_sha1(struct wim_lookup_table_entry *lte,
1083                            const struct read_stream_list_callbacks *cbs)
1084 {
1085         struct hasher_context hasher_ctx = {
1086                 .flags = VERIFY_STREAM_HASHES | COMPUTE_MISSING_STREAM_HASHES,
1087                 .cbs = *cbs,
1088         };
1089         struct read_stream_list_callbacks hasher_cbs = {
1090                 .begin_stream           = hasher_begin_stream,
1091                 .begin_stream_ctx       = &hasher_ctx,
1092                 .consume_chunk          = hasher_consume_chunk,
1093                 .consume_chunk_ctx      = &hasher_ctx,
1094                 .end_stream             = hasher_end_stream,
1095                 .end_stream_ctx         = &hasher_ctx,
1096
1097         };
1098         return read_full_stream_with_cbs(lte, &hasher_cbs);
1099 }
1100
1101 /*
1102  * Read a list of streams, each of which may be in any supported location (e.g.
1103  * in a WIM or in an external file).  Unlike read_stream_prefix() or the
1104  * functions which call it, this function optimizes the case where multiple
1105  * streams are packed into a single compressed WIM resource and reads them all
1106  * consecutively, only decompressing the data one time.
1107  *
1108  * @stream_list
1109  *      List of streams (represented as `struct wim_lookup_table_entry's) to
1110  *      read.
1111  * @list_head_offset
1112  *      Offset of the `struct list_head' within each `struct
1113  *      wim_lookup_table_entry' that makes up the @stream_list.
1114  * @cbs
1115  *      Callback functions to accept the stream data.
1116  * @flags
1117  *      Bitwise OR of zero or more of the following flags:
1118  *
1119  *      VERIFY_STREAM_HASHES:
1120  *              For all streams being read that have already had SHA1 message
1121  *              digests computed, calculate the SHA1 message digest of the read
1122  *              data and compare it with the previously computed value.  If they
1123  *              do not match, return WIMLIB_ERR_INVALID_RESOURCE_HASH.
1124  *
1125  *      COMPUTE_MISSING_STREAM_HASHES
1126  *              For all streams being read that have not yet had their SHA1
1127  *              message digests computed, calculate and save their SHA1 message
1128  *              digests.
1129  *
1130  *      STREAM_LIST_ALREADY_SORTED
1131  *              @stream_list is already sorted in sequential order for reading.
1132  *
1133  * The callback functions are allowed to delete the current stream from the list
1134  * if necessary.
1135  *
1136  * Returns 0 on success; a nonzero error code on failure.  Failure can occur due
1137  * to an error reading the data or due to an error status being returned by any
1138  * of the callback functions.
1139  */
1140 int
1141 read_stream_list(struct list_head *stream_list,
1142                  size_t list_head_offset,
1143                  const struct read_stream_list_callbacks *cbs,
1144                  int flags)
1145 {
1146         int ret;
1147         struct list_head *cur, *next;
1148         struct wim_lookup_table_entry *lte;
1149         struct hasher_context *hasher_ctx;
1150         struct read_stream_list_callbacks *sink_cbs;
1151
1152         if (!(flags & STREAM_LIST_ALREADY_SORTED)) {
1153                 ret = sort_stream_list_by_sequential_order(stream_list, list_head_offset);
1154                 if (ret)
1155                         return ret;
1156         }
1157
1158         if (flags & (VERIFY_STREAM_HASHES | COMPUTE_MISSING_STREAM_HASHES)) {
1159                 hasher_ctx = alloca(sizeof(*hasher_ctx));
1160                 *hasher_ctx = (struct hasher_context) {
1161                         .flags  = flags,
1162                         .cbs    = *cbs,
1163                 };
1164                 sink_cbs = alloca(sizeof(*sink_cbs));
1165                 *sink_cbs = (struct read_stream_list_callbacks) {
1166                         .begin_stream           = hasher_begin_stream,
1167                         .begin_stream_ctx       = hasher_ctx,
1168                         .consume_chunk          = hasher_consume_chunk,
1169                         .consume_chunk_ctx      = hasher_ctx,
1170                         .end_stream             = hasher_end_stream,
1171                         .end_stream_ctx         = hasher_ctx,
1172                 };
1173         } else {
1174                 sink_cbs = (struct read_stream_list_callbacks*)cbs;
1175         }
1176
1177         for (cur = stream_list->next, next = cur->next;
1178              cur != stream_list;
1179              cur = next, next = cur->next)
1180         {
1181                 lte = (struct wim_lookup_table_entry*)((u8*)cur - list_head_offset);
1182
1183                 if (lte->flags & WIM_RESHDR_FLAG_PACKED_STREAMS &&
1184                     lte->size != lte->rspec->uncompressed_size)
1185                 {
1186
1187                         struct wim_lookup_table_entry *lte_next, *lte_last;
1188                         struct list_head *next2;
1189                         size_t stream_count;
1190
1191                         /* The next stream is a proper sub-sequence of a WIM
1192                          * resource.  See if there are other streams in the same
1193                          * resource that need to be read.  Since
1194                          * sort_stream_list_by_sequential_order() sorted the
1195                          * streams by offset in the WIM, this can be determined
1196                          * by simply scanning forward in the list.  */
1197
1198                         lte_last = lte;
1199                         stream_count = 1;
1200                         for (next2 = next;
1201                              next2 != stream_list
1202                              && (lte_next = (struct wim_lookup_table_entry*)
1203                                                 ((u8*)next2 - list_head_offset),
1204                                  lte_next->resource_location == RESOURCE_IN_WIM
1205                                  && lte_next->rspec == lte->rspec);
1206                              next2 = next2->next)
1207                         {
1208                                 lte_last = lte_next;
1209                                 stream_count++;
1210                         }
1211                         if (stream_count > 1) {
1212                                 /* Reading multiple streams combined into a
1213                                  * single WIM resource.  They are in the stream
1214                                  * list, sorted by offset; @lte specifies the
1215                                  * first stream in the resource that needs to be
1216                                  * read and @lte_last specifies the last stream
1217                                  * in the resource that needs to be read.  */
1218
1219                                 DEBUG("Reading %zu streams combined in same "
1220                                       "WIM resource", stream_count);
1221
1222                                 next = next2;
1223
1224                                 struct data_range ranges[stream_count];
1225
1226                                 {
1227                                         struct list_head *next3;
1228                                         size_t i;
1229                                         struct wim_lookup_table_entry *lte_cur;
1230
1231                                         next3 = cur;
1232                                         for (i = 0; i < stream_count; i++) {
1233                                                 lte_cur = (struct wim_lookup_table_entry*)
1234                                                         ((u8*)next3 - list_head_offset);
1235                                                 ranges[i].offset = lte_cur->offset_in_res;
1236                                                 ranges[i].size = lte_cur->size;
1237                                                 next3 = next3->next;
1238                                         }
1239                                 }
1240
1241                                 struct streamifier_context streamifier_ctx = {
1242                                         .cbs                    = *sink_cbs,
1243                                         .cur_stream             = lte,
1244                                         .next_stream            = next_stream(lte, list_head_offset),
1245                                         .cur_stream_offset      = 0,
1246                                         .final_stream           = lte_last,
1247                                         .list_head_offset       = list_head_offset,
1248                                 };
1249
1250                                 ret = read_compressed_wim_resource(lte->rspec,
1251                                                                    ranges,
1252                                                                    stream_count,
1253                                                                    streamifier_cb,
1254                                                                    &streamifier_ctx);
1255
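                                     /* The read failed.  If a stream had been
                                      * partially delivered, report the error to
                                      * its end_stream callback so that every
                                      * started stream is properly ended.  */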
1256                                 if (ret) {
1257                                         if (streamifier_ctx.cur_stream_offset != 0) {
1258                                                 ret = (*streamifier_ctx.cbs.end_stream)
1259                                                         (streamifier_ctx.cur_stream,
1260                                                          ret,
1261                                                          streamifier_ctx.cbs.end_stream_ctx);
1262                                         }
1263                                         return ret;
1264                                 }
1265                                 continue;
1266                         }
1267                 }
1268
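                     /* Read the next stream as an ordinary full stream.  A
                      * return value of BEGIN_STREAM_STATUS_SKIP_STREAM means
                      * the stream was skipped rather than failed, so it is not
                      * treated as an error.  */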
1269                 ret = read_full_stream_with_cbs(lte, sink_cbs);
1270                 if (ret && ret != BEGIN_STREAM_STATUS_SKIP_STREAM)
1271                         return ret;
1272         }
1273         return 0;
1274 }
1275
1276 /* Extract the first @size bytes of the specified stream.
1277  *
1278  * If @size specifies the full uncompressed size of the stream, then the SHA1
1279  * message digest of the uncompressed data is checked as the stream is extracted.
1280  *
1281  * The uncompressed data of the resource is passed in chunks of unspecified size
1282  * to the @extract_chunk function, which is also passed @extract_chunk_arg.  */
1283 int
1284 extract_stream(struct wim_lookup_table_entry *lte, u64 size,
1285                consume_data_callback_t extract_chunk, void *extract_chunk_arg)
1286 {
1287         wimlib_assert(size <= lte->size);
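             /* The SHA1 digest can only be checked when the entire stream is
              * being read; a prefix-only read skips verification.  */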
1288         if (size == lte->size) {
1289                 /* Do SHA1.  */
1290                 struct read_stream_list_callbacks cbs = {
1291                         .consume_chunk          = extract_chunk,
1292                         .consume_chunk_ctx      = extract_chunk_arg,
1293                 };
1294                 return read_full_stream_with_sha1(lte, &cbs);
1295         } else {
1296                 /* Don't do SHA1.  */
1297                 return read_stream_prefix(lte, size, extract_chunk,
1298                                           extract_chunk_arg);
1299         }
1300 }
1301
1302 /* A consume_data_callback_t implementation that writes the chunk of data to a
1303  * file descriptor.  */
1304 int
1305 extract_chunk_to_fd(const void *chunk, size_t size, void *_fd_p)
1306 {
1307         struct filedes *fd = _fd_p;
1308
1309         int ret = full_write(fd, chunk, size);
1310         if (ret) {
1311                 ERROR_WITH_ERRNO("Error writing to file descriptor");
1312                 return ret;
1313         }
1314         return 0;
1315 }
1316
1317 /* Extract the first @size bytes of the specified stream to the specified file
1318  * descriptor.  */
1319 int
1320 extract_stream_to_fd(struct wim_lookup_table_entry *lte,
1321                      struct filedes *fd, u64 size)
1322 {
1323         return extract_stream(lte, size, extract_chunk_to_fd, fd);
1324 }
1325
1326 /* Extract the full uncompressed contents of the specified stream to the
1327  * specified file descriptor.  */
1328 int
1329 extract_full_stream_to_fd(struct wim_lookup_table_entry *lte,
1330                           struct filedes *fd)
1331 {
1332         return extract_stream_to_fd(lte, fd, lte->size);
1333 }
1334
1335 /* Calculate the SHA1 message digest of a stream and store it in @lte->hash.  */
1336 int
1337 sha1_stream(struct wim_lookup_table_entry *lte)
1338 {
1339         wimlib_assert(lte->unhashed);
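             /* No consumer callbacks are needed; the stream is read only so
              * that read_full_stream_with_sha1() can compute and store the
              * digest.  */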
1340         struct read_stream_list_callbacks cbs = {
1341         };
1342         return read_full_stream_with_sha1(lte, &cbs);
1343 }
1344
1345 /* Convert a short WIM resource header to a stand-alone WIM resource
1346  * specification.  */
1347 void
1348 wim_res_hdr_to_spec(const struct wim_reshdr *reshdr, WIMStruct *wim,
1349                     struct wim_resource_spec *rspec)
1350 {
1351         rspec->wim = wim;
1352         rspec->offset_in_wim = reshdr->offset_in_wim;
1353         rspec->size_in_wim = reshdr->size_in_wim;
1354         rspec->uncompressed_size = reshdr->uncompressed_size;
1355         INIT_LIST_HEAD(&rspec->stream_list);
1356         rspec->flags = reshdr->flags;
1357         rspec->is_pipable = wim_is_pipable(wim);
1358 }
1359
1360 /* Convert a stand-alone resource specification to a WIM resource header.  */
1361 void
1362 wim_res_spec_to_hdr(const struct wim_resource_spec *rspec,
1363                     struct wim_reshdr *reshdr)
1364 {
1365         reshdr->offset_in_wim     = rspec->offset_in_wim;
1366         reshdr->size_in_wim       = rspec->size_in_wim;
1367         reshdr->flags             = rspec->flags;
1368         reshdr->uncompressed_size = rspec->uncompressed_size;
1369 }
1370
1371 /* Translate a WIM resource header from the on-disk format into the in-memory
1372  * format.  */
1373 void
1374 get_wim_reshdr(const struct wim_reshdr_disk *disk_reshdr,
1375                struct wim_reshdr *reshdr)
1376 {
1377         reshdr->offset_in_wim = le64_to_cpu(disk_reshdr->offset_in_wim);
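             /* The on-disk header stores the compressed size as a 7-byte
              * unsigned little-endian integer so that it can share an 8-byte
              * field with the 1-byte flags.  */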
1378         reshdr->size_in_wim = (((u64)disk_reshdr->size_in_wim[0] <<  0) |
1379                                ((u64)disk_reshdr->size_in_wim[1] <<  8) |
1380                                ((u64)disk_reshdr->size_in_wim[2] << 16) |
1381                                ((u64)disk_reshdr->size_in_wim[3] << 24) |
1382                                ((u64)disk_reshdr->size_in_wim[4] << 32) |
1383                                ((u64)disk_reshdr->size_in_wim[5] << 40) |
1384                                ((u64)disk_reshdr->size_in_wim[6] << 48));
1385         reshdr->uncompressed_size = le64_to_cpu(disk_reshdr->uncompressed_size);
1386         reshdr->flags = disk_reshdr->flags;
1387 }
1388
1389 /* Translate a WIM resource header from the in-memory format into the on-disk
1390  * format.  */
1391 void
1392 put_wim_reshdr(const struct wim_reshdr *reshdr,
1393                struct wim_reshdr_disk *disk_reshdr)
1394 {
1395         disk_reshdr->size_in_wim[0] = reshdr->size_in_wim  >>  0;
1396         disk_reshdr->size_in_wim[1] = reshdr->size_in_wim  >>  8;
1397         disk_reshdr->size_in_wim[2] = reshdr->size_in_wim  >> 16;
1398         disk_reshdr->size_in_wim[3] = reshdr->size_in_wim  >> 24;
1399         disk_reshdr->size_in_wim[4] = reshdr->size_in_wim  >> 32;
1400         disk_reshdr->size_in_wim[5] = reshdr->size_in_wim  >> 40;
1401         disk_reshdr->size_in_wim[6] = reshdr->size_in_wim  >> 48;
1402         disk_reshdr->flags = reshdr->flags;
1403         disk_reshdr->offset_in_wim = cpu_to_le64(reshdr->offset_in_wim);
1404         disk_reshdr->uncompressed_size = cpu_to_le64(reshdr->uncompressed_size);
1405 }